*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import akka.dispatch.ExecutionContexts;
-import akka.dispatch.OnComplete;
-import akka.util.Timeout;
import com.google.common.base.Preconditions;
import com.google.common.collect.BiMap;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableBiMap.Builder;
-import com.google.common.primitives.UnsignedLong;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.client.BackendInfo;
+import javax.annotation.concurrent.ThreadSafe;
import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
-import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.ExecutionContext;
/**
 * {@link BackendInfoResolver} implementation for static shard configuration based on ShardManager. Each string-named
 * shard is assigned a single cookie. (NOTE: original sentence was truncated in source; completion is a best-effort
 * reconstruction — verify against upstream javadoc.)
* @author Robert Varga
*/
-final class ModuleShardBackendResolver extends BackendInfoResolver<ShardBackendInfo> {
- private static final ExecutionContext DIRECT_EXECUTION_CONTEXT =
- ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
- private static final CompletableFuture<ShardBackendInfo> NULL_FUTURE = CompletableFuture.completedFuture(null);
+@ThreadSafe
+final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
private static final Logger LOG = LoggerFactory.getLogger(ModuleShardBackendResolver.class);
- /**
- * Fall-over-dead timeout. If we do not make progress in this long, just fall over and propagate the failure.
- * All users are expected to fail, possibly attempting to recover by restarting. It is fair to remain
- * non-operational.
- */
- // TODO: maybe make this configurable somehow?
- private static final Timeout DEAD_TIMEOUT = Timeout.apply(15, TimeUnit.MINUTES);
-
+ private final ConcurrentMap<Long, ShardState> backends = new ConcurrentHashMap<>();
private final ActorContext actorContext;
@GuardedBy("this")
private volatile BiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
// FIXME: we really need just ActorContext.findPrimaryShardAsync()
- ModuleShardBackendResolver(final ActorContext actorContext) {
+ ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorContext actorContext) {
+ super(clientId, actorContext);
this.actorContext = Preconditions.checkNotNull(actorContext);
}
- @Override
- protected void invalidateBackendInfo(final CompletionStage<? extends BackendInfo> info) {
- LOG.trace("Initiated invalidation of backend information {}", info);
- info.thenAccept(this::invalidate);
- }
-
- private void invalidate(final BackendInfo result) {
- Preconditions.checkArgument(result instanceof ShardBackendInfo);
- LOG.debug("Invalidating backend information {}", result);
- actorContext.getPrimaryShardInfoCache().remove(((ShardBackendInfo)result).getShardName());
- }
-
Long resolveShardForPath(final YangInstanceIdentifier path) {
final String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
Long cookie = shards.get(shardName);
if (cookie == null) {
cookie = nextShard++;
- Builder<String, Long> b = ImmutableBiMap.builder();
- b.putAll(shards);
- b.put(shardName, cookie);
- shards = b.build();
+ Builder<String, Long> builder = ImmutableBiMap.builder();
+ builder.putAll(shards);
+ builder.put(shardName, cookie);
+ shards = builder.build();
}
}
}
return cookie;
}
+
@Override
- protected CompletableFuture<ShardBackendInfo> resolveBackendInfo(final Long cookie) {
+ public CompletionStage<ShardBackendInfo> getBackendInfo(final Long cookie) {
+ /*
+ * We cannot perform a simple computeIfAbsent() here because we need to control sequencing of when the state
+ * is inserted into the map and retired from it (based on the stage result).
+ *
+ * We do not want to hook another stage one processing completes and hooking a removal on failure from a compute
+ * method runs the inherent risk of stage completing before the insertion does (i.e. we have a removal of
+ * non-existent element.
+ */
+ final ShardState existing = backends.get(cookie);
+ if (existing != null) {
+ return existing.getStage();
+ }
+
final String shardName = shards.inverse().get(cookie);
if (shardName == null) {
LOG.warn("Failing request for non-existent cookie {}", cookie);
- return NULL_FUTURE;
+ throw new IllegalArgumentException("Cookie " + cookie + " does not have a shard assigned");
}
- final CompletableFuture<ShardBackendInfo> ret = new CompletableFuture<>();
+ LOG.debug("Resolving cookie {} to shard {}", cookie, shardName);
+ final ShardState toInsert = resolveBackendInfo(shardName, cookie);
- actorContext.findPrimaryShardAsync(shardName).onComplete(new OnComplete<PrimaryShardInfo>() {
- @Override
- public void onComplete(final Throwable t, final PrimaryShardInfo v) {
- if (t != null) {
- ret.completeExceptionally(t);
- } else {
- ret.complete(createBackendInfo(v, shardName, cookie));
- }
+ final ShardState raced = backends.putIfAbsent(cookie, toInsert);
+ if (raced != null) {
+ // We have had a concurrent insertion, return that
+ LOG.debug("Race during insertion of state for cookie {} shard {}", cookie, shardName);
+ return raced.getStage();
+ }
+
+ // We have succeeded in populating the map, now we need to take care of pruning the entry if it fails to
+ // complete
+ final CompletionStage<ShardBackendInfo> stage = toInsert.getStage();
+ stage.whenComplete((info, failure) -> {
+ if (failure != null) {
+ LOG.debug("Resolution of cookie {} shard {} failed, removing state", cookie, shardName, failure);
+ backends.remove(cookie, toInsert);
+
+ // Remove cache state in case someone else forgot to invalidate it
+ flushCache(shardName);
}
- }, DIRECT_EXECUTION_CONTEXT);
+ });
- LOG.debug("Resolving cookie {} to shard {}", cookie, shardName);
- return ret;
+ return stage;
}
- private static ABIVersion toABIVersion(final short version) {
- switch (version) {
- case DataStoreVersions.BORON_VERSION:
- return ABIVersion.BORON;
- }
+ @Override
+ public CompletionStage<ShardBackendInfo> refreshBackendInfo(final Long cookie,
+ final ShardBackendInfo staleInfo) {
+ final ShardState existing = backends.get(cookie);
+ if (existing != null) {
+ if (!staleInfo.equals(existing.getResult())) {
+ return existing.getStage();
+ }
- throw new IllegalArgumentException("Unsupported version " + version);
- }
+ LOG.debug("Invalidating backend information {}", staleInfo);
+ flushCache(staleInfo.getShardName());
- private static ShardBackendInfo createBackendInfo(final Object result, final String shardName, final Long cookie) {
- Preconditions.checkArgument(result instanceof PrimaryShardInfo);
- final PrimaryShardInfo info = (PrimaryShardInfo) result;
+ LOG.trace("Invalidated cache {}", staleInfo);
+ backends.remove(cookie, existing);
+ }
- LOG.debug("Creating backend information for {}", info);
- return new ShardBackendInfo(info.getPrimaryShardActor().resolveOne(DEAD_TIMEOUT).value().get().get(),
- toABIVersion(info.getPrimaryShardVersion()), shardName, UnsignedLong.fromLongBits(cookie),
- info.getLocalShardDataTree());
- }
+ return getBackendInfo(cookie);
+ }
}