X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatabroker%2Factors%2Fdds%2FModuleShardBackendResolver.java;h=74aca03e8686b20d43e57a9550722fcf96eca8c3;hb=abaef4a5ae37f27542155457fe7306a4662b1eeb;hp=a1018967e76668b8e1640f86c83d7c2769a5b3d9;hpb=057b787289f7b909d7013c22ac73a1c91c860af8;p=controller.git
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
index a1018967e7..74aca03e86 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
@@ -7,61 +7,47 @@
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import akka.actor.ActorRef;
+import static akka.pattern.Patterns.ask;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.OnComplete;
import akka.util.Timeout;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
import com.google.common.collect.BiMap;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableBiMap.Builder;
-import com.google.common.primitives.UnsignedLong;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.client.BackendInfo;
+import org.checkerframework.checker.lock.qual.GuardedBy;
import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
-import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
-import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
-import org.opendaylight.controller.cluster.common.actor.ExplicitAsk;
+import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForShardAvailabilityChanges;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.Function1;
-import scala.compat.java8.FutureConverters;
+import scala.concurrent.Future;
/**
* {@link BackendInfoResolver} implementation for static shard configuration based on ShardManager. Each string-named
 * shard is assigned a single cookie and this mapping is stored in a bidirectional map. Information about the corresponding
- * shard leader is resolved via {@link ActorContext}. The product of resolution is {@link ShardBackendInfo}.
+ * shard leader is resolved via {@link ActorUtils}. The product of resolution is {@link ShardBackendInfo}.
+ *
+ * <p>
+ * This class is thread-safe.
*
* @author Robert Varga
*/
-@SuppressFBWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
- justification = "Pertains to the NULL_FUTURE field below. Null is allowed and is intended")
-final class ModuleShardBackendResolver extends BackendInfoResolver<ShardBackendInfo> {
- private static final CompletableFuture<ShardBackendInfo> NULL_FUTURE = CompletableFuture.completedFuture(null);
+final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
private static final Logger LOG = LoggerFactory.getLogger(ModuleShardBackendResolver.class);
- /**
- * Fall-over-dead timeout. If we do not make progress in this long, just fall over and propagate the failure.
- * All users are expected to fail, possibly attempting to recover by restarting. It is fair to remain
- * non-operational.
- */
- // TODO: maybe make this configurable somehow?
- private static final Timeout DEAD_TIMEOUT = Timeout.apply(15, TimeUnit.MINUTES);
+ private final ConcurrentMap<Long, ShardState> backends = new ConcurrentHashMap<>();
- private final ActorContext actorContext;
- // FIXME: this counter should be in superclass somewhere
- private final AtomicLong nextSessionId = new AtomicLong();
- private final Function1<ActorRef, ?> connectFunction;
+ private final Future<Registration> shardAvailabilityChangesRegFuture;
@GuardedBy("this")
private long nextShard = 1;
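
The resolver pairs each string-named shard with a Long cookie in an ImmutableBiMap, so lookups work in both directions (resolveShardForPath and resolveCookieName below). New shards are assigned cookies under a lock by rebuilding the immutable map. A minimal sketch of that pattern, with illustrative names (ShardCookies and assignCookie are not part of this patch):

    import com.google.common.collect.BiMap;
    import com.google.common.collect.ImmutableBiMap;

    // Sketch: each string-named shard gets a single Long cookie; the
    // bidirectional map serves lookups in both directions.
    final class ShardCookies {
        private volatile BiMap<String, Long> shards = ImmutableBiMap.of("default", 0L);
        private long nextShard = 1;

        synchronized Long assignCookie(final String shardName) {
            Long cookie = shards.get(shardName);
            if (cookie == null) {
                // ImmutableBiMap cannot be mutated in place: rebuild with the new entry
                cookie = nextShard++;
                shards = ImmutableBiMap.<String, Long>builder()
                    .putAll(shards).put(shardName, cookie).build();
            }
            return cookie;
        }

        String shardNameFor(final Long cookie) {
            return shards.inverse().get(cookie);
        }
    }
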
@@ -69,26 +55,37 @@ final class ModuleShardBackendResolver extends BackendInfoResolver<ShardBackendInfo>
 private volatile BiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
// FIXME: we really need just ActorContext.findPrimaryShardAsync()
- ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorContext actorContext) {
- this.actorContext = Preconditions.checkNotNull(actorContext);
- this.connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.BORON,
- ABIVersion.current()));
+ ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
+ super(clientId, actorUtils);
+
+ shardAvailabilityChangesRegFuture = ask(actorUtils.getShardManager(), new RegisterForShardAvailabilityChanges(
+ this::onShardAvailabilityChange), Timeout.apply(60, TimeUnit.MINUTES))
+ .map(reply -> (Registration)reply, ExecutionContexts.global());
+
+ shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
+ @Override
+ public void onComplete(Throwable failure, Registration reply) {
+ if (failure != null) {
+ LOG.error("RegisterForShardAvailabilityChanges failed", failure);
+ }
+ }
+ }, ExecutionContexts.global());
}
- @Override
- protected void invalidateBackendInfo(final CompletionStage<? extends BackendInfo> info) {
- LOG.trace("Initiated invalidation of backend information {}", info);
- info.thenAccept(this::invalidate);
- }
+ private void onShardAvailabilityChange(String shardName) {
+ LOG.debug("onShardAvailabilityChange for {}", shardName);
- private void invalidate(final BackendInfo result) {
- Preconditions.checkArgument(result instanceof ShardBackendInfo);
- LOG.debug("Invalidating backend information {}", result);
- actorContext.getPrimaryShardInfoCache().remove(((ShardBackendInfo)result).getShardName());
+ Long cookie = shards.get(shardName);
+ if (cookie == null) {
+ LOG.debug("No shard cookie found for {}", shardName);
+ return;
+ }
+
+ notifyStaleBackendInfoCallbacks(cookie);
}
Long resolveShardForPath(final YangInstanceIdentifier path) {
- final String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
+ final String shardName = actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
Long cookie = shards.get(shardName);
if (cookie == null) {
synchronized (this) {
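
The constructor above registers for shard-availability notifications with Akka's ask pattern, narrows the untyped reply to a Registration, and attaches an OnComplete callback purely for failure logging. A self-contained sketch of that pattern, where the shardManager actor and the registration message are stand-ins for the ones the patch uses:

    import static akka.pattern.Patterns.ask;

    import akka.actor.ActorRef;
    import akka.dispatch.ExecutionContexts;
    import akka.dispatch.OnComplete;
    import akka.util.Timeout;
    import java.util.concurrent.TimeUnit;
    import org.opendaylight.yangtools.concepts.Registration;
    import scala.concurrent.Future;

    final class RegistrationSketch {
        // ask() yields a Future<Object>; map() narrows it to the expected reply
        // type and OnComplete observes the outcome without blocking the caller.
        static Future<Registration> register(final ActorRef shardManager, final Object message) {
            final Future<Registration> future = ask(shardManager, message,
                Timeout.apply(60, TimeUnit.MINUTES))
                .map(reply -> (Registration) reply, ExecutionContexts.global());

            future.onComplete(new OnComplete<Registration>() {
                @Override
                public void onComplete(final Throwable failure, final Registration reply) {
                    if (failure != null) {
                        // Without the registration, shard-availability changes go unnoticed
                        System.err.println("Registration failed: " + failure);
                    }
                }
            }, ExecutionContexts.global());
            return future;
        }
    }
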
@@ -108,42 +105,83 @@ final class ModuleShardBackendResolver extends BackendInfoResolver<ShardBackendInfo>
 @Override
- protected CompletableFuture<ShardBackendInfo> resolveBackendInfo(final Long cookie) {
+ public CompletionStage<ShardBackendInfo> getBackendInfo(final Long cookie) {
+ /*
+ * We cannot perform a simple computeIfAbsent() here because we need to control sequencing of when the state
+ * is inserted into the map and retired from it (based on the stage result).
+ *
+ * We do not want to hook another stage once processing completes, and hooking a removal on failure from a
+ * compute method runs the inherent risk of the stage completing before the insertion does (i.e. we would
+ * have a removal of a non-existent element).
+ */
+ final ShardState existing = backends.get(cookie);
+ if (existing != null) {
+ return existing.getStage();
+ }
+
final String shardName = shards.inverse().get(cookie);
if (shardName == null) {
LOG.warn("Failing request for non-existent cookie {}", cookie);
- return NULL_FUTURE;
+ throw new IllegalArgumentException("Cookie " + cookie + " does not have a shard assigned");
+ }
+
+ LOG.debug("Resolving cookie {} to shard {}", cookie, shardName);
+ final ShardState toInsert = resolveBackendInfo(shardName, cookie);
+
+ final ShardState raced = backends.putIfAbsent(cookie, toInsert);
+ if (raced != null) {
+ // We have had a concurrent insertion, return that
+ LOG.debug("Race during insertion of state for cookie {} shard {}", cookie, shardName);
+ return raced.getStage();
}
- final CompletableFuture<ShardBackendInfo> ret = new CompletableFuture<>();
+ // We have succeeded in populating the map, now we need to take care of pruning the entry if it fails to
+ // complete
+ final CompletionStage<ShardBackendInfo> stage = toInsert.getStage();
+ stage.whenComplete((info, failure) -> {
+ if (failure != null) {
+ LOG.debug("Resolution of cookie {} shard {} failed, removing state", cookie, shardName, failure);
+ backends.remove(cookie, toInsert);
+
+ // Remove cache state in case someone else forgot to invalidate it
+ flushCache(shardName);
+ }
+ });
+
+ return stage;
+ }
- FutureConverters.toJava(actorContext.findPrimaryShardAsync(shardName)).thenCompose(info -> {
- LOG.debug("Looking up primary info for {} from {}", shardName, info);
- return FutureConverters.toJava(ExplicitAsk.ask(info.getPrimaryShardActor(), connectFunction, DEAD_TIMEOUT));
- }).thenApply(response -> {
- if (response instanceof RequestFailure) {
- final RequestFailure<?, ?> failure = (RequestFailure<?, ?>) response;
- LOG.debug("Connect request failed {}", failure, failure.getCause());
- throw Throwables.propagate(failure.getCause());
+ @Override
+ public CompletionStage<ShardBackendInfo> refreshBackendInfo(final Long cookie,
+ final ShardBackendInfo staleInfo) {
+ final ShardState existing = backends.get(cookie);
+ if (existing != null) {
+ if (!staleInfo.equals(existing.getResult())) {
+ return existing.getStage();
}
- LOG.debug("Resolved backend information to {}", response);
+ LOG.debug("Invalidating backend information {}", staleInfo);
+ flushCache(staleInfo.getName());
- Preconditions.checkArgument(response instanceof ConnectClientSuccess, "Unhandled response {}", response);
- final ConnectClientSuccess success = (ConnectClientSuccess) response;
+ LOG.trace("Invalidated cache {}", staleInfo);
+ backends.remove(cookie, existing);
+ }
- return new ShardBackendInfo(success.getBackend(),
- nextSessionId.getAndIncrement(), success.getVersion(), shardName, UnsignedLong.fromLongBits(cookie),
- success.getDataTree(), success.getMaxMessages());
- }).whenComplete((info, throwablw) -> {
- if (throwablw != null) {
- ret.completeExceptionally(throwablw);
- } else {
- ret.complete(info);
+ return getBackendInfo(cookie);
+ }
+
+ @Override
+ public void close() {
+ shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
+ @Override
+ public void onComplete(Throwable failure, Registration reply) {
+ reply.close();
}
- });
+ }, ExecutionContexts.global());
+ }
- LOG.debug("Resolving cookie {} to shard {}", cookie, shardName);
- return ret;
+ @Override
+ public String resolveCookieName(Long cookie) {
+ return verifyNotNull(shards.inverse().get(cookie), "Unexpected null cookie: %s", cookie);
}
}
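
The sequencing concern spelled out in the getBackendInfo() comment generalizes to any cache of in-flight futures: insert with putIfAbsent() so the exact inserted instance is known, then prune on failed completion with the two-argument remove() so a newer replacement entry is never evicted. A minimal sketch of the pattern, with an illustrative InFlightCache type that is not part of this patch:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionStage;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Function;

    final class InFlightCache<K, V> {
        private final ConcurrentMap<K, CompletableFuture<V>> entries = new ConcurrentHashMap<>();
        private final Function<K, CompletableFuture<V>> loader;

        InFlightCache(final Function<K, CompletableFuture<V>> loader) {
            this.loader = loader;
        }

        CompletionStage<V> get(final K key) {
            final CompletableFuture<V> existing = entries.get(key);
            if (existing != null) {
                return existing;
            }

            final CompletableFuture<V> toInsert = loader.apply(key);
            final CompletableFuture<V> raced = entries.putIfAbsent(key, toInsert);
            if (raced != null) {
                // Lost the race to a concurrent insertion: reuse that result
                return raced;
            }

            // Prune only on failure, and only if our instance is still mapped
            toInsert.whenComplete((value, failure) -> {
                if (failure != null) {
                    entries.remove(key, toInsert);
                }
            });
            return toInsert;
        }
    }
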