* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.sharding;
-import static akka.actor.ActorRef.noSender;
+import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Status;
-import akka.cluster.Cluster;
+import akka.actor.Status.Success;
import akka.cluster.ClusterEvent;
import akka.cluster.ClusterEvent.MemberExited;
import akka.cluster.ClusterEvent.MemberRemoved;
import akka.cluster.ClusterEvent.ReachableMember;
import akka.cluster.ClusterEvent.UnreachableMember;
import akka.cluster.Member;
-import akka.cluster.ddata.DistributedData;
-import akka.cluster.ddata.ORMap;
-import akka.cluster.ddata.Replicator;
-import akka.cluster.ddata.Replicator.Changed;
-import akka.cluster.ddata.Replicator.Subscribe;
-import akka.cluster.ddata.Replicator.Update;
+import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
import akka.util.Timeout;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-import com.google.common.collect.Sets.SetView;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.stream.Collectors;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
+import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.sharding.messages.CreatePrefixShard;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerCreated;
import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerRemoved;
import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
+import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.RemovePrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
+import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
+import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.compat.java8.FutureConverters;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
/**
* Actor that tracks currently open producers/shards on remote nodes and handles notifications of remote
* nodes of newly open producers/shards on the local node.
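*
* <p>A minimal construction sketch (illustrative only; the setShardingService/setActorSystem setters are
* assumed to exist on the builder, mirroring the getters shown below):
* <pre>
*     final ActorRef actor = actorSystem.actorOf(new ShardedDataTreeActorCreator()
*             .setShardingService(shardingService)
*             .setActorSystem(actorSystem)
*             .setClusterWrapper(clusterWrapper)
*             .setDistributedConfigDatastore(configDatastore)
*             .setDistributedOperDatastore(operDatastore)
*             .setLookupTaskMaxRetries(10)
*             .props(), DistributedShardedDOMDataTree.ACTOR_ID);
* </pre>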
*/
public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDataTreeActor.class);
+
private static final String PERSISTENCE_ID = "sharding-service-actor";
private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
+ static final FiniteDuration SHARD_LOOKUP_TASK_INTERVAL = new FiniteDuration(1L, TimeUnit.SECONDS);
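+ // polling cadence for the LookupTask subclasses below: a task reschedules itself at this interval
+ // until it succeeds or exhausts lookupTaskMaxRetries (retry accounting lives in the LookupTask base class)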
+
private final DistributedShardedDOMDataTree shardingService;
private final ActorSystem actorSystem;
private final ClusterWrapper clusterWrapper;
- // helper actorContext used only for static calls to executeAsync etc
+ // helper actorUtils used only for static calls to executeAsync etc
- // for calls that need specific actor context tied to a datastore use the one provided in the DistributedDataStore
+ // for calls that need specific actor context tied to a datastore use the one provided in the AbstractDataStore
- private final ActorContext actorContext;
+ private final ActorUtils actorUtils;
private final ShardingServiceAddressResolver resolver;
- private final DistributedDataStore distributedConfigDatastore;
- private final DistributedDataStore distributedOperDatastore;
+ private final AbstractDataStore distributedConfigDatastore;
+ private final AbstractDataStore distributedOperDatastore;
+ private final int lookupTaskMaxRetries;
private final Map<DOMDataTreeIdentifier, ActorProducerRegistration> idToProducer = new HashMap<>();
- private final Map<DOMDataTreeIdentifier, ShardFrontendRegistration> idToShardRegistration = new HashMap<>();
-
- private final Cluster cluster;
- private final ActorRef replicator;
-
- private ORMap<PrefixShardConfiguration> currentData = ORMap.create();
- private Map<DOMDataTreeIdentifier, PrefixShardConfiguration> currentConfiguration = new HashMap<>();
ShardedDataTreeActor(final ShardedDataTreeActorCreator builder) {
LOG.debug("Creating ShardedDataTreeActor on {}", builder.getClusterWrapper().getCurrentMemberName());
clusterWrapper = builder.getClusterWrapper();
distributedConfigDatastore = builder.getDistributedConfigDatastore();
distributedOperDatastore = builder.getDistributedOperDatastore();
- actorContext = distributedConfigDatastore.getActorContext();
+ lookupTaskMaxRetries = builder.getLookupTaskMaxRetries();
+ actorUtils = distributedConfigDatastore.getActorUtils();
resolver = new ShardingServiceAddressResolver(
DistributedShardedDOMDataTree.ACTOR_ID, clusterWrapper.getCurrentMemberName());
clusterWrapper.subscribeToMemberEvents(self());
- cluster = Cluster.get(actorSystem);
-
- replicator = DistributedData.get(context().system()).replicator();
}
@Override
public void preStart() {
- final Subscribe<ORMap<PrefixShardConfiguration>> subscribe =
- new Subscribe<>(ClusterUtils.CONFIGURATION_KEY, self());
- replicator.tell(subscribe, noSender());
}
@Override
- protected void handleRecover(final Object message) throws Exception {
+ protected void handleRecover(final Object message) {
LOG.debug("Received a recover message {}", message);
}
@Override
- protected void handleCommand(final Object message) throws Exception {
- LOG.debug("Received {}", message);
+ protected void handleCommand(final Object message) {
+ LOG.debug("{} : Received {}", clusterWrapper.getCurrentMemberName(), message);
if (message instanceof ClusterEvent.MemberUp) {
memberUp((ClusterEvent.MemberUp) message);
} else if (message instanceof ClusterEvent.MemberWeaklyUp) {
memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
} else if (message instanceof ClusterEvent.MemberExited) {
memberExited((ClusterEvent.MemberExited) message);
} else if (message instanceof ClusterEvent.MemberRemoved) {
memberRemoved((ClusterEvent.MemberRemoved) message);
} else if (message instanceof ClusterEvent.UnreachableMember) {
memberUnreachable((ClusterEvent.UnreachableMember) message);
} else if (message instanceof ClusterEvent.ReachableMember) {
memberReachable((ClusterEvent.ReachableMember) message);
- } else if (message instanceof Changed) {
- onConfigChanged((Changed) message);
} else if (message instanceof ProducerCreated) {
onProducerCreated((ProducerCreated) message);
} else if (message instanceof NotifyProducerCreated) {
onNotifyProducerCreated((NotifyProducerCreated) message);
} else if (message instanceof NotifyProducerRemoved) {
onNotifyProducerRemoved((NotifyProducerRemoved) message);
} else if (message instanceof PrefixShardCreated) {
onPrefixShardCreated((PrefixShardCreated) message);
- } else if (message instanceof CreatePrefixShard) {
- onCreatePrefixShard((CreatePrefixShard) message);
- } else if (message instanceof RemovePrefixShard) {
- onRemovePrefixShard((RemovePrefixShard) message);
+ } else if (message instanceof LookupPrefixShard) {
+ onLookupPrefixShard((LookupPrefixShard) message);
+ } else if (message instanceof PrefixShardRemovalLookup) {
+ onPrefixShardRemovalLookup((PrefixShardRemovalLookup) message);
} else if (message instanceof PrefixShardRemoved) {
onPrefixShardRemoved((PrefixShardRemoved) message);
+ } else if (message instanceof StartConfigShardLookup) {
+ onStartConfigShardLookup((StartConfigShardLookup) message);
}
}
- private void onConfigChanged(final Changed<ORMap<PrefixShardConfiguration>> change) {
- LOG.debug("member : {}, Received configuration changed: {}", clusterWrapper.getCurrentMemberName(), change);
-
- currentData = change.dataValue();
- final Map<String, PrefixShardConfiguration> changedConfig = change.dataValue().getEntries();
-
- LOG.debug("Changed set {}", changedConfig);
-
- try {
- final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newConfig =
- changedConfig.values().stream().collect(
- Collectors.toMap(PrefixShardConfiguration::getPrefix, Function.identity()));
- resolveConfig(newConfig);
- } catch (final IllegalStateException e) {
- LOG.error("Failed, ", e);
- }
-
- }
-
- private void resolveConfig(final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newConfig) {
-
- // get the removed configurations
- final SetView<DOMDataTreeIdentifier> deleted =
- Sets.difference(currentConfiguration.keySet(), newConfig.keySet());
- shardingService.resolveShardRemovals(deleted);
-
- // get the added configurations
- final SetView<DOMDataTreeIdentifier> additions =
- Sets.difference(newConfig.keySet(), currentConfiguration.keySet());
- shardingService.resolveShardAdditions(additions);
- // we can ignore those that existed previously since the potential changes in replicas will be handled by
- // shard manager.
-
- currentConfiguration = new HashMap<>(newConfig);
- }
-
@Override
public String persistenceId() {
return PERSISTENCE_ID;
}
private void onProducerCreated(final ProducerCreated message) {
LOG.debug("Received ProducerCreated: {}", message);
- // fastpath if no replication is needed, since there is only one node
- if (resolver.getShardingServicePeerActorAddresses().size() == 1) {
- getSender().tell(new Status.Success(null), noSender());
+ // fastpath if we have no peers
+ if (resolver.getShardingServicePeerActorAddresses().isEmpty()) {
+ getSender().tell(new Status.Success(null), ActorRef.noSender());
+ return;
}
final ActorRef sender = getSender();
final Collection<DOMDataTreeIdentifier> subtrees = message.getSubtrees();
final List<CompletableFuture<Object>> futures = new ArrayList<>();
for (final String address : resolver.getShardingServicePeerActorAddresses()) {
final ActorSelection actorSelection = actorSystem.actorSelection(address);
futures.add(
FutureConverters.toJava(
- actorContext.executeOperationAsync(
+ actorUtils.executeOperationAsync(
actorSelection, new NotifyProducerCreated(subtrees), DEFAULT_ASK_TIMEOUT))
.toCompletableFuture());
}
final CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
futures.toArray(new CompletableFuture[futures.size()]));
- combinedFuture.thenRun(() -> {
- sender.tell(new Status.Success(null), noSender());
- }).exceptionally(throwable -> {
- sender.tell(new Status.Failure(throwable), self());
- return null;
- });
+ combinedFuture
+ .thenRun(() -> sender.tell(new Success(null), ActorRef.noSender()))
+ .exceptionally(throwable -> {
+ sender.tell(new Status.Failure(throwable), self());
+ return null;
+ });
}
private void onNotifyProducerCreated(final NotifyProducerCreated message) {
final ActorSelection selection = actorSystem.actorSelection(address);
futures.add(FutureConverters.toJava(
- actorContext.executeOperationAsync(selection, new NotifyProducerRemoved(message.getSubtrees())))
+ actorUtils.executeOperationAsync(selection, new NotifyProducerRemoved(message.getSubtrees())))
.toCompletableFuture());
}
final ActorProducerRegistration registration = idToProducer.remove(message.getSubtrees().iterator().next());
if (registration == null) {
LOG.warn("The notification contained a path on which no producer is registered, throwing away");
- getSender().tell(new Status.Success(null), noSender());
+ getSender().tell(new Status.Success(null), ActorRef.noSender());
return;
}
try {
registration.close();
- getSender().tell(new Status.Success(null), noSender());
+ getSender().tell(new Status.Success(null), ActorRef.noSender());
} catch (final DOMDataTreeProducerException e) {
LOG.error("Unable to close producer", e);
- getSender().tell(new Status.Failure(e), noSender());
+ getSender().tell(new Status.Failure(e), ActorRef.noSender());
}
}
@SuppressWarnings("checkstyle:IllegalCatch")
- private void onCreatePrefixShard(final CreatePrefixShard message) {
- LOG.debug("Member: {}, Received CreatePrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
+ private void onLookupPrefixShard(final LookupPrefixShard message) {
+ LOG.debug("Member: {}, Received LookupPrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
- final PrefixShardConfiguration configuration = message.getConfiguration();
+ final DOMDataTreeIdentifier prefix = message.getPrefix();
- final Update<ORMap<PrefixShardConfiguration>> update =
- new Update<>(ClusterUtils.CONFIGURATION_KEY, currentData, Replicator.writeLocal(),
- map -> map.put(cluster, configuration.toDataMapKey(), configuration));
+ final ActorUtils utils = prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
+ ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
- replicator.tell(update, self());
+ // schedule a notification task for the reply
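+ // the lookup proceeds in stages: ShardCreationLookupTask waits for the local backend shard,
+ // then chains ShardLeaderLookupTask (leader election) and FrontendLookupTask (frontend
+ // registration), the last of which completes the sender's future with Success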
+ actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
+ new ShardCreationLookupTask(actorSystem, getSender(), clusterWrapper,
+ utils, shardingService, prefix, lookupTaskMaxRetries), actorSystem.dispatcher());
}
private void onPrefixShardCreated(final PrefixShardCreated message) {
LOG.debug("Member: {}, Received PrefixShardCreated: {}", clusterWrapper.getCurrentMemberName(), message);
- final Collection<String> addresses = resolver.getShardingServicePeerActorAddresses();
- final ActorRef sender = getSender();
-
- final List<CompletableFuture<Object>> futures = new ArrayList<>();
-
- for (final String address : addresses) {
- final ActorSelection actorSelection = actorSystem.actorSelection(address);
- futures.add(FutureConverters.toJava(actorContext.executeOperationAsync(actorSelection,
- new CreatePrefixShard(message.getConfiguration()))).toCompletableFuture());
- }
+ final PrefixShardConfiguration config = message.getConfiguration();
- final CompletableFuture<Void> combinedFuture =
- CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
-
- combinedFuture.thenRun(() -> {
- sender.tell(new Status.Success(null), self());
- }).exceptionally(throwable -> {
- sender.tell(new Status.Failure(throwable), self());
- return null;
- });
+ shardingService.resolveShardAdditions(Collections.singleton(config.getPrefix()));
}
- private void onRemovePrefixShard(final RemovePrefixShard message) {
- LOG.debug("Member: {}, Received RemovePrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
+ private void onPrefixShardRemovalLookup(final PrefixShardRemovalLookup message) {
+ LOG.debug("Member: {}, Received PrefixShardRemovalLookup: {}", clusterWrapper.getCurrentMemberName(), message);
+
+ final ShardRemovalLookupTask removalTask =
+ new ShardRemovalLookupTask(actorSystem, getSender(),
+ actorUtils, message.getPrefix(), lookupTaskMaxRetries);
- //TODO the removal message should have the configuration or some other way to get to the key
- final Update<ORMap<PrefixShardConfiguration>> removal =
- new Update<>(ClusterUtils.CONFIGURATION_KEY, currentData, Replicator.writeLocal(),
- map -> map.remove(cluster, "prefix=" + message.getPrefix()));
- replicator.tell(removal, self());
+ actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL, removalTask, actorSystem.dispatcher());
}
private void onPrefixShardRemoved(final PrefixShardRemoved message) {
LOG.debug("Received PrefixShardRemoved: {}", message);
- final ShardFrontendRegistration registration = idToShardRegistration.get(message.getPrefix());
+ shardingService.resolveShardRemovals(Collections.singleton(message.getPrefix()));
+ }
- if (registration == null) {
- LOG.warn("Received shard removed for {}, but not shard registered at this prefix all registrations: {}",
- message.getPrefix(), idToShardRegistration);
- return;
- }
+ private void onStartConfigShardLookup(final StartConfigShardLookup message) {
+ LOG.debug("Received StartConfigShardLookup: {}", message);
+
+ final ActorUtils context =
+ message.getType().equals(LogicalDatastoreType.CONFIGURATION)
+ ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
- registration.close();
+ // schedule a notification task for the reply
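+ // ConfigShardLookupTask polls findLocalShard(ClusterUtils.PREFIX_CONFIG_SHARD_ID) until the
+ // local backend of the prefix configuration shard appears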
+ actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
+ new ConfigShardLookupTask(
+ actorSystem, getSender(), context, message, lookupTaskMaxRetries),
+ actorSystem.dispatcher());
}
private static MemberName memberToName(final Member member) {
}
}
+ /**
+ * Handles the lookup step of cds shard creation once the configuration is updated.
+ */
+ private static class ShardCreationLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ClusterWrapper clusterWrapper;
+ private final ActorUtils context;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+ private final int lookupMaxRetries;
+
+ ShardCreationLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ClusterWrapper clusterWrapper,
+ final ActorUtils context,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.clusterWrapper = clusterWrapper;
+ this.context = context;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ this.lookupMaxRetries = lookupMaxRetries;
+ }
+
+ @Override
+ public void run() {
+ final Future<ActorRef> localShardFuture =
+ context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
+
+ localShardFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(final Throwable throwable, final ActorRef actorRef) {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ LOG.debug("Local backend for shard[{}] lookup successful, starting leader lookup..", toLookup);
+
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL,
+ new ShardLeaderLookupTask(system, replyTo, context, clusterWrapper, actorRef,
+ shardingService, toLookup, lookupMaxRetries),
+ system.dispatcher());
+ }
+ }
+ }, system.dispatcher());
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Local backend for shard[{}] not found, try: {}, rescheduling..", toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardCreationLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Handles the readiness step by waiting for a leader of the created shard.
+ */
+ private static class ShardLeaderLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+ private final ClusterWrapper clusterWrapper;
+ private final ActorRef shard;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+ private final int lookupMaxRetries;
+
+ ShardLeaderLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final ClusterWrapper clusterWrapper,
+ final ActorRef shard,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.clusterWrapper = clusterWrapper;
+ this.shard = shard;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ this.lookupMaxRetries = lookupMaxRetries;
+ }
+
+ @Override
+ public void run() {
+
+ final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
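+ // FindLeaderReply carries an Optional leader actor path that is present once the shard has
+ // elected a leader; an empty Optional means the backend is not ready yet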
+
+ ask.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable throwable, final Object findLeaderReply) {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
+ final Optional<String> leaderActor = findLeader.getLeaderActor();
+ if (leaderActor.isPresent()) {
+ // leader is found, backend seems ready, check if the frontend is ready
+ LOG.debug("{} - Leader for shard[{}] backend ready, starting frontend lookup..",
+ clusterWrapper.getCurrentMemberName(), toLookup);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL,
+ new FrontendLookupTask(
+ system, replyTo, shardingService, toLookup, lookupMaxRetries),
+ system.dispatcher());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }
+ }, system.dispatcher());
+
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("{} - Leader for shard[{}] backend not found on try: {}, retrying..",
+ clusterWrapper.getCurrentMemberName(), toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardLeaderLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * After the backend is ready this handles the last step - checking that a frontend shard exists for the backend.
+ * The frontend should already be ready by the time the backend is created, so this is just a sanity check in case
+ * they race. Once the check succeeds, the future for the cds shard creation is completed and the shard is ready
+ * for use.
+ */
+ private static final class FrontendLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+
+ FrontendLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
+ final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> entry =
+ shardingService.lookupShardFrontend(toLookup);
+
+ if (entry != null && tableEntryIdCheck(entry, toLookup) && entry.getValue() != null) {
+ replyTo.tell(new Success(null), ActorRef.noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+
+ private boolean tableEntryIdCheck(final DOMDataTreePrefixTableEntry<?> entry,
+ final DOMDataTreeIdentifier prefix) {
+ if (entry == null) {
+ return false;
+ }
+
+ if (YangInstanceIdentifier.empty().equals(prefix.getRootIdentifier())) {
+ return true;
+ }
+
+ if (entry.getIdentifier().equals(prefix.getRootIdentifier().getLastPathArgument())) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Frontend for shard[{}] not found on try: {}, retrying..", toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, FrontendLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Task that is run once a cds shard registration is closed and completes once the backend shard is removed from the
+ * configuration.
+ */
+ private static class ShardRemovalLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+ private final DOMDataTreeIdentifier toLookup;
+
+ ShardRemovalLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final DOMDataTreeIdentifier toLookup,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
+ final Future<ActorRef> localShardFuture =
+ context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
+
+ localShardFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(final Throwable throwable, final ActorRef actorRef) {
+ if (throwable != null) {
+ //TODO Shouldn't we check why findLocalShard failed?
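+ // a failed findLocalShardAsync means no local shard exists for the prefix anymore,
+ // which is exactly the condition this removal lookup is waiting for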
+ LOG.debug("Backend shard[{}] removal lookup successful notifying the registration future",
+ toLookup);
+ replyTo.tell(new Success(null), ActorRef.noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }, system.dispatcher());
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Backend shard[{}] removal lookup failed, shard is still present, try: {}, rescheduling..",
+ toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardRemovalLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Task for handling the lookup of the backend for the configuration shard.
+ */
+ private static class ConfigShardLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+
+ ConfigShardLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final StartConfigShardLookup message,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Local backend for prefix configuration shard not found, try: {}, rescheduling..", retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ConfigShardLookupTask.this, system.dispatcher());
+ }
+
+ @Override
+ public void run() {
+ final Optional<ActorRef> localShard =
+ context.findLocalShard(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
+
+ if (!localShard.isPresent()) {
+ tryReschedule(null);
+ } else {
+ LOG.debug("Local backend for prefix configuration shard lookup successful");
+ replyTo.tell(new Status.Success(null), ActorRef.noSender());
+ }
+ }
+ }
+
+ /**
+ * Task for handling the readiness state of the config shard. Reports success once the leader is elected.
+ */
+ private static class ConfigShardReadinessTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+ private final ClusterWrapper clusterWrapper;
+ private final ActorRef shard;
+
+ ConfigShardReadinessTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final ClusterWrapper clusterWrapper,
+ final ActorRef shard,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.clusterWrapper = clusterWrapper;
+ this.shard = shard;
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("{} - Leader for config shard not found on try: {}, retrying..",
+ clusterWrapper.getCurrentMemberName(), retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ConfigShardReadinessTask.this, system.dispatcher());
+ }
+
+ @Override
+ public void run() {
+ final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
+
+ ask.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable throwable, final Object findLeaderReply) {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
+ final Optional<String> leaderActor = findLeader.getLeaderActor();
+ if (leaderActor.isPresent()) {
+ // leader is found, backend seems ready, check if the frontend is ready
+ LOG.debug("{} - Leader for config shard is ready. Ending lookup.",
+ clusterWrapper.getCurrentMemberName());
+ replyTo.tell(new Status.Success(null), ActorRef.noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }
+ }, system.dispatcher());
+ }
+ }
+
public static class ShardedDataTreeActorCreator {
private DistributedShardedDOMDataTree shardingService;
- private DistributedDataStore distributedConfigDatastore;
- private DistributedDataStore distributedOperDatastore;
+ private AbstractDataStore distributedConfigDatastore;
+ private AbstractDataStore distributedOperDatastore;
private ActorSystem actorSystem;
private ClusterWrapper cluster;
+ private int maxRetries;
public DistributedShardedDOMDataTree getShardingService() {
return shardingService;
return this;
}
- public ShardedDataTreeActorCreator setClusterWrapper(final ClusterWrapper cluster) {
- this.cluster = cluster;
+ public ShardedDataTreeActorCreator setClusterWrapper(final ClusterWrapper clusterWrapper) {
+ this.cluster = clusterWrapper;
return this;
}
return cluster;
}
- public DistributedDataStore getDistributedConfigDatastore() {
+ public AbstractDataStore getDistributedConfigDatastore() {
return distributedConfigDatastore;
}
public ShardedDataTreeActorCreator setDistributedConfigDatastore(
- final DistributedDataStore distributedConfigDatastore) {
+ final AbstractDataStore distributedConfigDatastore) {
this.distributedConfigDatastore = distributedConfigDatastore;
return this;
}
- public DistributedDataStore getDistributedOperDatastore() {
+ public AbstractDataStore getDistributedOperDatastore() {
return distributedOperDatastore;
}
public ShardedDataTreeActorCreator setDistributedOperDatastore(
- final DistributedDataStore distributedOperDatastore) {
+ final AbstractDataStore distributedOperDatastore) {
this.distributedOperDatastore = distributedOperDatastore;
return this;
}
+ public ShardedDataTreeActorCreator setLookupTaskMaxRetries(final int newMaxRetries) {
+ this.maxRetries = newMaxRetries;
+ return this;
+ }
+
+ public int getLookupTaskMaxRetries() {
+ return maxRetries;
+ }
+
private void verify() {
- Preconditions.checkNotNull(shardingService);
- Preconditions.checkNotNull(actorSystem);
- Preconditions.checkNotNull(cluster);
- Preconditions.checkNotNull(distributedConfigDatastore);
- Preconditions.checkNotNull(distributedOperDatastore);
+ requireNonNull(shardingService);
+ requireNonNull(actorSystem);
+ requireNonNull(cluster);
+ requireNonNull(distributedConfigDatastore);
+ requireNonNull(distributedOperDatastore);
}
public Props props() {
verify();
return Props.create(ShardedDataTreeActor.class, this);
}
-
}
}