import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Status;
+import akka.actor.Status.Failure;
+import akka.actor.Status.Success;
+import akka.cluster.Cluster;
import akka.cluster.ClusterEvent;
import akka.cluster.ClusterEvent.MemberExited;
import akka.cluster.ClusterEvent.MemberRemoved;
import akka.cluster.ClusterEvent.ReachableMember;
import akka.cluster.ClusterEvent.UnreachableMember;
import akka.cluster.Member;
+import akka.cluster.ddata.DistributedData;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.Replicator;
+import akka.cluster.ddata.Replicator.Changed;
+import akka.cluster.ddata.Replicator.Subscribe;
+import akka.cluster.ddata.Replicator.Update;
+import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
import akka.util.Timeout;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
+import com.google.common.collect.Sets;
+import com.google.common.collect.Sets.SetView;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.opendaylight.controller.cluster.sharding.messages.CreatePrefixShard;
import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerCreated;
import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerRemoved;
import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
import org.opendaylight.controller.cluster.sharding.messages.RemovePrefixShard;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
+import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
+import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.compat.java8.FutureConverters;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
/**
* Actor that tracks currently open producers/shards on remote nodes and handles notifications of remote
* nodes of newly open producers/shards on the local node.
*/
public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDataTreeActor.class);
+
private static final String PERSISTENCE_ID = "sharding-service-actor";
private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
- private final DOMDataTreeService dataTreeService;
- private final DOMDataTreeShardingService shardingService;
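+ // Interval between retries of the shard lookup tasks and the maximum number of retries before
+ // the lookup is reported back to the caller as failed.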
+ static final FiniteDuration SHARD_LOOKUP_TASK_INTERVAL = new FiniteDuration(1L, TimeUnit.SECONDS);
+ static final int LOOKUP_TASK_MAX_RETRIES = 100;
+
+ private final DistributedShardedDOMDataTree shardingService;
private final ActorSystem actorSystem;
- private final ClusterWrapper cluster;
+ private final ClusterWrapper clusterWrapper;
// helper actorContext used only for static calls to executeAsync etc
// for calls that need specific actor context tied to a datastore use the one provided in the DistributedDataStore
private final ActorContext actorContext;
private final Map<DOMDataTreeIdentifier, ActorProducerRegistration> idToProducer = new HashMap<>();
private final Map<DOMDataTreeIdentifier, ShardFrontendRegistration> idToShardRegistration = new HashMap<>();
+ private final Cluster cluster;
+ private final ActorRef replicator;
+
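+ // Latest replicated prefix-shard configuration ORMap and its resolved per-prefix view.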
+ private ORMap<PrefixShardConfiguration> currentData = ORMap.create();
+ private Map<DOMDataTreeIdentifier, PrefixShardConfiguration> currentConfiguration = new HashMap<>();
+
ShardedDataTreeActor(final ShardedDataTreeActorCreator builder) {
LOG.debug("Creating ShardedDataTreeActor on {}", builder.getClusterWrapper().getCurrentMemberName());
- dataTreeService = builder.getDataTreeService();
shardingService = builder.getShardingService();
actorSystem = builder.getActorSystem();
- cluster = builder.getClusterWrapper();
+ clusterWrapper = builder.getClusterWrapper();
distributedConfigDatastore = builder.getDistributedConfigDatastore();
distributedOperDatastore = builder.getDistributedOperDatastore();
actorContext = distributedConfigDatastore.getActorContext();
resolver = new ShardingServiceAddressResolver(
- DistributedShardedDOMDataTree.ACTOR_ID, cluster.getCurrentMemberName());
+ DistributedShardedDOMDataTree.ACTOR_ID, clusterWrapper.getCurrentMemberName());
- cluster.subscribeToMemberEvents(self());
+ clusterWrapper.subscribeToMemberEvents(self());
+ cluster = Cluster.get(actorSystem);
+
+ replicator = DistributedData.get(context().system()).replicator();
+ }
+
+ @Override
+ public void preStart() {
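+ // Subscribe to the replicated prefix-shard configuration so the replicator delivers
+ // Replicator.Changed messages to this actor, handled in onConfigChanged().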
+ final Subscribe<ORMap<PrefixShardConfiguration>> subscribe =
+ new Subscribe<>(ClusterUtils.CONFIGURATION_KEY, self());
+ replicator.tell(subscribe, noSender());
}
@Override
protected void handleCommand(final Object message) throws Exception {
+ LOG.debug("Received {}", message);
if (message instanceof ClusterEvent.MemberUp) {
memberUp((ClusterEvent.MemberUp) message);
} else if (message instanceof ClusterEvent.MemberWeaklyUp) {
memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
} else if (message instanceof ClusterEvent.UnreachableMember) {
memberUnreachable((ClusterEvent.UnreachableMember) message);
} else if (message instanceof ClusterEvent.ReachableMember) {
memberReachable((ClusterEvent.ReachableMember) message);
+ } else if (message instanceof Changed) {
+ onConfigChanged((Changed) message);
} else if (message instanceof ProducerCreated) {
onProducerCreated((ProducerCreated) message);
} else if (message instanceof NotifyProducerCreated) {
}
}
+ private void onConfigChanged(final Changed<ORMap<PrefixShardConfiguration>> change) {
+ LOG.debug("member : {}, Received configuration changed: {}", clusterWrapper.getCurrentMemberName(), change);
+
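+ // remember the whole ORMap so subsequent Update operations are based on the latest data we have seen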
+ currentData = change.dataValue();
+ final Map<String, PrefixShardConfiguration> changedConfig = change.dataValue().getEntries();
+
+ LOG.debug("Changed set {}", changedConfig);
+
+ try {
+ final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newConfig =
+ changedConfig.values().stream().collect(
+ Collectors.toMap(PrefixShardConfiguration::getPrefix, Function.identity()));
+ resolveConfig(newConfig);
+ } catch (final IllegalStateException e) {
+ LOG.error("Failed, ", e);
+ }
+
+ }
+
+ private void resolveConfig(final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newConfig) {
+
+ // get the removed configurations
+ final SetView<DOMDataTreeIdentifier> deleted =
+ Sets.difference(currentConfiguration.keySet(), newConfig.keySet());
+ shardingService.resolveShardRemovals(deleted);
+
+ // get the added configurations
+ final SetView<DOMDataTreeIdentifier> additions =
+ Sets.difference(newConfig.keySet(), currentConfiguration.keySet());
+ shardingService.resolveShardAdditions(additions);
+ // we can ignore those that existed previously since the potential changes in replicas will be handled by
+ // shard manager.
+
+ currentConfiguration = new HashMap<>(newConfig);
+ }
+
@Override
public String persistenceId() {
return PERSISTENCE_ID;
private void onProducerCreated(final ProducerCreated message) {
LOG.debug("Received ProducerCreated: {}", message);
+
+ // fastpath if no replication is needed, since there is only one node
+ if (resolver.getShardingServicePeerActorAddresses().size() == 1) {
+ getSender().tell(new Status.Success(null), noSender());
+ }
+
final ActorRef sender = getSender();
final Collection<DOMDataTreeIdentifier> subtrees = message.getSubtrees();
futures.toArray(new CompletableFuture[futures.size()]));
combinedFuture.thenRun(() -> {
- for (final CompletableFuture<Object> future : futures) {
- try {
- final Object result = future.get();
- if (result instanceof Status.Failure) {
- sender.tell(result, self());
- return;
- }
- } catch (InterruptedException | ExecutionException e) {
- sender.tell(new Status.Failure(e), self());
- return;
- }
- }
sender.tell(new Status.Success(null), noSender());
}).exceptionally(throwable -> {
sender.tell(new Status.Failure(throwable), self());
try {
final ActorProducerRegistration registration =
- new ActorProducerRegistration(dataTreeService.createProducer(subtrees), subtrees);
+ new ActorProducerRegistration(shardingService.localCreateProducer(subtrees), subtrees);
subtrees.forEach(id -> idToProducer.put(id, registration));
sender().tell(new Status.Success(null), self());
} catch (final IllegalArgumentException e) {
@SuppressWarnings("checkstyle:IllegalCatch")
private void onCreatePrefixShard(final CreatePrefixShard message) {
- LOG.debug("Received CreatePrefixShard: {}", message);
+ LOG.debug("Member: {}, Received CreatePrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
final PrefixShardConfiguration configuration = message.getConfiguration();
- final DOMDataTreeProducer producer =
- dataTreeService.createProducer(Collections.singleton(configuration.getPrefix()));
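+ // Write the new shard configuration into the replicated data store. WriteLocal consistency is enough
+ // here, the replicator disseminates the change to the other members.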
+ final Update<ORMap<PrefixShardConfiguration>> update =
+ new Update<>(ClusterUtils.CONFIGURATION_KEY, currentData, Replicator.writeLocal(),
+ map -> map.put(cluster, configuration.toDataMapKey(), configuration));
- final DistributedDataStore distributedDataStore =
- configuration.getPrefix().getDatastoreType() == LogicalDatastoreType.CONFIGURATION
- ? distributedConfigDatastore : distributedOperDatastore;
- final String shardName = ClusterUtils.getCleanShardName(configuration.getPrefix().getRootIdentifier());
- LOG.debug("Creating distributed datastore client for shard {}", shardName);
- final Props distributedDataStoreClientProps =
- SimpleDataStoreClientActor.props(cluster.getCurrentMemberName(),
- "Shard-" + shardName, distributedDataStore.getActorContext(), shardName);
+ replicator.tell(update, self());
- final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
- final DataStoreClient client;
- try {
- client = SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
- } catch (final Exception e) {
- LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
- clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
- throw Throwables.propagate(e);
- }
-
- try {
- final ListenerRegistration<ShardFrontend> shardFrontendRegistration =
- shardingService.registerDataTreeShard(configuration.getPrefix(),
- new ShardFrontend(
- client,
- configuration.getPrefix()
- ),
- producer);
- idToShardRegistration.put(configuration.getPrefix(),
- new ShardFrontendRegistration(clientActor, shardFrontendRegistration));
-
- sender().tell(new Status.Success(null), self());
- } catch (final DOMDataTreeShardingConflictException e) {
- LOG.error("Unable to create shard", e);
- clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
- sender().tell(new Status.Failure(e), self());
- } finally {
- try {
- producer.close();
- } catch (final DOMDataTreeProducerException e) {
- LOG.error("Unable to close producer that was used for shard registration {}", producer, e);
- }
- }
+ // schedule a notification task for the reply
+ actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
+ new ShardCreationLookupTask(actorSystem, getSender(), clusterWrapper,
+ actorContext, shardingService, configuration.getPrefix()),
+ actorSystem.dispatcher());
}
private void onPrefixShardCreated(final PrefixShardCreated message) {
- LOG.debug("Received PrefixShardCreated: {}", message);
+ LOG.debug("Member: {}, Received PrefixShardCreated: {}", clusterWrapper.getCurrentMemberName(), message);
final Collection<String> addresses = resolver.getShardingServicePeerActorAddresses();
final ActorRef sender = getSender();
CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
combinedFuture.thenRun(() -> {
- for (final CompletableFuture<Object> future : futures) {
- try {
- final Object result = future.get();
- if (result instanceof Status.Failure) {
- sender.tell(result, self());
- return;
- }
- } catch (InterruptedException | ExecutionException e) {
- sender.tell(new Status.Failure(e), self());
- return;
- }
- }
sender.tell(new Status.Success(null), self());
}).exceptionally(throwable -> {
sender.tell(new Status.Failure(throwable), self());
}
private void onRemovePrefixShard(final RemovePrefixShard message) {
- LOG.debug("Received RemovePrefixShard: {}", message);
+ LOG.debug("Member: {}, Received RemovePrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
- for (final String address : resolver.getShardingServicePeerActorAddresses()) {
- final ActorSelection selection = actorContext.actorSelection(address);
- selection.tell(new PrefixShardRemoved(message.getPrefix()), getSelf());
- }
+ //TODO the removal message should have the configuration or some other way to get to the key
+ final Update<ORMap<PrefixShardConfiguration>> removal =
+ new Update<>(ClusterUtils.CONFIGURATION_KEY, currentData, Replicator.writeLocal(),
+ map -> map.remove(cluster, "prefix=" + message.getPrefix()));
+ replicator.tell(removal, self());
+
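+ // schedule a lookup task that completes the caller's future only once the local backend shard is gone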
+ final ShardRemovalLookupTask removalTask =
+ new ShardRemovalLookupTask(actorSystem, getSender(),
+ actorContext, message.getPrefix());
+
+ actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL, removalTask, actorSystem.dispatcher());
}
private void onPrefixShardRemoved(final PrefixShardRemoved message) {
}
private static class ShardFrontendRegistration extends
- AbstractObjectRegistration<ListenerRegistration<ShardFrontend>> {
+ AbstractObjectRegistration<ListenerRegistration<DistributedShardFrontend>> {
private final ActorRef clientActor;
- private final ListenerRegistration<ShardFrontend> shardRegistration;
+ private final ListenerRegistration<DistributedShardFrontend> shardRegistration;
ShardFrontendRegistration(final ActorRef clientActor,
- final ListenerRegistration<ShardFrontend> shardRegistration) {
+ final ListenerRegistration<DistributedShardFrontend> shardRegistration) {
super(shardRegistration);
this.clientActor = clientActor;
this.shardRegistration = shardRegistration;
}
}
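+ /**
+ * Base class for the retrying lookup tasks. A task reschedules itself until the lookup succeeds or
+ * LOOKUP_TASK_MAX_RETRIES is exceeded, at which point the reply actor is notified of the failure.
+ */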
+ private abstract static class LookupTask implements Runnable {
+
+ private final ActorRef replyTo;
+ private int retries = 0;
+
+ private LookupTask(final ActorRef replyTo) {
+ this.replyTo = replyTo;
+ }
+
+ abstract void reschedule(int retries);
+
+ void tryReschedule(@Nullable final Throwable throwable) {
+ if (retries <= LOOKUP_TASK_MAX_RETRIES) {
+ retries++;
+ reschedule(retries);
+ } else {
+ fail(throwable);
+ }
+ }
+
+ void fail(@Nullable final Throwable throwable) {
+ if (throwable == null) {
+ replyTo.tell(new Failure(
+ new DOMDataTreeShardCreationFailedException("Unable to find the backend shard."
+ + " Failing..")), noSender());
+ } else {
+ replyTo.tell(new Failure(
+ new DOMDataTreeShardCreationFailedException("Unable to find the backend shard."
+ + " Failing..", throwable)), noSender());
+ }
+ }
+ }
+
+ /**
+ * Handles the lookup step of cds shard creation once the configuration is updated.
+ */
+ private static class ShardCreationLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ClusterWrapper clusterWrapper;
+ private final ActorContext context;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+
+ ShardCreationLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ClusterWrapper clusterWrapper,
+ final ActorContext context,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup) {
+ super(replyTo);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.clusterWrapper = clusterWrapper;
+ this.context = context;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
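+ // first wait for the local backend shard to appear, then continue with the shard leader lookup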
+ final Future<ActorRef> localShardFuture =
+ context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
+
+ localShardFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(Throwable throwable, ActorRef actorRef) throws Throwable {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ LOG.debug("Local backend for shard[{}] lookup successful, starting leader lookup..", toLookup);
+
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL,
+ new ShardLeaderLookupTask(system, replyTo, context, clusterWrapper, actorRef,
+ shardingService, toLookup),
+ system.dispatcher());
+ }
+ }
+ }, system.dispatcher());
+ }
+
+ @Override
+ void reschedule(int retries) {
+ LOG.debug("Local backend for shard[{}] not found, try: {}, rescheduling..", toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardCreationLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Handles the readiness step by waiting for a leader of the created shard.
+ */
+ private static class ShardLeaderLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorContext context;
+ private final ClusterWrapper clusterWrapper;
+ private final ActorRef shard;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+
+ ShardLeaderLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorContext context,
+ final ClusterWrapper clusterWrapper,
+ final ActorRef shard,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup) {
+ super(replyTo);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.clusterWrapper = clusterWrapper;
+ this.shard = shard;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
+
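+ // ask the backend shard for its current leader, a present leader means the backend is ready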
+ final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
+
+ ask.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable throwable, final Object findLeaderReply) throws Throwable {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
+ final java.util.Optional<String> leaderActor = findLeader.getLeaderActor();
+ if (leaderActor.isPresent()) {
+ // leader is found, backend seems ready, check if the frontend is ready
+ LOG.debug("{} - Leader for shard[{}] backend ready, starting frontend lookup..",
+ clusterWrapper.getCurrentMemberName(), toLookup);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL,
+ new FrontendLookupTask(system, replyTo, shardingService, toLookup),
+ system.dispatcher());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }
+ }, system.dispatcher());
+
+ }
+
+ @Override
+ void reschedule(int retries) {
+ LOG.debug("{} - Leader for shard[{}] backend not found on try: {}, retrying..",
+ clusterWrapper.getCurrentMemberName(), toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardLeaderLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * After the backend is ready this handles the last step - checking whether we have a frontend shard for the
+ * backend. Once this completes (the frontend should already be present by the time the backend is created, so
+ * this is just a sanity check in case they race), the future for the cds shard creation is completed and the
+ * shard is ready for use.
+ */
+ private static final class FrontendLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+
+ FrontendLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup) {
+ super(replyTo);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
+ final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> entry =
+ shardingService.lookupShardFrontend(toLookup);
+
+ if (entry != null && tableEntryIdCheck(entry, toLookup) && entry.getValue() != null) {
+ replyTo.tell(new Success(null), noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+
+ private boolean tableEntryIdCheck(final DOMDataTreePrefixTableEntry<?> entry,
+ final DOMDataTreeIdentifier prefix) {
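+ // for the root prefix (empty identifier) any entry matches, for other prefixes the entry must be
+ // keyed by the prefix's last path argument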
+ if (entry == null) {
+ return false;
+ }
+
+ if (YangInstanceIdentifier.EMPTY.equals(prefix.getRootIdentifier())) {
+ return true;
+ }
+
+ if (entry.getIdentifier().equals(toLookup.getRootIdentifier().getLastPathArgument())) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ void reschedule(int retries) {
+ LOG.debug("Frontend for shard[{}] not found on try: {}, retrying..", toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, FrontendLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Task that is run once a cds shard registration is closed and completes once the backend shard is removed
+ * from the
+ * configuration.
+ */
+ private static class ShardRemovalLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorContext context;
+ private final DOMDataTreeIdentifier toLookup;
+
+ ShardRemovalLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorContext context,
+ final DOMDataTreeIdentifier toLookup) {
+ super(replyTo);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
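+ // the check is inverted here, findLocalShardAsync failing means the backend shard is no longer
+ // present, which is the desired outcome of the removal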
+ final Future<ActorRef> localShardFuture =
+ context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
+
+ localShardFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(Throwable throwable, ActorRef actorRef) throws Throwable {
+ if (throwable != null) {
+ //TODO Shouldn't we check why findLocalShard failed?
+ LOG.debug("Backend shard[{}] removal lookup successful notifying the registration future",
+ toLookup);
+ replyTo.tell(new Success(null), noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }, system.dispatcher());
+ }
+
+ @Override
+ void reschedule(int retries) {
+ LOG.debug("Backend shard[{}] removal lookup failed, shard is still present, try: {}, rescheduling..",
+ toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardRemovalLookupTask.this, system.dispatcher());
+ }
+ }
+
public static class ShardedDataTreeActorCreator {
- private DOMDataTreeService dataTreeService;
- private DOMDataTreeShardingService shardingService;
+ private DistributedShardedDOMDataTree shardingService;
private DistributedDataStore distributedConfigDatastore;
private DistributedDataStore distributedOperDatastore;
private ActorSystem actorSystem;
private ClusterWrapper cluster;
- public DOMDataTreeService getDataTreeService() {
- return dataTreeService;
- }
-
- public ShardedDataTreeActorCreator setDataTreeService(final DOMDataTreeService dataTreeService) {
- this.dataTreeService = dataTreeService;
- return this;
- }
-
- public DOMDataTreeShardingService getShardingService() {
+ public DistributedShardedDOMDataTree getShardingService() {
return shardingService;
}
- public ShardedDataTreeActorCreator setShardingService(final DOMDataTreeShardingService shardingService) {
+ public ShardedDataTreeActorCreator setShardingService(final DistributedShardedDOMDataTree shardingService) {
this.shardingService = shardingService;
return this;
}
}
private void verify() {
- Preconditions.checkNotNull(dataTreeService);
Preconditions.checkNotNull(shardingService);
Preconditions.checkNotNull(actorSystem);
Preconditions.checkNotNull(cluster);
verify();
return Props.create(ShardedDataTreeActor.class, this);
}
-
}
}