+ @Override
+ public void run() {
+
+ final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
+
+ ask.onComplete(new OnComplete<>() {
+ @Override
+ public void onComplete(final Throwable throwable, final Object findLeaderReply) {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
+ final Optional<String> leaderActor = findLeader.getLeaderActor();
+ if (leaderActor.isPresent()) {
+ // leader is found, backend seems ready, check if the frontend is ready
+ LOG.debug("{} - Leader for shard[{}] backend ready, starting frontend lookup..",
+ clusterWrapper.getCurrentMemberName(), toLookup);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL,
+ new FrontendLookupTask(
+ system, replyTo, shardingService, toLookup, lookupMaxRetries),
+ system.dispatcher());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }
+ }, system.dispatcher());
+
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("{} - Leader for shard[{}] backend not found on try: {}, retrying..",
+ clusterWrapper.getCurrentMemberName(), toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardLeaderLookupTask.this, system.dispatcher());
+ }
+ }
+
/**
 * After the backend is ready this handles the last step - checking whether a frontend shard exists for the
 * backend. Once this completes (the frontend should already be present by the time the backend is created;
 * this is just a sanity check in case they race), the future for the cds shard creation is completed and the
 * shard is ready for use.
 */
+ private static final class FrontendLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final DistributedShardedDOMDataTree shardingService;
+ private final DOMDataTreeIdentifier toLookup;
+
+ FrontendLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final DistributedShardedDOMDataTree shardingService,
+ final DOMDataTreeIdentifier toLookup,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.shardingService = shardingService;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
+ final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> entry =
+ shardingService.lookupShardFrontend(toLookup);
+
+ if (entry != null && tableEntryIdCheck(entry, toLookup) && entry.getValue() != null) {
+ replyTo.tell(new Success(null), ActorRef.noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+
+ private boolean tableEntryIdCheck(final DOMDataTreePrefixTableEntry<?> entry,
+ final DOMDataTreeIdentifier prefix) {
+ if (entry == null) {
+ return false;
+ }
+
+ if (YangInstanceIdentifier.empty().equals(prefix.getRootIdentifier())) {
+ return true;
+ }
+
+ if (entry.getIdentifier().equals(toLookup.getRootIdentifier().getLastPathArgument())) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Frontend for shard[{}] not found on try: {}, retrying..", toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, FrontendLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Task that is run once a cds shard registration is closed and completes once the backend shard is removed from the
+ * configuration.
+ */
+ private static class ShardRemovalLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+ private final DOMDataTreeIdentifier toLookup;
+
+ ShardRemovalLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final DOMDataTreeIdentifier toLookup,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.toLookup = toLookup;
+ }
+
+ @Override
+ public void run() {
+ final Future<ActorRef> localShardFuture =
+ context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
+
+ localShardFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(final Throwable throwable, final ActorRef actorRef) {
+ if (throwable != null) {
+ //TODO Shouldn't we check why findLocalShard failed?
+ LOG.debug("Backend shard[{}] removal lookup successful notifying the registration future",
+ toLookup);
+ replyTo.tell(new Success(null), ActorRef.noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }, system.dispatcher());
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Backend shard[{}] removal lookup failed, shard is still present, try: {}, rescheduling..",
+ toLookup, retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ShardRemovalLookupTask.this, system.dispatcher());
+ }
+ }
+
+ /**
+ * Task for handling the lookup of the backend for the configuration shard.
+ */
+ private static class ConfigShardLookupTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+
+ ConfigShardLookupTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final StartConfigShardLookup message,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("Local backend for prefix configuration shard not found, try: {}, rescheduling..", retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ConfigShardLookupTask.this, system.dispatcher());
+ }
+
+ @Override
+ public void run() {
+ final Optional<ActorRef> localShard =
+ context.findLocalShard(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
+
+ if (!localShard.isPresent()) {
+ tryReschedule(null);
+ } else {
+ LOG.debug("Local backend for prefix configuration shard lookup successful");
+ replyTo.tell(new Status.Success(null), ActorRef.noSender());
+ }
+ }
+ }
+
+ /**
+ * Task for handling the readiness state of the config shard. Reports success once the leader is elected.
+ */
+ private static class ConfigShardReadinessTask extends LookupTask {
+
+ private final ActorSystem system;
+ private final ActorRef replyTo;
+ private final ActorUtils context;
+ private final ClusterWrapper clusterWrapper;
+ private final ActorRef shard;
+
+ ConfigShardReadinessTask(final ActorSystem system,
+ final ActorRef replyTo,
+ final ActorUtils context,
+ final ClusterWrapper clusterWrapper,
+ final ActorRef shard,
+ final int lookupMaxRetries) {
+ super(replyTo, lookupMaxRetries);
+ this.system = system;
+ this.replyTo = replyTo;
+ this.context = context;
+ this.clusterWrapper = clusterWrapper;
+ this.shard = shard;
+ }
+
+ @Override
+ void reschedule(final int retries) {
+ LOG.debug("{} - Leader for config shard not found on try: {}, retrying..",
+ clusterWrapper.getCurrentMemberName(), retries);
+ system.scheduler().scheduleOnce(
+ SHARD_LOOKUP_TASK_INTERVAL, ConfigShardReadinessTask.this, system.dispatcher());
+ }
+
+ @Override
+ public void run() {
+ final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
+
+ ask.onComplete(new OnComplete<>() {
+ @Override
+ public void onComplete(final Throwable throwable, final Object findLeaderReply) {
+ if (throwable != null) {
+ tryReschedule(throwable);
+ } else {
+ final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
+ final Optional<String> leaderActor = findLeader.getLeaderActor();
+ if (leaderActor.isPresent()) {
+ // leader is found, backend seems ready, check if the frontend is ready
+ LOG.debug("{} - Leader for config shard is ready. Ending lookup.",
+ clusterWrapper.getCurrentMemberName());
+ replyTo.tell(new Status.Success(null), ActorRef.noSender());
+ } else {
+ tryReschedule(null);
+ }
+ }
+ }
+ }, system.dispatcher());
+ }
+ }
+
+ public static class ShardedDataTreeActorCreator {
+
+ private DistributedShardedDOMDataTree shardingService;
+ private DistributedDataStoreInterface distributedConfigDatastore;
+ private DistributedDataStoreInterface distributedOperDatastore;
+ private ActorSystem actorSystem;
+ private ClusterWrapper cluster;
+ private int maxRetries;
+
+ public DistributedShardedDOMDataTree getShardingService() {