- // we should probably only have one node create the default shards
- return Await.result(FutureConverters.toScala(createDistributedShard(
- new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
- SHARD_FUTURE_TIMEOUT_DURATION);
- } catch (DOMDataTreeShardingConflictException e) {
- LOG.debug("Default shard already registered, possibly due to other node doing it faster");
- return new DistributedShardRegistrationImpl(
- new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY),
- shardedDataTreeActor, this);
+ // Currently the default shard configuration is present in the out-of-box modules.conf and is
+ // expected to be present. So look up the local default shard here and create the frontend.
+
+        // TODO we don't need to do this separately for the config and operational default shards -
+        // bootstrapping just one of them should be enough
+ final ActorContext actorContext = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+ ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+
+ final Optional<ActorRef> defaultLocalShardOptional =
+ actorContext.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+
+ if (defaultLocalShardOptional.isPresent()) {
+ LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
+ logicalDatastoreType);
+ createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY));
+ }
+
+        // If the local shard isn't present, we assume that means the local member isn't in the replica list
+ // and will be dynamically created later via an explicit add-shard-replica request. This is the
+ // bootstrapping mechanism to add a new node into an existing cluster. The following code to create
+ // the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
+ // the default shard is a module-based shard by default, it makes sense to always treat it as such,
+        // i.e. bootstrap it in the same manner as the special prefix-configuration and EOS shards.
+// final Collection<MemberName> names = distributedConfigDatastore.getActorContext().getConfiguration()
+// .getUniqueMemberNamesForAllShards();
+// Await.result(FutureConverters.toScala(createDistributedShard(
+// new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
+// SHARD_FUTURE_TIMEOUT_DURATION);
+// } catch (DOMDataTreeShardingConflictException e) {
+// LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
+// memberName, logicalDatastoreType);