- // we should probably only have one node create the default shards
- return Await.result(FutureConverters.toScala(createDistributedShard(
- new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
- SHARD_FUTURE_TIMEOUT_DURATION);
- } catch (DOMDataTreeShardingConflictException e) {
- LOG.debug("Default shard already registered, possibly due to other node doing it faster");
- return new DistributedShardRegistrationImpl(
- new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY),
- shardedDataTreeActor, this);
+ // The local shard isn't present, so we assume the local member isn't in the replica list and that the
+ // shard will be created dynamically later via an explicit add-shard-replica request. This is the
+ // bootstrapping mechanism for adding a new node to an existing cluster. The following code, which creates
+ // the default shard as a prefix shard, is problematic in this scenario, so it is commented out. Since
+ // the default shard is a module-based shard by default, it makes sense to always treat it as such,
+ // i.e. bootstrap it in the same manner as the special prefix-configuration and EOS shards.
+// final Collection<MemberName> names = distributedConfigDatastore.getActorContext().getConfiguration()
+// .getUniqueMemberNamesForAllShards();
+// Await.result(FutureConverters.toScala(createDistributedShard(
+// new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
+// SHARD_FUTURE_TIMEOUT_DURATION);
+// } catch (DOMDataTreeShardingConflictException e) {
+// LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
+// memberName, logicalDatastoreType);
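
For context, the add-shard-replica bootstrapping path referenced in the comment above is exposed through the cluster-admin RPCs. Below is a minimal sketch of requesting a local replica of the default shard on a newly joined member; the helper class is hypothetical and not part of this change, and the generated binding package (including the model revision) and the RPC's exact return type are assumptions that vary by release.

import java.util.concurrent.Future;

import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
import org.opendaylight.yangtools.yang.common.RpcResult;

// Hypothetical helper, not part of this change: asks the cluster-admin RPC service to create a
// local replica of the module-based default shard, which is how a freshly added member obtains it.
final class DefaultShardBootstrap {
    private DefaultShardBootstrap() {
    }

    static Future<RpcResult<Void>> addDefaultConfigShardReplica(final ClusterAdminService clusterAdmin) {
        // "default" names the module-based default shard; DataStoreType selects the config datastore here.
        return clusterAdmin.addShardReplica(new AddShardReplicaInputBuilder()
                .setShardName("default")
                .setDataStoreType(DataStoreType.Config)
                .build());
    }
}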