X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fsharding%2FDistributedShardedDOMDataTree.java;h=64c3f14dfb5dde59806c31dd3f92e92af745ebc7;hp=557543803769413e0baaebb73c1eae218293ba1e;hb=a6af137c30470b86d4bc624d4c48cb686495a182;hpb=c1336f9b497bc6867536a24f629c3f0b002ccb2f

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
index 5575438037..64c3f14dfb 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -14,35 +14,60 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
-import akka.cluster.Cluster;
-import akka.cluster.Member;
+import akka.dispatch.Mapper;
+import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
 import akka.util.Timeout;
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
-import com.google.common.collect.Collections2;
+import com.google.common.collect.ClassToInstanceMap;
 import com.google.common.collect.ForwardingObject;
+import com.google.common.collect.ImmutableClassToInstanceMap;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.AbstractMap.SimpleEntry;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
 import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
+import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
+import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
+import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
+import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
+import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
+import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
 import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor.ShardedDataTreeActorCreator;
-import org.opendaylight.controller.cluster.sharding.messages.CreatePrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
+import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
 import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
 import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.RemovePrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
@@ -51,6 +76,7 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeServiceExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
@@ -58,11 +84,15 @@ import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
 import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
 import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTable;
 import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.prefix.shard.configuration.rev170110.PrefixShards;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.JavaConverters;
+import scala.compat.java8.FutureConverters;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+import scala.concurrent.duration.FiniteDuration;
 
 /**
  * A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
@@ -73,31 +103,40 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTree.class);
 
-    private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
     private static final int MAX_ACTOR_CREATION_RETRIES = 100;
     private static final int ACTOR_RETRY_DELAY = 100;
     private static final TimeUnit ACTOR_RETRY_TIME_UNIT = TimeUnit.MILLISECONDS;
+    private static final int LOOKUP_TASK_MAX_RETRIES = 100;
+    static final FiniteDuration SHARD_FUTURE_TIMEOUT_DURATION =
+            new FiniteDuration(LOOKUP_TASK_MAX_RETRIES * LOOKUP_TASK_MAX_RETRIES * 3, TimeUnit.SECONDS);
+    static final Timeout SHARD_FUTURE_TIMEOUT = new Timeout(SHARD_FUTURE_TIMEOUT_DURATION);
 
     static final String ACTOR_ID = "ShardedDOMDataTreeFrontend";
 
     private final ShardedDOMDataTree shardedDOMDataTree;
     private final ActorSystem actorSystem;
-    private final DistributedDataStore distributedOperDatastore;
-    private final DistributedDataStore distributedConfigDatastore;
+    private final AbstractDataStore distributedOperDatastore;
+    private final AbstractDataStore distributedConfigDatastore;
 
     private final ActorRef shardedDataTreeActor;
     private final MemberName memberName;
 
+    @GuardedBy("shards")
     private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shards =
             DOMDataTreePrefixTable.create();
 
-    private final EnumMap<LogicalDatastoreType, DistributedShardRegistration> defaultShardRegistrations =
+    private final EnumMap<LogicalDatastoreType, Entry<DataStoreClient, ActorRef>> configurationShardMap =
             new EnumMap<>(LogicalDatastoreType.class);
 
-    public DistributedShardedDOMDataTree(final ActorSystem actorSystem,
-                                         final DistributedDataStore distributedOperDatastore,
-                                         final DistributedDataStore distributedConfigDatastore) {
-        this.actorSystem = Preconditions.checkNotNull(actorSystem);
+    private final EnumMap<LogicalDatastoreType, PrefixedShardConfigWriter> writerMap =
+            new EnumMap<>(LogicalDatastoreType.class);
+
+    private final PrefixedShardConfigUpdateHandler updateHandler;
+
+    public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
+                                         final AbstractDataStore distributedOperDatastore,
+                                         final AbstractDataStore distributedConfigDatastore) {
+        this.actorSystem = Preconditions.checkNotNull(actorSystemProvider).getActorSystem();
         this.distributedOperDatastore = Preconditions.checkNotNull(distributedOperDatastore);
         this.distributedConfigDatastore = Preconditions.checkNotNull(distributedConfigDatastore);
         shardedDOMDataTree = new ShardedDOMDataTree();
@@ -108,25 +147,127 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
                 .setActorSystem(actorSystem)
                 .setClusterWrapper(distributedConfigDatastore.getActorContext().getClusterWrapper())
                 .setDistributedConfigDatastore(distributedConfigDatastore)
-                .setDistributedOperDatastore(distributedOperDatastore),
+                .setDistributedOperDatastore(distributedOperDatastore)
+                .setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
                 ACTOR_ID);
 
         this.memberName = distributedConfigDatastore.getActorContext().getCurrentMemberName();
 
-        //create shard registration for DEFAULT_SHARD
+        updateHandler = new PrefixedShardConfigUpdateHandler(shardedDataTreeActor,
+                distributedConfigDatastore.getActorContext().getCurrentMemberName());
+
+        LOG.debug("{} - Starting prefix configuration shards", memberName);
+        createPrefixConfigShard(distributedConfigDatastore);
+        createPrefixConfigShard(distributedOperDatastore);
+    }
+
+    private static void createPrefixConfigShard(final AbstractDataStore dataStore) {
+        Configuration configuration = dataStore.getActorContext().getConfiguration();
+        Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
+        CreateShard createShardMessage =
+                new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
+                        "prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
+                        memberNames),
+                        Shard.builder(), dataStore.getActorContext().getDatastoreContext());
+
+        dataStore.getActorContext().getShardManager().tell(createShardMessage, noSender());
+    }
+
+    /**
+     * This will try to initialize prefix configuration shards upon their
+     * successful start. We need to create writers to these shards, so we can
+     * satisfy future {@link #createDistributedShard} and
+     * {@link #resolveShardAdditions} requests and update prefix configuration
+     * shards accordingly.
+     *
+     * <p>
+     * We also need to initialize listeners on these shards, so we can react
+     * on changes made on them by other cluster members or even by ourselves.
+     *
+     * <p>
+     * Finally, we need to be sure that default shards for both operational and
+     * configuration data stores are up and running and we have distributed
+     * shards frontend created for them.
+     *
+     * <p>
+     * This is intended to be invoked by blueprint as initialization method.
+     */
+    public void init() {
+        // create our writers to the configuration
         try {
-            defaultShardRegistrations.put(LogicalDatastoreType.CONFIGURATION,
-                    initDefaultShard(LogicalDatastoreType.CONFIGURATION));
-        } catch (final DOMDataTreeProducerException | DOMDataTreeShardingConflictException e) {
-            LOG.error("Unable to create default shard frontend for config shard", e);
+            LOG.debug("{} - starting config shard lookup.", memberName);
+
+            // We have to wait for prefix config shards to be up and running
+            // so we can create datastore clients for them
+            handleConfigShardLookup().get(SHARD_FUTURE_TIMEOUT_DURATION.length(), SHARD_FUTURE_TIMEOUT_DURATION.unit());
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
+            throw new IllegalStateException("Prefix config shards not found", e);
         }
 
         try {
-            defaultShardRegistrations.put(LogicalDatastoreType.OPERATIONAL,
-                    initDefaultShard(LogicalDatastoreType.OPERATIONAL));
-        } catch (final DOMDataTreeProducerException | DOMDataTreeShardingConflictException e) {
-            LOG.error("Unable to create default shard frontend for operational shard", e);
+            LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
+            configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
+                    createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
+                            distributedConfigDatastore.getActorContext()));
+        } catch (final DOMDataTreeShardCreationFailedException e) {
+            throw new IllegalStateException(
+                    "Unable to create datastoreClient for config DS prefix configuration shard.", e);
+        }
+
+        try {
+            configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
+                    createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
+                            distributedOperDatastore.getActorContext()));
+
+        } catch (final DOMDataTreeShardCreationFailedException e) {
+            throw new IllegalStateException(
+                    "Unable to create datastoreClient for oper DS prefix configuration shard.", e);
         }
+
+        writerMap.put(LogicalDatastoreType.CONFIGURATION, new PrefixedShardConfigWriter(
+                configurationShardMap.get(LogicalDatastoreType.CONFIGURATION).getKey()));
+
+        writerMap.put(LogicalDatastoreType.OPERATIONAL, new PrefixedShardConfigWriter(
+                configurationShardMap.get(LogicalDatastoreType.OPERATIONAL).getKey()));
+
+        updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
+        updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
+
+        distributedConfigDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+        distributedOperDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+
+
+        //create shard registration for DEFAULT_SHARD
+        initDefaultShard(LogicalDatastoreType.CONFIGURATION);
+        initDefaultShard(LogicalDatastoreType.OPERATIONAL);
+    }
+
+    private ListenableFuture<List<Void>> handleConfigShardLookup() {
+
+        final ListenableFuture<Void> configFuture = lookupConfigShard(LogicalDatastoreType.CONFIGURATION);
+        final ListenableFuture<Void> operFuture = lookupConfigShard(LogicalDatastoreType.OPERATIONAL);
+
+        return Futures.allAsList(configFuture, operFuture);
+    }
+
+    private ListenableFuture<Void> lookupConfigShard(final LogicalDatastoreType type) {
+        final SettableFuture<Void> future = SettableFuture.create();
+
+        final Future<Object> ask =
+                Patterns.ask(shardedDataTreeActor, new StartConfigShardLookup(type), SHARD_FUTURE_TIMEOUT);
+
+        ask.onComplete(new OnComplete<Object>() {
+            @Override
+            public void onComplete(final Throwable throwable, final Object result) {
+                if (throwable != null) {
+                    future.setException(throwable);
+                } else {
+                    future.set(null);
+                }
+            }
+        }, actorSystem.dispatcher());
+
+        return future;
     }
 
     @Nonnull
     @Override
@@ -135,96 +276,131 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
             final T listener, final Collection<DOMDataTreeIdentifier> subtrees,
             final boolean allowRxMerges, final Collection<DOMDataTreeProducer> producers)
             throws DOMDataTreeLoopException {
+        return shardedDOMDataTree.registerListener(listener, subtrees, allowRxMerges, producers);
+    }
 
-        throw new UnsupportedOperationException("Not implemented");
+    @Override
+    public ClassToInstanceMap<DOMDataTreeServiceExtension> getExtensions() {
+        return ImmutableClassToInstanceMap.of();
     }
 
     @Nonnull
     @Override
     public DOMDataTreeProducer createProducer(@Nonnull final Collection<DOMDataTreeIdentifier> subtrees) {
-        LOG.debug("{} - Creating producer for {}",
-                distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), subtrees);
+        LOG.debug("{} - Creating producer for {}", memberName, subtrees);
         final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
 
         final Object response = distributedConfigDatastore.getActorContext()
                 .executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
         if (response == null) {
-            LOG.debug("{} - Received success from remote nodes, creating producer:{}",
-                    distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), subtrees);
+            LOG.debug("{} - Received success from remote nodes, creating producer:{}", memberName, subtrees);
             return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
-                    distributedConfigDatastore.getActorContext());
-        } else if (response instanceof Exception) {
-            closeProducer(producer);
-            throw Throwables.propagate((Exception) response);
-        } else {
-            closeProducer(producer);
-            throw new RuntimeException("Unexpected response to create producer received." + response);
+                    distributedConfigDatastore.getActorContext(), shards);
+        }
+
+        closeProducer(producer);
+
+        if (response instanceof Throwable) {
+            Throwables.throwIfUnchecked((Throwable) response);
+            throw new RuntimeException((Throwable) response);
         }
+        throw new RuntimeException("Unexpected response to create producer received." + response);
     }
 
     @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    //TODO: it would be better to block here until the message is processed by the actor
-    public DistributedShardRegistration createDistributedShard(
+    public CompletionStage<DistributedShardRegistration> createDistributedShard(
             final DOMDataTreeIdentifier prefix, final Collection<MemberName> replicaMembers)
-            throws DOMDataTreeShardingConflictException, DOMDataTreeProducerException {
-        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
-                shards.lookup(prefix);
-        if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
-            throw new DOMDataTreeShardingConflictException(
-                    "Prefix " + prefix + " is already occupied by another shard.");
+            throws DOMDataTreeShardingConflictException {
+
+        synchronized (shards) {
+            final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
+                    shards.lookup(prefix);
+            if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
+                throw new DOMDataTreeShardingConflictException(
+                        "Prefix " + prefix + " is already occupied by another shard.");
+            }
         }
 
-        PrefixShardConfiguration config = new PrefixShardConfiguration(prefix, "prefix", replicaMembers);
-        shardedDataTreeActor.tell(new CreatePrefixShard(config), noSender());
+        final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
+
+        final ListenableFuture<Void> writeFuture =
+                writer.writeConfig(prefix.getRootIdentifier(), replicaMembers);
+
+        final Promise<DistributedShardRegistration> shardRegistrationPromise = akka.dispatch.Futures.promise();
+        Futures.addCallback(writeFuture, new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(@Nullable final Void result) {
+
+                final Future<Object> ask =
+                        Patterns.ask(shardedDataTreeActor, new LookupPrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
+
+                shardRegistrationPromise.completeWith(ask.transform(
+                        new Mapper<Object, DistributedShardRegistration>() {
+                            @Override
+                            public DistributedShardRegistration apply(final Object parameter) {
+                                return new DistributedShardRegistrationImpl(
+                                        prefix, shardedDataTreeActor, DistributedShardedDOMDataTree.this);
+                            }
+                        },
+                        new Mapper<Throwable, Throwable>() {
+                            @Override
+                            public Throwable apply(final Throwable throwable) {
+                                return new DOMDataTreeShardCreationFailedException(
+                                        "Unable to create a cds shard.", throwable);
+                            }
+                        }, actorSystem.dispatcher()));
+            }
+
+            @Override
+            public void onFailure(final Throwable throwable) {
+                shardRegistrationPromise.failure(
+                        new DOMDataTreeShardCreationFailedException("Unable to create a cds shard.", throwable));
+            }
+        }, MoreExecutors.directExecutor());
 
-        return new DistributedShardRegistrationImpl(prefix, shardedDataTreeActor, this);
+        return FutureConverters.toJava(shardRegistrationPromise.future());
     }
 
     void resolveShardAdditions(final Set<DOMDataTreeIdentifier> additions) {
-        LOG.debug("Member {}: Resolving additions : {}", memberName, additions);
-        final ArrayList<DOMDataTreeIdentifier> list = new ArrayList<>(additions);
+        LOG.debug("{}: Resolving additions : {}", memberName, additions);
         // we need to register the shards from top to bottom, so we need to atleast make sure the ordering reflects that
-        Collections.sort(list, (o1, o2) -> {
-            if (o1.getRootIdentifier().getPathArguments().size() < o2.getRootIdentifier().getPathArguments().size()) {
-                return -1;
-            } else if (o1.getRootIdentifier().getPathArguments().size()
-                    == o2.getRootIdentifier().getPathArguments().size()) {
-                return 0;
-            } else {
-                return 1;
-            }
-        });
-        list.forEach(this::createShardFrontend);
+        additions
+            .stream()
+            .sorted(Comparator.comparingInt(o -> o.getRootIdentifier().getPathArguments().size()))
+            .forEachOrdered(this::createShardFrontend);
     }
 
     void resolveShardRemovals(final Set<DOMDataTreeIdentifier> removals) {
-        LOG.debug("Member {}: Resolving removals : {}", memberName, removals);
+        LOG.debug("{}: Resolving removals : {}", memberName, removals);
 
         // do we need to go from bottom to top?
         removals.forEach(this::despawnShardFrontend);
     }
 
     private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
-        LOG.debug("Member {}: Creating CDS shard for prefix: {}", memberName, prefix);
+        LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
         final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
-        final DistributedDataStore distributedDataStore =
+        final AbstractDataStore distributedDataStore =
                 prefix.getDatastoreType().equals(org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION)
                         ? distributedConfigDatastore : distributedOperDatastore;
 
-        try (final DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
+        try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
             final Entry<DataStoreClient, ActorRef> entry =
                     createDatastoreClient(shardName, distributedDataStore.getActorContext());
 
             final DistributedShardFrontend shard =
                     new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
 
-            @SuppressWarnings("unchecked")
             final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
-                    (DOMDataTreeShardRegistration) shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
-            shards.store(prefix, reg);
+                    shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
+
+            synchronized (shards) {
+                shards.store(prefix, reg);
+            }
+
         } catch (final DOMDataTreeShardingConflictException e) {
-            LOG.error("Prefix {} is already occupied by another shard", prefix, e);
+            LOG.error("{}: Prefix {} is already occupied by another shard",
+                    distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), prefix, e);
         } catch (DOMDataTreeProducerException e) {
             LOG.error("Unable to close producer", e);
         } catch (DOMDataTreeShardCreationFailedException e) {
@@ -233,19 +409,45 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
     private void despawnShardFrontend(final DOMDataTreeIdentifier prefix) {
-        LOG.debug("Member {}: Removing CDS shard for prefix: {}", memberName, prefix);
-        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
-                shards.lookup(prefix);
+        LOG.debug("{}: Removing CDS shard for prefix: {}", memberName, prefix);
+        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup;
+        synchronized (shards) {
+            lookup = shards.lookup(prefix);
+        }
 
         if (lookup == null || !lookup.getValue().getPrefix().equals(prefix)) {
-            LOG.debug("Member {}: Received despawn for non-existing CDS shard frontend, prefix: {}, ignoring..",
+            LOG.debug("{}: Received despawn for non-existing CDS shard frontend, prefix: {}, ignoring..",
                     memberName, prefix);
             return;
         }
 
         lookup.getValue().close();
         // need to remove from our local table thats used for tracking
-        shards.remove(prefix);
+        synchronized (shards) {
+            shards.remove(prefix);
+        }
+
+        final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
+        final ListenableFuture<Void> future = writer.removeConfig(prefix.getRootIdentifier());
+
+        Futures.addCallback(future, new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(@Nullable final Void result) {
+                LOG.debug("{} - Successfully removed shard for {}", memberName, prefix);
+            }
+
+            @Override
+            public void onFailure(final Throwable throwable) {
+                LOG.error("Removal of shard {} from configuration failed.", prefix, throwable);
+            }
+        }, MoreExecutors.directExecutor());
+    }
+
+    DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookupShardFrontend(
+            final DOMDataTreeIdentifier prefix) {
+        synchronized (shards) {
+            return shards.lookup(prefix);
+        }
     }
 
     DOMDataTreeProducer localCreateProducer(final Collection<DOMDataTreeIdentifier> prefix) {
@@ -262,6 +464,10 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
         LOG.debug("Registering shard[{}] at prefix: {}", shard, prefix);
 
+        if (producer instanceof ProxyProducer) {
+            return shardedDOMDataTree.registerDataTreeShard(prefix, shard, ((ProxyProducer) producer).delegate());
+        }
+
         return shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
     }
 
@@ -270,7 +476,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
             final String shardName, final ActorContext actorContext)
             throws DOMDataTreeShardCreationFailedException {
 
-        LOG.debug("Creating distributed datastore client for shard {}", shardName);
+        LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
 
         final Props distributedDataStoreClientProps =
                 SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorContext, shardName);
@@ -279,22 +485,59 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
             return new SimpleEntry<>(SimpleDataStoreClientActor
                     .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS), clientActor);
         } catch (final Exception e) {
-            LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
+            LOG.error("{}: Failed to get actor for {}", memberName, distributedDataStoreClientProps, e);
             clientActor.tell(PoisonPill.getInstance(), noSender());
             throw new DOMDataTreeShardCreationFailedException(
                     "Unable to create datastore client for shard{" + shardName + "}", e);
         }
     }
 
-    private DistributedShardRegistration initDefaultShard(final LogicalDatastoreType logicalDatastoreType)
-            throws DOMDataTreeProducerException, DOMDataTreeShardingConflictException {
-        final Collection<Member> members = JavaConverters.asJavaCollectionConverter(
-                Cluster.get(actorSystem).state().members()).asJavaCollection();
-        final Collection<MemberName> names = Collections2.transform(members,
-                m -> MemberName.forName(m.roles().iterator().next()));
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    private void initDefaultShard(final LogicalDatastoreType logicalDatastoreType) {
 
-        return createDistributedShard(
-                new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names);
+        final PrefixedShardConfigWriter writer = writerMap.get(logicalDatastoreType);
+
+        if (writer.checkDefaultIsPresent()) {
+            LOG.debug("{}: Default shard for {} is already present in the config. Possibly saved in snapshot.",
+                    memberName, logicalDatastoreType);
+        } else {
+            try {
+                // Currently the default shard configuration is present in the out-of-box modules.conf and is
+                // expected to be present. So look up the local default shard here and create the frontend.
+
+                // TODO we don't have to do it for config and operational default shard separately. Just one of them
+                // should be enough
+                final ActorContext actorContext = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+                        ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+
+                final Optional<ActorRef> defaultLocalShardOptional =
+                        actorContext.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+
+                if (defaultLocalShardOptional.isPresent()) {
+                    LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
+                            logicalDatastoreType);
+                    createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY));
+                }
+
+                // The local shard isn't present - we assume that means the local member isn't in the replica list
+                // and will be dynamically created later via an explicit add-shard-replica request. This is the
+                // bootstrapping mechanism to add a new node into an existing cluster. The following code to create
+                // the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
+                // the default shard is a module-based shard by default, it makes sense to always treat it as such,
+                // ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
+//                final Collection<MemberName> names = distributedConfigDatastore.getActorContext().getConfiguration()
+//                        .getUniqueMemberNamesForAllShards();
+//                Await.result(FutureConverters.toScala(createDistributedShard(
+//                        new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
+//                        SHARD_FUTURE_TIMEOUT_DURATION);
+//            } catch (DOMDataTreeShardingConflictException e) {
+//                LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
+//                        memberName, logicalDatastoreType);
+            } catch (Exception e) {
+                LOG.error("{}: Default shard initialization for {} failed", memberName, logicalDatastoreType, e);
+                throw new RuntimeException(e);
+            }
+        }
     }
 
     private static void closeProducer(final DOMDataTreeProducer producer) {
@@ -326,7 +569,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         throw new IllegalStateException("Failed to create actor for ShardedDOMDataTree", lastException);
     }
 
-    private static class DistributedShardRegistrationImpl implements DistributedShardRegistration {
+    private class DistributedShardRegistrationImpl implements DistributedShardRegistration {
 
         private final DOMDataTreeIdentifier prefix;
         private final ActorRef shardedDataTreeActor;
@@ -341,29 +584,57 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         }
 
         @Override
-        public void close() {
+        public CompletionStage<Void> close() {
 
             // first despawn on the local node
             distributedShardedDOMDataTree.despawnShardFrontend(prefix);
             // update the config so the remote nodes are updated
-            shardedDataTreeActor.tell(new RemovePrefixShard(prefix), noSender());
+            final Future<Object> ask =
+                    Patterns.ask(shardedDataTreeActor, new PrefixShardRemovalLookup(prefix), SHARD_FUTURE_TIMEOUT);
+
+            final Future<Void> closeFuture = ask.transform(
+                    new Mapper<Object, Void>() {
+                        @Override
+                        public Void apply(final Object parameter) {
+                            return null;
+                        }
+                    },
+                    new Mapper<Throwable, Throwable>() {
+                        @Override
+                        public Throwable apply(final Throwable throwable) {
+                            return throwable;
+                        }
+                    }, actorSystem.dispatcher());
+
+            return FutureConverters.toJava(closeFuture);
        }
     }
 
-    private static final class ProxyProducer extends ForwardingObject implements DOMDataTreeProducer {
+    // TODO what about producers created by this producer?
+    // They should also be CDSProducers
+    private static final class ProxyProducer extends ForwardingObject implements CDSDataTreeProducer {
 
         private final DOMDataTreeProducer delegate;
         private final Collection<DOMDataTreeIdentifier> subtrees;
         private final ActorRef shardDataTreeActor;
         private final ActorContext actorContext;
+        @GuardedBy("shardAccessMap")
+        private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
+
+        // We don't have to guard access to shardTable in ProxyProducer.
+        // ShardTable's entries relevant to this ProxyProducer shouldn't
+        // change during producer's lifetime.
+        private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardTable;
 
         ProxyProducer(final DOMDataTreeProducer delegate,
                       final Collection<DOMDataTreeIdentifier> subtrees,
                       final ActorRef shardDataTreeActor,
-                      final ActorContext actorContext) {
+                      final ActorContext actorContext,
+                      final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardLayout) {
             this.delegate = Preconditions.checkNotNull(delegate);
             this.subtrees = Preconditions.checkNotNull(subtrees);
             this.shardDataTreeActor = Preconditions.checkNotNull(shardDataTreeActor);
             this.actorContext = Preconditions.checkNotNull(actorContext);
+            this.shardTable = Preconditions.checkNotNull(shardLayout);
         }
 
         @Nonnull
@@ -374,6 +645,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
         @Nonnull
         @Override
+        @SuppressWarnings("checkstyle:hiddenField")
         public DOMDataTreeProducer createProducer(@Nonnull final Collection<DOMDataTreeIdentifier> subtrees) {
             // TODO we probably don't need to distribute this on the remote nodes since once we have this producer
             // open we surely have the rights to all the subtrees.
@@ -381,12 +653,17 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         }
 
         @Override
+        @SuppressWarnings("checkstyle:IllegalCatch")
        public void close() throws DOMDataTreeProducerException {
             delegate.close();
 
+            synchronized (shardAccessMap) {
+                shardAccessMap.values().forEach(CDSShardAccessImpl::close);
+            }
+
             final Object o = actorContext.executeOperation(shardDataTreeActor, new ProducerRemoved(subtrees));
             if (o instanceof DOMDataTreeProducerException) {
-                throw ((DOMDataTreeProducerException) o);
+                throw (DOMDataTreeProducerException) o;
             } else if (o instanceof Throwable) {
                 throw new DOMDataTreeProducerException("Unable to close producer", (Throwable) o);
             }
@@ -396,5 +673,33 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         protected DOMDataTreeProducer delegate() {
             return delegate;
         }
+
+        @Nonnull
+        @Override
+        public CDSShardAccess getShardAccess(@Nonnull final DOMDataTreeIdentifier subtree) {
+            Preconditions.checkArgument(
+                    subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
+                    "Subtree %s is not controlled by this producer %s", subtree, this);
+
+            final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
+                    shardTable.lookup(subtree);
+            Preconditions.checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
+
+            final DOMDataTreeIdentifier lookupId = lookup.getValue().getPrefix();
+
+            synchronized (shardAccessMap) {
+                if (shardAccessMap.get(lookupId) != null) {
+                    return shardAccessMap.get(lookupId);
+                }
+
+                // TODO Maybe we can have static factory method and return the same instance
+                // for same subtrees. But maybe it is not needed since there can be only one
+                // producer attached to some subtree at a time. And also how we can close ShardAccess
+                // then
+                final CDSShardAccessImpl shardAccess = new CDSShardAccessImpl(lookupId, actorContext);
+                shardAccessMap.put(lookupId, shardAccess);
+                return shardAccess;
+            }
+        }
     }
 }
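Usage sketch (illustrative, not part of the patch): with this change, createDistributedShard no longer blocks; it returns a CompletionStage<DistributedShardRegistration> that completes once the shard lookup succeeds on the backend, and DistributedShardRegistration#close is asynchronous as well. A caller would chain on the returned stages roughly as follows; the shardFactory reference, LOG, and replica list are assumed for this example:

    // 'shardFactory' is assumed to be a DistributedShardFactory, e.g. an
    // injected DistributedShardedDOMDataTree instance.
    final DOMDataTreeIdentifier prefix = new DOMDataTreeIdentifier(
            LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
    final Collection<MemberName> replicas =
            Collections.singletonList(MemberName.forName("member-1"));

    try {
        shardFactory.createDistributedShard(prefix, replicas)
                // the registration arrives only after the backend shard is looked up
                .thenCompose(registration -> {
                    // a real caller would keep the registration and close it later;
                    // here we immediately tear the shard down again to show chaining
                    return registration.close();
                })
                .exceptionally(failure -> {
                    // asynchronous failures surface as DOMDataTreeShardCreationFailedException
                    LOG.error("Shard creation for {} failed", prefix, failure);
                    return null;
                });
    } catch (final DOMDataTreeShardingConflictException e) {
        // thrown synchronously when the prefix is already occupied by another shard
    }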