X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fsharding%2FDistributedShardedDOMDataTree.java;h=df49e33cdd09d74f295c6922af03974081edb5c5;hp=0bb6aac36fc86b6285b06ca227592315fb178d66;hb=d69af1f79ae4630be8c4d65b98096aa27b1665b6;hpb=61791b72e0137609d15c18efc64d227b6a4006ec

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
index 0bb6aac36f..df49e33cdd 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
@@ -14,36 +14,57 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
-import akka.cluster.Cluster;
-import akka.cluster.Member;
+import akka.dispatch.Mapper;
+import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
 import akka.util.Timeout;
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
-import com.google.common.collect.Collections2;
 import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.AbstractMap.SimpleEntry;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
 import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
+import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
+import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
+import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
+import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
+import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
 import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor.ShardedDataTreeActorCreator;
-import org.opendaylight.controller.cluster.sharding.messages.CreatePrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
+import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
 import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
 import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.RemovePrefixShard;
+import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
@@ -59,11 +80,16 @@ import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
 import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
 import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTable;
 import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.prefix.shard.configuration.rev170110.PrefixShards;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.JavaConverters;
+import scala.compat.java8.FutureConverters;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+import scala.concurrent.duration.FiniteDuration;
 
 /**
  * A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
@@ -74,10 +100,13 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTree.class);
 
-    private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
     private static final int MAX_ACTOR_CREATION_RETRIES = 100;
     private static final int ACTOR_RETRY_DELAY = 100;
     private static final TimeUnit ACTOR_RETRY_TIME_UNIT = TimeUnit.MILLISECONDS;
+    private static final int LOOKUP_TASK_MAX_RETRIES = 100;
+    static final FiniteDuration SHARD_FUTURE_TIMEOUT_DURATION =
+            new FiniteDuration(LOOKUP_TASK_MAX_RETRIES * LOOKUP_TASK_MAX_RETRIES * 3, TimeUnit.SECONDS);
+    static final Timeout SHARD_FUTURE_TIMEOUT = new Timeout(SHARD_FUTURE_TIMEOUT_DURATION);
 
     static final String ACTOR_ID = "ShardedDOMDataTreeFrontend";
 
@@ -89,12 +118,21 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
     private final ActorRef shardedDataTreeActor;
     private final MemberName memberName;
 
+    @GuardedBy("shards")
     private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shards =
             DOMDataTreePrefixTable.create();
 
     private final EnumMap<LogicalDatastoreType, DistributedShardRegistration> defaultShardRegistrations =
            new EnumMap<>(LogicalDatastoreType.class);
 
+    private final EnumMap<LogicalDatastoreType, Entry<DataStoreClient, ActorRef>> configurationShardMap =
+            new EnumMap<>(LogicalDatastoreType.class);
+
+    private final EnumMap<LogicalDatastoreType, PrefixedShardConfigWriter> writerMap =
+            new EnumMap<>(LogicalDatastoreType.class);
+
+    private final PrefixedShardConfigUpdateHandler updateHandler;
+
     public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
                                          final DistributedDataStore distributedOperDatastore,
                                          final DistributedDataStore distributedConfigDatastore) {
@@ -109,27 +147,144 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
                 .setActorSystem(actorSystem)
                 .setClusterWrapper(distributedConfigDatastore.getActorContext().getClusterWrapper())
                 .setDistributedConfigDatastore(distributedConfigDatastore)
-                .setDistributedOperDatastore(distributedOperDatastore),
+                .setDistributedOperDatastore(distributedOperDatastore)
+                .setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
                 ACTOR_ID);
 
         this.memberName = distributedConfigDatastore.getActorContext().getCurrentMemberName();
 
+        updateHandler = new PrefixedShardConfigUpdateHandler(shardedDataTreeActor,
+                distributedConfigDatastore.getActorContext().getCurrentMemberName());
+
+        LOG.debug("{} - Starting prefix configuration shards", memberName);
+        createPrefixConfigShard(distributedConfigDatastore);
+        createPrefixConfigShard(distributedOperDatastore);
+    }
+
+    private void createPrefixConfigShard(final DistributedDataStore dataStore) {
+        Configuration configuration = dataStore.getActorContext().getConfiguration();
+        Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
+        CreateShard createShardMessage =
+                new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
+                        "prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
+                        memberNames),
+                        Shard.builder(), dataStore.getActorContext().getDatastoreContext());
+
+        dataStore.getActorContext().getShardManager().tell(createShardMessage, noSender());
+    }
+
+    /**
+     * This will try to initialize prefix configuration shards upon their
+     * successful start. We need to create writers to these shards, so we can
+     * satisfy future {@link #createDistributedShard} and
+     * {@link #resolveShardAdditions} requests and update prefix configuration
+     * shards accordingly.
+     *
+     * <p>
+     * We also need to initialize listeners on these shards, so we can react
+     * to changes made on them by other cluster members or even by ourselves.
+     *
+     * <p>
+     * Finally, we need to be sure that default shards for both operational and
+     * configuration data stores are up and running and that we have the
+     * distributed shard frontends created for them.
+     *
+     * <p>
+     * This is intended to be invoked by blueprint as the initialization method.
+     */
+    public void init() {
+        // create our writers to the configuration
+        try {
+            LOG.debug("{} - starting config shard lookup.",
+                    distributedConfigDatastore.getActorContext().getCurrentMemberName());
+
+            // We have to wait for prefix config shards to be up and running
+            // so we can create datastore clients for them
+            handleConfigShardLookup().get(SHARD_FUTURE_TIMEOUT_DURATION.length(),
+                    SHARD_FUTURE_TIMEOUT_DURATION.unit());
+
+            LOG.debug("Prefix configuration shards ready - creating clients");
+
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
+            throw new IllegalStateException("Prefix config shards not found", e);
+        }
+
+        try {
+            LOG.debug("Prefix configuration shards ready - creating clients");
+            configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
+                    createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
+                            distributedConfigDatastore.getActorContext()));
+        } catch (final DOMDataTreeShardCreationFailedException e) {
+            throw new IllegalStateException(
+                    "Unable to create datastoreClient for config DS prefix configuration shard.", e);
+        }
+
+        try {
+            configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
+                    createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
+                            distributedOperDatastore.getActorContext()));
+
+        } catch (final DOMDataTreeShardCreationFailedException e) {
+            throw new IllegalStateException(
+                    "Unable to create datastoreClient for oper DS prefix configuration shard.", e);
+        }
+
+        writerMap.put(LogicalDatastoreType.CONFIGURATION, new PrefixedShardConfigWriter(
+                configurationShardMap.get(LogicalDatastoreType.CONFIGURATION).getKey()));
+
+        writerMap.put(LogicalDatastoreType.OPERATIONAL, new PrefixedShardConfigWriter(
+                configurationShardMap.get(LogicalDatastoreType.OPERATIONAL).getKey()));
+
+        updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
+        updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
+
+        distributedConfigDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+        distributedOperDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+
+        //create shard registration for DEFAULT_SHARD
         try {
             defaultShardRegistrations.put(LogicalDatastoreType.CONFIGURATION,
                     initDefaultShard(LogicalDatastoreType.CONFIGURATION));
-        } catch (final DOMDataTreeProducerException | DOMDataTreeShardingConflictException e) {
-            LOG.error("Unable to create default shard frontend for config shard", e);
+        } catch (final InterruptedException | ExecutionException e) {
+            throw new IllegalStateException("Unable to create default shard frontend for config shard", e);
         }
 
         try {
             defaultShardRegistrations.put(LogicalDatastoreType.OPERATIONAL,
                     initDefaultShard(LogicalDatastoreType.OPERATIONAL));
-        } catch (final DOMDataTreeProducerException | DOMDataTreeShardingConflictException e) {
-            LOG.error("Unable to create default shard frontend for operational shard", e);
+        } catch (final InterruptedException | ExecutionException e) {
+            throw new IllegalStateException("Unable to create default shard frontend for operational shard", e);
         }
     }
 
+    private ListenableFuture<List<Void>> handleConfigShardLookup() {
+
+        final ListenableFuture<Void> configFuture = lookupConfigShard(LogicalDatastoreType.CONFIGURATION);
+        final ListenableFuture<Void> operFuture = lookupConfigShard(LogicalDatastoreType.OPERATIONAL);
+
+        return Futures.allAsList(configFuture, operFuture);
+    }
+
+    private ListenableFuture<Void> lookupConfigShard(final LogicalDatastoreType type) {
+        final SettableFuture<Void> future = SettableFuture.create();
+
+        final Future<Object> ask =
+                Patterns.ask(shardedDataTreeActor, new StartConfigShardLookup(type), SHARD_FUTURE_TIMEOUT);
+
+        ask.onComplete(new OnComplete<Object>() {
+            @Override
+            public void onComplete(final Throwable throwable, final Object result) throws Throwable {
+                if (throwable != null) {
+                    future.setException(throwable);
+                } else {
+                    future.set(null);
+                }
+            }
+        }, actorSystem.dispatcher());
+
+        return future;
+    }
+
     @Nonnull
     @Override
     public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(
@@ -163,22 +318,57 @@
     }
 
     @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    //TODO: it would be better to block here until the message is processed by the actor
-    public DistributedShardRegistration createDistributedShard(
+    public CompletionStage<DistributedShardRegistration> createDistributedShard(
             final DOMDataTreeIdentifier prefix, final Collection<MemberName> replicaMembers)
-            throws DOMDataTreeShardingConflictException, DOMDataTreeProducerException {
-        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
-                shards.lookup(prefix);
-        if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
-            throw new DOMDataTreeShardingConflictException(
-                    "Prefix " + prefix + " is already occupied by another shard.");
+            throws DOMDataTreeShardingConflictException {
+
+        synchronized (shards) {
+            final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
+                    shards.lookup(prefix);
+            if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
+                throw new DOMDataTreeShardingConflictException(
+                        "Prefix " + prefix + " is already occupied by another shard.");
+            }
         }
 
-        PrefixShardConfiguration config = new PrefixShardConfiguration(prefix, "prefix", replicaMembers);
-        shardedDataTreeActor.tell(new CreatePrefixShard(config), noSender());
+        final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
+
+        final ListenableFuture<Void> writeFuture =
+                writer.writeConfig(prefix.getRootIdentifier(), replicaMembers);
+
+        final Promise<DistributedShardRegistration> shardRegistrationPromise = akka.dispatch.Futures.promise();
+        Futures.addCallback(writeFuture, new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(@Nullable final Void result) {
+
+                final Future<Object> ask =
+                        Patterns.ask(shardedDataTreeActor, new LookupPrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
+
+                shardRegistrationPromise.completeWith(ask.transform(
+                        new Mapper<Object, DistributedShardRegistration>() {
+                            @Override
+                            public DistributedShardRegistration apply(final Object parameter) {
+                                return new DistributedShardRegistrationImpl(
+                                        prefix, shardedDataTreeActor, DistributedShardedDOMDataTree.this);
+                            }
+                        },
+                        new Mapper<Throwable, Throwable>() {
+                            @Override
+                            public Throwable apply(final Throwable throwable) {
+                                return new DOMDataTreeShardCreationFailedException(
+                                        "Unable to create a cds shard.", throwable);
+                            }
+                        }, actorSystem.dispatcher()));
+            }
+
+            @Override
+            public void onFailure(final Throwable throwable) {
+                shardRegistrationPromise.failure(
+                        new DOMDataTreeShardCreationFailedException("Unable to create a cds shard.", throwable));
+            }
+        });
 
-        return new DistributedShardRegistrationImpl(prefix, shardedDataTreeActor, this);
+        return FutureConverters.toJava(shardRegistrationPromise.future());
     }
 
     void resolveShardAdditions(final Set<DOMDataTreeIdentifier> additions) {
@@ -222,9 +412,14 @@
             @SuppressWarnings("unchecked")
             final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
                    (DOMDataTreeShardRegistration) shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
-            shards.store(prefix, reg);
+
+            synchronized (shards) {
+                shards.store(prefix, reg);
+            }
+
         } catch (final DOMDataTreeShardingConflictException e) {
-            LOG.error("Prefix {} is already occupied by another shard", prefix, e);
+            LOG.error("{}: Prefix {} is already occupied by another shard",
+                    distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), prefix, e);
         } catch (DOMDataTreeProducerException e) {
             LOG.error("Unable to close producer", e);
         } catch (DOMDataTreeShardCreationFailedException e) {
@@ -234,8 +429,10 @@
 
     private void despawnShardFrontend(final DOMDataTreeIdentifier prefix) {
         LOG.debug("Member {}: Removing CDS shard for prefix: {}", memberName, prefix);
-        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
-                shards.lookup(prefix);
+        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup;
+        synchronized (shards) {
+            lookup = shards.lookup(prefix);
+        }
 
         if (lookup == null || !lookup.getValue().getPrefix().equals(prefix)) {
             LOG.debug("Member {}: Received despawn for non-existing CDS shard frontend, prefix: {}, ignoring..",
@@ -245,7 +442,30 @@
 
         lookup.getValue().close();
         // need to remove from our local table that's used for tracking
-        shards.remove(prefix);
+        synchronized (shards) {
+            shards.remove(prefix);
+        }
+
+        final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
+        final ListenableFuture<Void> future = writer.removeConfig(prefix.getRootIdentifier());
+
+        Futures.addCallback(future, new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(@Nullable Void result) {
+                LOG.debug("{} - Successfully removed shard for {}", memberName, prefix);
+            }
+
+            @Override
+            public void onFailure(Throwable throwable) {
+                LOG.error("Removal of shard {} from configuration failed.", prefix, throwable);
+            }
+        });
+    }
+
+    DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookupShardFrontend(
+            final DOMDataTreeIdentifier prefix) {
+        return shards.lookup(prefix);
+    }
 
     DOMDataTreeProducer localCreateProducer(final Collection<DOMDataTreeIdentifier> prefix) {
@@ -262,6 +482,10 @@
 
         LOG.debug("Registering shard[{}] at prefix: {}", shard, prefix);
 
+        if (producer instanceof ProxyProducer) {
+            return shardedDOMDataTree.registerDataTreeShard(prefix, shard, ((ProxyProducer) producer).delegate());
+        }
+
         return shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
     }
 
@@ -286,15 +510,55 @@
         }
     }
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     private DistributedShardRegistration initDefaultShard(final LogicalDatastoreType logicalDatastoreType)
-            throws DOMDataTreeProducerException, DOMDataTreeShardingConflictException {
-        final Collection<Member> members = JavaConverters.asJavaCollectionConverter(
-                Cluster.get(actorSystem).state().members()).asJavaCollection();
-        final Collection<MemberName> names = Collections2.transform(members,
-                m -> MemberName.forName(m.roles().iterator().next()));
-
-        return createDistributedShard(
-                new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names);
+            throws ExecutionException, InterruptedException {
+        final Collection<MemberName> names =
+                distributedConfigDatastore.getActorContext().getConfiguration().getUniqueMemberNamesForAllShards();
+
+        final PrefixedShardConfigWriter writer = writerMap.get(logicalDatastoreType);
+
+        if (writer.checkDefaultIsPresent()) {
+            LOG.debug("Default shard for {} is already present in the config. Possibly saved in snapshot.",
+                    logicalDatastoreType);
+            return new DistributedShardRegistrationImpl(
+                    new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY),
+                    shardedDataTreeActor, this);
+        } else {
+            try {
+                // There can be a situation where the default shard is already started
+                // because it is present in modules.conf. In that case we have to create
+                // just the frontend for the default shard, but not the shard itself.
+                // TODO we don't have to do it for config and operational default shard
+                // separately. Just one of them should be enough
+                final ActorContext actorContext = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+                        ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+
+                final Optional<ActorRef> defaultLocalShardOptional =
+                        actorContext.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+
+                if (defaultLocalShardOptional.isPresent()) {
+                    LOG.debug("{} Default shard is already started, creating just frontend", logicalDatastoreType);
+                    createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY));
+                    return new DistributedShardRegistrationImpl(
+                            new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY),
+                            shardedDataTreeActor, this);
+                }
+
+                // we should probably only have one node create the default shards
+                return Await.result(FutureConverters.toScala(createDistributedShard(
+                        new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
+                        SHARD_FUTURE_TIMEOUT_DURATION);
+            } catch (DOMDataTreeShardingConflictException e) {
+                LOG.debug("Default shard already registered, possibly due to other node doing it faster");
+                return new DistributedShardRegistrationImpl(
+                        new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY),
+                        shardedDataTreeActor, this);
+            } catch (Exception e) {
+                LOG.error("{} default shard initialization failed", logicalDatastoreType, e);
+                throw new RuntimeException(e);
+            }
+        }
     }
 
     private static void closeProducer(final DOMDataTreeProducer producer) {
@@ -326,7 +590,7 @@
         throw new IllegalStateException("Failed to create actor for ShardedDOMDataTree", lastException);
     }
 
-    private static class DistributedShardRegistrationImpl implements DistributedShardRegistration {
+    private class DistributedShardRegistrationImpl implements DistributedShardRegistration {
 
         private final DOMDataTreeIdentifier prefix;
         private final ActorRef shardedDataTreeActor;
@@ -341,20 +605,41 @@
         }
 
         @Override
-        public void close() {
+        public CompletionStage<Void> close() {
 
             // first despawn on the local node
             distributedShardedDOMDataTree.despawnShardFrontend(prefix);
             // update the config so the remote nodes are updated
-            shardedDataTreeActor.tell(new RemovePrefixShard(prefix), noSender());
+            final Future<Object> ask =
+                    Patterns.ask(shardedDataTreeActor, new PrefixShardRemovalLookup(prefix), SHARD_FUTURE_TIMEOUT);
+
+            final Future<Void> closeFuture = ask.transform(
+                    new Mapper<Object, Void>() {
+                        @Override
+                        public Void apply(final Object parameter) {
+                            return null;
+                        }
+                    },
+                    new Mapper<Throwable, Throwable>() {
+                        @Override
+                        public Throwable apply(final Throwable throwable) {
+                            return throwable;
+                        }
+                    }, actorSystem.dispatcher());
+
+            return FutureConverters.toJava(closeFuture);
         }
     }
 
-    private static final class ProxyProducer extends ForwardingObject implements DOMDataTreeProducer {
+    // TODO what about producers created by this producer?
+    // They should also be CDSProducers
+    private static final class ProxyProducer extends ForwardingObject implements CDSDataTreeProducer {
 
         private final DOMDataTreeProducer delegate;
         private final Collection<DOMDataTreeIdentifier> subtrees;
         private final ActorRef shardDataTreeActor;
         private final ActorContext actorContext;
+        @GuardedBy("shardAccessMap")
+        private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
 
         ProxyProducer(final DOMDataTreeProducer delegate,
                       final Collection<DOMDataTreeIdentifier> subtrees,
@@ -381,8 +666,12 @@
         }
 
         @Override
+        @SuppressWarnings("checkstyle:IllegalCatch")
        public void close() throws DOMDataTreeProducerException {
             delegate.close();
+            synchronized (shardAccessMap) {
+                shardAccessMap.values().forEach(CDSShardAccessImpl::close);
+            }
 
             final Object o = actorContext.executeOperation(shardDataTreeActor, new ProducerRemoved(subtrees));
             if (o instanceof DOMDataTreeProducerException) {
@@ -396,5 +685,24 @@
         protected DOMDataTreeProducer delegate() {
             return delegate;
         }
+
+        @Nonnull
+        @Override
+        public CDSShardAccess getShardAccess(@Nonnull final DOMDataTreeIdentifier subtree) {
+            synchronized (shardAccessMap) {
+                Preconditions.checkArgument(subtrees.contains(subtree),
+                        "Subtree %s is not controlled by this producer %s", subtree, this);
+                if (shardAccessMap.get(subtree) != null) {
+                    return shardAccessMap.get(subtree);
+                }
+
+                // TODO Maybe we can have a static factory method and return the same instance
+                // for the same subtrees. But maybe it is not needed since there can be only one
+                // producer attached to a subtree at a time. And also, how would we close ShardAccess then?
+                final CDSShardAccessImpl shardAccess = new CDSShardAccessImpl(subtree, actorContext);
+                shardAccessMap.put(subtree, shardAccess);
+                return shardAccess;
+            }
+        }
     }
 }
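
A minimal caller-side sketch of the asynchronous API touched by this change: createDistributedShard() now returns CompletionStage<DistributedShardRegistration>, DistributedShardRegistration.close() returns CompletionStage<Void>, and producers handed out by the service implement CDSDataTreeProducer so a per-subtree CDSShardAccess can be obtained. The sketch class, its placement in the org.opendaylight.controller.cluster.sharding package, the injected dataTree instance, the logger and the "member-1" replica name are illustrative assumptions, not part of the patch.

package org.opendaylight.controller.cluster.sharding;

import java.util.Collections;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical caller, used only to illustrate the asynchronous shard lifecycle.
public final class PrefixShardUsageSketch {
    private static final Logger LOG = LoggerFactory.getLogger(PrefixShardUsageSketch.class);

    private PrefixShardUsageSketch() {
    }

    // dataTree is assumed to be an already initialized DistributedShardedDOMDataTree
    // (init() has run), obtained for example through blueprint injection.
    static void exercise(final DistributedShardedDOMDataTree dataTree)
            throws DOMDataTreeShardingConflictException, DOMDataTreeProducerException {
        final DOMDataTreeIdentifier prefix =
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);

        // Shard creation is asynchronous: the registration completes only after the
        // prefix configuration write and the backend shard lookup have both finished.
        dataTree.createDistributedShard(prefix, Collections.singletonList(MemberName.forName("member-1")))
                .thenCompose(registration -> {
                    LOG.info("Shard for {} is ready", prefix);
                    // close() is asynchronous as well; it resolves once the removal
                    // lookup has been processed by the sharding actor.
                    return registration.close();
                })
                .whenComplete((ignored, failure) -> {
                    if (failure != null) {
                        LOG.error("Prefix shard lifecycle for {} failed", prefix, failure);
                    }
                });

        // Producers created by this service implement CDSDataTreeProducer, so callers
        // can reach shard-local facilities for a controlled subtree via CDSShardAccess.
        final DOMDataTreeProducer producer = dataTree.createProducer(Collections.singleton(prefix));
        try {
            final CDSShardAccess shardAccess = ((CDSDataTreeProducer) producer).getShardAccess(prefix);
            LOG.info("Obtained shard access {} for {}", shardAccess, prefix);
        } finally {
            producer.close();
        }
    }
}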