 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
import akka.dispatch.OnComplete;
import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.base.Throwables;
import com.google.common.collect.ClassToInstanceMap;
import com.google.common.collect.ForwardingObject;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
/**
 * A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
 * {@link ShardedDataTreeActor}. Also provides QoL method for addition of prefix based clustered shard into the system.
 */
public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService,
DistributedShardFactory {
public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService,
DistributedShardFactory {
- private final AbstractDataStore distributedOperDatastore;
- private final AbstractDataStore distributedConfigDatastore;
+ private final DistributedDataStoreInterface distributedOperDatastore;
+ private final DistributedDataStoreInterface distributedConfigDatastore;
private final PrefixedShardConfigUpdateHandler updateHandler;
public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
private final PrefixedShardConfigUpdateHandler updateHandler;
public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
- final AbstractDataStore distributedOperDatastore,
- final AbstractDataStore distributedConfigDatastore) {
- this.actorSystem = Preconditions.checkNotNull(actorSystemProvider).getActorSystem();
- this.distributedOperDatastore = Preconditions.checkNotNull(distributedOperDatastore);
- this.distributedConfigDatastore = Preconditions.checkNotNull(distributedConfigDatastore);
+ final DistributedDataStoreInterface distributedOperDatastore,
+ final DistributedDataStoreInterface distributedConfigDatastore) {
+ this.actorSystem = requireNonNull(actorSystemProvider).getActorSystem();
+ this.distributedOperDatastore = requireNonNull(distributedOperDatastore);
+ this.distributedConfigDatastore = requireNonNull(distributedConfigDatastore);
shardedDOMDataTree = new ShardedDOMDataTree();
shardedDataTreeActor = createShardedDataTreeActor(actorSystem,
new ShardedDataTreeActorCreator()
.setShardingService(this)
.setActorSystem(actorSystem)
shardedDOMDataTree = new ShardedDOMDataTree();
shardedDataTreeActor = createShardedDataTreeActor(actorSystem,
new ShardedDataTreeActorCreator()
.setShardingService(this)
.setActorSystem(actorSystem)
.setDistributedConfigDatastore(distributedConfigDatastore)
.setDistributedOperDatastore(distributedOperDatastore)
.setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
ACTOR_ID);
.setDistributedConfigDatastore(distributedConfigDatastore)
.setDistributedOperDatastore(distributedOperDatastore)
.setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
ACTOR_ID);
LOG.debug("{} - Starting prefix configuration shards", memberName);
createPrefixConfigShard(distributedConfigDatastore);
createPrefixConfigShard(distributedOperDatastore);
}
LOG.debug("{} - Starting prefix configuration shards", memberName);
createPrefixConfigShard(distributedConfigDatastore);
createPrefixConfigShard(distributedOperDatastore);
}
- private static void createPrefixConfigShard(final AbstractDataStore dataStore) {
- Configuration configuration = dataStore.getActorContext().getConfiguration();
+ private static void createPrefixConfigShard(final DistributedDataStoreInterface dataStore) {
+ Configuration configuration = dataStore.getActorUtils().getConfiguration();
Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
CreateShard createShardMessage =
new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
"prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
memberNames),
Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
CreateShard createShardMessage =
new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
"prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
memberNames),
LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
} catch (final DOMDataTreeShardCreationFailedException e) {
throw new IllegalStateException(
"Unable to create datastoreClient for config DS prefix configuration shard.", e);
} catch (final DOMDataTreeShardCreationFailedException e) {
throw new IllegalStateException(
"Unable to create datastoreClient for config DS prefix configuration shard.", e);
try {
configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
try {
configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
- distributedConfigDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
- distributedOperDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+ distributedConfigDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+ distributedOperDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
final Future<Object> ask =
Patterns.ask(shardedDataTreeActor, new StartConfigShardLookup(type), SHARD_FUTURE_TIMEOUT);
final Future<Object> ask =
Patterns.ask(shardedDataTreeActor, new StartConfigShardLookup(type), SHARD_FUTURE_TIMEOUT);
@Override
public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(
final T listener, final Collection<DOMDataTreeIdentifier> subtrees,
@Override
public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(
final T listener, final Collection<DOMDataTreeIdentifier> subtrees,
LOG.debug("{} - Creating producer for {}", memberName, subtrees);
final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
LOG.debug("{} - Creating producer for {}", memberName, subtrees);
final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
.executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
if (response == null) {
LOG.debug("{} - Received success from remote nodes, creating producer:{}", memberName, subtrees);
return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
.executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
if (response == null) {
LOG.debug("{} - Received success from remote nodes, creating producer:{}", memberName, subtrees);
return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
final Promise<DistributedShardRegistration> shardRegistrationPromise = akka.dispatch.Futures.promise();
Futures.addCallback(writeFuture, new FutureCallback<Void>() {
@Override
final Promise<DistributedShardRegistration> shardRegistrationPromise = akka.dispatch.Futures.promise();
Futures.addCallback(writeFuture, new FutureCallback<Void>() {
@Override
final Future<Object> ask =
Patterns.ask(shardedDataTreeActor, new LookupPrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
final Future<Object> ask =
Patterns.ask(shardedDataTreeActor, new LookupPrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
- final AbstractDataStore distributedDataStore =
- prefix.getDatastoreType().equals(org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION)
+ final DistributedDataStoreInterface distributedDataStore =
+ prefix.getDatastoreType().equals(LogicalDatastoreType.CONFIGURATION)
? distributedConfigDatastore : distributedOperDatastore;
try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
final Entry<DataStoreClient, ActorRef> entry =
? distributedConfigDatastore : distributedOperDatastore;
try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
final Entry<DataStoreClient, ActorRef> entry =
final DistributedShardFrontend shard =
new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
final DistributedShardFrontend shard =
new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
} catch (final DOMDataTreeShardingConflictException e) {
LOG.error("{}: Prefix {} is already occupied by another shard",
} catch (final DOMDataTreeShardingConflictException e) {
LOG.error("{}: Prefix {} is already occupied by another shard",
- distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), prefix, e);
+ distributedConfigDatastore.getActorUtils().getClusterWrapper().getCurrentMemberName(), prefix, e);
} catch (DOMDataTreeProducerException e) {
LOG.error("Unable to close producer", e);
} catch (DOMDataTreeShardCreationFailedException e) {
} catch (DOMDataTreeProducerException e) {
LOG.error("Unable to close producer", e);
} catch (DOMDataTreeShardCreationFailedException e) {
- @Nonnull final DOMDataTreeIdentifier prefix,
- @Nonnull final T shard,
- @Nonnull final DOMDataTreeProducer producer)
- throws DOMDataTreeShardingConflictException {
+ final DOMDataTreeIdentifier prefix, final T shard, final DOMDataTreeProducer producer)
+ throws DOMDataTreeShardingConflictException {
- private Entry<DataStoreClient, ActorRef> createDatastoreClient(
- final String shardName, final ActorContext actorContext)
+ private Entry<DataStoreClient, ActorRef> createDatastoreClient(final String shardName, final ActorUtils actorUtils)
throws DOMDataTreeShardCreationFailedException {
LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
final Props distributedDataStoreClientProps =
throws DOMDataTreeShardCreationFailedException {
LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
final Props distributedDataStoreClientProps =
- SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorContext, shardName);
+ SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorUtils, shardName);
- final ActorContext actorContext = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
- ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+ final ActorUtils actorUtils = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+ ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
if (defaultLocalShardOptional.isPresent()) {
LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
logicalDatastoreType);
if (defaultLocalShardOptional.isPresent()) {
LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
logicalDatastoreType);
- createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY));
+ createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType,
+ YangInstanceIdentifier.empty()));
// the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
// the default shard is a module-based shard by default, it makes sense to always treat it as such,
// ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
// the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
// the default shard is a module-based shard by default, it makes sense to always treat it as such,
// ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
// .getUniqueMemberNamesForAllShards();
// Await.result(FutureConverters.toScala(createDistributedShard(
// .getUniqueMemberNamesForAllShards();
// Await.result(FutureConverters.toScala(createDistributedShard(
-// new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
+// new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.empty()), names)),
// SHARD_FUTURE_TIMEOUT_DURATION);
// } catch (DOMDataTreeShardingConflictException e) {
// LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
// SHARD_FUTURE_TIMEOUT_DURATION);
// } catch (DOMDataTreeShardingConflictException e) {
// LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
private final DOMDataTreeProducer delegate;
private final Collection<DOMDataTreeIdentifier> subtrees;
private final ActorRef shardDataTreeActor;
private final DOMDataTreeProducer delegate;
private final Collection<DOMDataTreeIdentifier> subtrees;
private final ActorRef shardDataTreeActor;
@GuardedBy("shardAccessMap")
private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
@GuardedBy("shardAccessMap")
private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
ProxyProducer(final DOMDataTreeProducer delegate,
final Collection<DOMDataTreeIdentifier> subtrees,
final ActorRef shardDataTreeActor,
ProxyProducer(final DOMDataTreeProducer delegate,
final Collection<DOMDataTreeIdentifier> subtrees,
final ActorRef shardDataTreeActor,
- this.delegate = Preconditions.checkNotNull(delegate);
- this.subtrees = Preconditions.checkNotNull(subtrees);
- this.shardDataTreeActor = Preconditions.checkNotNull(shardDataTreeActor);
- this.actorContext = Preconditions.checkNotNull(actorContext);
- this.shardTable = Preconditions.checkNotNull(shardLayout);
+ this.delegate = requireNonNull(delegate);
+ this.subtrees = requireNonNull(subtrees);
+ this.shardDataTreeActor = requireNonNull(shardDataTreeActor);
+ this.actorUtils = requireNonNull(actorUtils);
+ this.shardTable = requireNonNull(shardLayout);
@Override
public DOMDataTreeCursorAwareTransaction createTransaction(final boolean isolated) {
return delegate.createTransaction(isolated);
}
@Override
public DOMDataTreeCursorAwareTransaction createTransaction(final boolean isolated) {
return delegate.createTransaction(isolated);
}
// TODO we probably don't need to distribute this on the remote nodes since once we have this producer
// open we surely have the rights to all the subtrees.
return delegate.createProducer(subtrees);
// TODO we probably don't need to distribute this on the remote nodes since once we have this producer
// open we surely have the rights to all the subtrees.
return delegate.createProducer(subtrees);
if (o instanceof DOMDataTreeProducerException) {
throw (DOMDataTreeProducerException) o;
} else if (o instanceof Throwable) {
if (o instanceof DOMDataTreeProducerException) {
throw (DOMDataTreeProducerException) o;
} else if (o instanceof Throwable) {
- public CDSShardAccess getShardAccess(@Nonnull final DOMDataTreeIdentifier subtree) {
- Preconditions.checkArgument(
- subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
- "Subtree %s is not controlled by this producer %s", subtree, this);
+ public CDSShardAccess getShardAccess(final DOMDataTreeIdentifier subtree) {
+ checkArgument(subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
+ "Subtree %s is not controlled by this producer %s", subtree, this);
final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
shardTable.lookup(subtree);
final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
shardTable.lookup(subtree);
- Preconditions.checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
+ checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
// for same subtrees. But maybe it is not needed since there can be only one
// producer attached to some subtree at a time. And also how we can close ShardAccess
// then
// for same subtrees. But maybe it is not needed since there can be only one
// producer attached to some subtree at a time. And also how we can close ShardAccess
// then