import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
+import akka.cluster.Cluster;
+import akka.cluster.Member;
+import akka.dispatch.Mapper;
+import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
+import com.google.common.collect.Collections2;
import com.google.common.collect.ForwardingObject;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.concurrent.CompletionException;
+import java.util.EnumMap;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.messages.CreatePrefixedShard;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor.ShardedDataTreeActorCreator;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
+import org.opendaylight.controller.cluster.sharding.messages.CreatePrefixShard;
import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
+import org.opendaylight.controller.cluster.sharding.messages.RemovePrefixShard;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
+import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
+import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTable;
+import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
+import scala.collection.JavaConverters;
+import scala.compat.java8.FutureConverters;
import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
/**
 * A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
 * the {@link ShardedDataTreeActor}.
 */
private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTree.class);
- private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
private static final int MAX_ACTOR_CREATION_RETRIES = 100;
private static final int ACTOR_RETRY_DELAY = 100;
private static final TimeUnit ACTOR_RETRY_TIME_UNIT = TimeUnit.MILLISECONDS;
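+    // sized to comfortably outlast the sharding actor's lookup task retries
+    // (see ShardedDataTreeActor.LOOKUP_TASK_MAX_RETRIES), hence the quadratic factor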
+ static final FiniteDuration SHARD_FUTURE_TIMEOUT_DURATION = new FiniteDuration(
+ ShardedDataTreeActor.LOOKUP_TASK_MAX_RETRIES * ShardedDataTreeActor.LOOKUP_TASK_MAX_RETRIES * 3,
+ TimeUnit.SECONDS);
+ static final Timeout SHARD_FUTURE_TIMEOUT = new Timeout(SHARD_FUTURE_TIMEOUT_DURATION);
static final String ACTOR_ID = "ShardedDOMDataTreeFrontend";
private final ActorRef shardedDataTreeActor;
private final MemberName memberName;
- public DistributedShardedDOMDataTree(final ActorSystem actorSystem,
+ private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shards =
+ DOMDataTreePrefixTable.create();
+
+ private final EnumMap<LogicalDatastoreType, DistributedShardRegistration> defaultShardRegistrations =
+ new EnumMap<>(LogicalDatastoreType.class);
+
+ public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
final DistributedDataStore distributedOperDatastore,
final DistributedDataStore distributedConfigDatastore) {
- this.actorSystem = Preconditions.checkNotNull(actorSystem);
+ this.actorSystem = Preconditions.checkNotNull(actorSystemProvider).getActorSystem();
this.distributedOperDatastore = Preconditions.checkNotNull(distributedOperDatastore);
this.distributedConfigDatastore = Preconditions.checkNotNull(distributedConfigDatastore);
shardedDOMDataTree = new ShardedDOMDataTree();
shardedDataTreeActor = createShardedDataTreeActor(actorSystem,
new ShardedDataTreeActorCreator()
- .setDataTreeService(shardedDOMDataTree)
- .setShardingService(shardedDOMDataTree)
+ .setShardingService(this)
.setActorSystem(actorSystem)
.setClusterWrapper(distributedConfigDatastore.getActorContext().getClusterWrapper())
.setDistributedConfigDatastore(distributedConfigDatastore)
ACTOR_ID);
this.memberName = distributedConfigDatastore.getActorContext().getCurrentMemberName();
+
+        // create default shard frontends for each logical datastore type
+ try {
+ defaultShardRegistrations.put(LogicalDatastoreType.CONFIGURATION,
+ initDefaultShard(LogicalDatastoreType.CONFIGURATION));
+ } catch (final InterruptedException | ExecutionException e) {
+ LOG.error("Unable to create default shard frontend for config shard", e);
+ }
+
+ try {
+ defaultShardRegistrations.put(LogicalDatastoreType.OPERATIONAL,
+ initDefaultShard(LogicalDatastoreType.OPERATIONAL));
+ } catch (final InterruptedException | ExecutionException e) {
+ LOG.error("Unable to create default shard frontend for operational shard", e);
+ }
}
    @Nonnull
    @Override
    public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(
            final T listener, final Collection<DOMDataTreeIdentifier> subtrees,
final boolean allowRxMerges, final Collection<DOMDataTreeProducer> producers)
throws DOMDataTreeLoopException {
-
- throw new UnsupportedOperationException("Not implemented");
+ return shardedDOMDataTree.registerListener(listener, subtrees, allowRxMerges, producers);
}
@Nonnull
@Override
public DOMDataTreeProducer createProducer(@Nonnull final Collection<DOMDataTreeIdentifier> subtrees) {
- LOG.debug("Creating producer for {}", subtrees);
+ LOG.debug("{} - Creating producer for {}",
+ distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), subtrees);
final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
final Object response = distributedConfigDatastore.getActorContext()
.executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
if (response == null) {
- LOG.debug("Received success from remote nodes, creating producer:{}", subtrees);
+ LOG.debug("{} - Received success from remote nodes, creating producer:{}",
+ distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), subtrees);
return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
distributedConfigDatastore.getActorContext());
} else if (response instanceof Exception) {
}
@Override
- @SuppressWarnings("checkstyle:IllegalCatch")
- public DistributedShardRegistration createDistributedShard(
+ public CompletionStage<DistributedShardRegistration> createDistributedShard(
final DOMDataTreeIdentifier prefix, final Collection<MemberName> replicaMembers)
- throws DOMDataTreeShardingConflictException, DOMDataTreeProducerException,
- DOMDataTreeShardCreationFailedException {
+ throws DOMDataTreeShardingConflictException {
+ final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
+ shards.lookup(prefix);
+ if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
+ throw new DOMDataTreeShardingConflictException(
+ "Prefix " + prefix + " is already occupied by another shard.");
+ }
+
+ final PrefixShardConfiguration config = new PrefixShardConfiguration(prefix, "prefix", replicaMembers);
+
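+        // ask the sharding actor to distribute the new configuration and create the backend shard;
+        // failures are mapped to DOMDataTreeShardCreationFailedException below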
+ final Future<Object> ask =
+ Patterns.ask(shardedDataTreeActor, new CreatePrefixShard(config), SHARD_FUTURE_TIMEOUT);
+
+ final Future<DistributedShardRegistration> shardRegistrationFuture = ask.transform(
+ new Mapper<Object, DistributedShardRegistration>() {
+ @Override
+ public DistributedShardRegistration apply(final Object parameter) {
+ return new DistributedShardRegistrationImpl(
+ prefix, shardedDataTreeActor, DistributedShardedDOMDataTree.this);
+ }
+ },
+ new Mapper<Throwable, Throwable>() {
+ @Override
+ public Throwable apply(final Throwable throwable) {
+                    return new DOMDataTreeShardCreationFailedException("Unable to create a CDS shard.", throwable);
+ }
+ }, actorSystem.dispatcher());
+
+ return FutureConverters.toJava(shardRegistrationFuture);
+ }
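+    // registers local shard frontends for newly added prefixes; parent prefixes must be
+    // registered before their children, hence the sort below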
+ void resolveShardAdditions(final Set<DOMDataTreeIdentifier> additions) {
+ LOG.debug("Member {}: Resolving additions : {}", memberName, additions);
+ final ArrayList<DOMDataTreeIdentifier> list = new ArrayList<>(additions);
+        // we need to register the shards from top to bottom, so we need to at least make sure
+        // the ordering reflects that
+        Collections.sort(list, (o1, o2) -> Integer.compare(
+                o1.getRootIdentifier().getPathArguments().size(),
+                o2.getRootIdentifier().getPathArguments().size()));
+ list.forEach(this::createShardFrontend);
+ }
+
+ void resolveShardRemovals(final Set<DOMDataTreeIdentifier> removals) {
+ LOG.debug("Member {}: Resolving removals : {}", memberName, removals);
+
+ // do we need to go from bottom to top?
+ removals.forEach(this::despawnShardFrontend);
+ }
+
+ private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
+ LOG.debug("Member {}: Creating CDS shard for prefix: {}", memberName, prefix);
final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
        final DistributedDataStore distributedDataStore =
-                prefix.getDatastoreType().equals(org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION)
+                prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
                        ? distributedConfigDatastore : distributedOperDatastore;
- final PrefixShardConfiguration config = new PrefixShardConfiguration(prefix, "prefix", replicaMembers);
- if (replicaMembers.contains(memberName)) {
- // spawn the backend shard and have the shard Manager create all replicas
- final ActorRef shardManager = distributedDataStore.getActorContext().getShardManager();
-
- shardManager.tell(new CreatePrefixedShard(config, null, Shard.builder()), noSender());
- }
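+        // a local-only producer is needed to register the shard frontend with the sharding
+        // service; it is closed again as soon as the registration is done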
+ try (final DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
+ final Entry<DataStoreClient, ActorRef> entry =
+ createDatastoreClient(shardName, distributedDataStore.getActorContext());
- LOG.debug("Creating distributed datastore client for shard {}", shardName);
- final Props distributedDataStoreClientProps =
- SimpleDataStoreClientActor
- .props(memberName, "Shard-" + shardName, distributedDataStore.getActorContext(), shardName);
+ final DistributedShardFrontend shard =
+ new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
- final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
- final DataStoreClient client;
- try {
- client = SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
- } catch (final Exception e) {
- LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
- clientActor.tell(PoisonPill.getInstance(), noSender());
- throw new DOMDataTreeProducerException("Unable to create producer", e);
+ @SuppressWarnings("unchecked")
+ final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
+ (DOMDataTreeShardRegistration) shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
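+            // track the registration locally so despawnShardFrontend() can close it later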
+ shards.store(prefix, reg);
+ } catch (final DOMDataTreeShardingConflictException e) {
+ LOG.error("Prefix {} is already occupied by another shard", prefix, e);
+        } catch (final DOMDataTreeProducerException e) {
+            LOG.error("Unable to close producer", e);
+        } catch (final DOMDataTreeShardCreationFailedException e) {
+            LOG.error("Unable to create datastore client for shard {}", prefix, e);
}
+ }
- // register the frontend into the sharding service and let the actor distribute this onto the other nodes
- final ListenerRegistration<ShardFrontend> shardFrontendRegistration;
- try (DOMDataTreeProducer producer = createProducer(Collections.singletonList(prefix))) {
- shardFrontendRegistration = shardedDOMDataTree
- .registerDataTreeShard(prefix,
- new ShardFrontend(client, prefix),
- ((ProxyProducer) producer).delegate());
- }
+ private void despawnShardFrontend(final DOMDataTreeIdentifier prefix) {
+ LOG.debug("Member {}: Removing CDS shard for prefix: {}", memberName, prefix);
+ final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
+ shards.lookup(prefix);
- final Future<Object> future = distributedDataStore.getActorContext()
- .executeOperationAsync(shardedDataTreeActor, new PrefixShardCreated(config), DEFAULT_ASK_TIMEOUT);
- try {
- final Object result = Await.result(future, DEFAULT_ASK_TIMEOUT.duration());
- if (result != null) {
- throw new DOMDataTreeShardCreationFailedException("Received unexpected response to PrefixShardCreated"
- + result);
- }
+ if (lookup == null || !lookup.getValue().getPrefix().equals(prefix)) {
+ LOG.debug("Member {}: Received despawn for non-existing CDS shard frontend, prefix: {}, ignoring..",
+ memberName, prefix);
+ return;
+ }
- return new DistributedShardRegistrationImpl(shardFrontendRegistration, prefix, shardedDataTreeActor);
- } catch (final CompletionException e) {
- shardedDataTreeActor.tell(new PrefixShardRemoved(prefix), noSender());
- clientActor.tell(PoisonPill.getInstance(), noSender());
+ lookup.getValue().close();
+        // remove it from the local table that's used for tracking shard frontends
+ shards.remove(prefix);
+ }
- final Throwable cause = e.getCause();
- if (cause instanceof DOMDataTreeShardingConflictException) {
- throw (DOMDataTreeShardingConflictException) cause;
- }
+ DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookupShardFrontend(
+ final DOMDataTreeIdentifier prefix) {
+ return shards.lookup(prefix);
- throw new DOMDataTreeShardCreationFailedException("Shard creation failed.", e.getCause());
- } catch (final Exception e) {
- shardedDataTreeActor.tell(new PrefixShardRemoved(prefix), noSender());
- clientActor.tell(PoisonPill.getInstance(), noSender());
+ }
- throw new DOMDataTreeShardCreationFailedException("Shard creation failed.", e);
- }
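+    // creates a producer on the local node only, without notifying remote nodes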
+ DOMDataTreeProducer localCreateProducer(final Collection<DOMDataTreeIdentifier> prefix) {
+ return shardedDOMDataTree.createProducer(prefix);
}
    @Nonnull
    @Override
    public <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(
            @Nonnull final DOMDataTreeIdentifier prefix, @Nonnull final T shard,
            @Nonnull final DOMDataTreeProducer producer) throws DOMDataTreeShardingConflictException {
        return shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
    }
+ @SuppressWarnings("checkstyle:IllegalCatch")
+ private Entry<DataStoreClient, ActorRef> createDatastoreClient(
+ final String shardName, final ActorContext actorContext)
+ throws DOMDataTreeShardCreationFailedException {
+
+ LOG.debug("Creating distributed datastore client for shard {}", shardName);
+ final Props distributedDataStoreClientProps =
+ SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorContext, shardName);
+
+ final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
+ try {
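+            // blocks until the client actor is ready, up to the 30 second limit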
+            final DataStoreClient client = SimpleDataStoreClientActor
+                    .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
+            return new SimpleEntry<>(client, clientActor);
+ } catch (final Exception e) {
+ LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
+ clientActor.tell(PoisonPill.getInstance(), noSender());
+            throw new DOMDataTreeShardCreationFailedException(
+                    "Unable to create datastore client for shard " + shardName, e);
+ }
+ }
+
+ private DistributedShardRegistration initDefaultShard(final LogicalDatastoreType logicalDatastoreType)
+ throws ExecutionException, InterruptedException {
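+        // replicate the default shard on all current cluster members; member names correspond
+        // to the akka cluster roles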
+ final Collection<Member> members = JavaConverters.asJavaCollectionConverter(
+ Cluster.get(actorSystem).state().members()).asJavaCollection();
+ final Collection<MemberName> names = Collections2.transform(members,
+ m -> MemberName.forName(m.roles().iterator().next()));
+
+ try {
+ // we should probably only have one node create the default shards
+ return createDistributedShard(
+ new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)
+ .toCompletableFuture().get();
+        } catch (final DOMDataTreeShardingConflictException e) {
+            LOG.debug("Default shard already registered, possibly because another node registered it first");
+ return new DistributedShardRegistrationImpl(
+ new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY),
+ shardedDataTreeActor, this);
+ }
+ }
+
private static void closeProducer(final DOMDataTreeProducer producer) {
try {
producer.close();
throw new IllegalStateException("Failed to create actor for ShardedDOMDataTree", lastException);
}
- private static class DistributedShardRegistrationImpl implements DistributedShardRegistration {
- private final ListenerRegistration<ShardFrontend> registration;
+ private class DistributedShardRegistrationImpl implements DistributedShardRegistration {
+
private final DOMDataTreeIdentifier prefix;
private final ActorRef shardedDataTreeActor;
+ private final DistributedShardedDOMDataTree distributedShardedDOMDataTree;
- DistributedShardRegistrationImpl(final ListenerRegistration<ShardFrontend> registration,
- final DOMDataTreeIdentifier prefix,
- final ActorRef shardedDataTreeActor) {
- this.registration = registration;
+ DistributedShardRegistrationImpl(final DOMDataTreeIdentifier prefix,
+ final ActorRef shardedDataTreeActor,
+ final DistributedShardedDOMDataTree distributedShardedDOMDataTree) {
this.prefix = prefix;
this.shardedDataTreeActor = shardedDataTreeActor;
+ this.distributedShardedDOMDataTree = distributedShardedDOMDataTree;
}
@Override
- public void close() {
- // TODO send the correct messages to ShardManager to destroy the shard
- // maybe we could provide replica removal mechanisms also?
- shardedDataTreeActor.tell(new PrefixShardRemoved(prefix), noSender());
- registration.close();
+ public CompletionStage<Void> close() {
+ // first despawn on the local node
+ distributedShardedDOMDataTree.despawnShardFrontend(prefix);
+ // update the config so the remote nodes are updated
+ final Future<Object> ask =
+ Patterns.ask(shardedDataTreeActor, new RemovePrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
+
+ final Future<Void> closeFuture = ask.transform(
+ new Mapper<Object, Void>() {
+ @Override
+                    public Void apply(final Object parameter) {
+ return null;
+ }
+ },
+ new Mapper<Throwable, Throwable>() {
+ @Override
+                    public Throwable apply(final Throwable throwable) {
+ return throwable;
+ }
+ }, actorSystem.dispatcher());
+
+ return FutureConverters.toJava(closeFuture);
}
}