package org.opendaylight.controller.cluster.datastore;
-import java.util.concurrent.TimeUnit;
-
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
-
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClient;
+import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClientActor;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
+import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.duration.Duration;
-
/**
- *
+ * Implements a distributed DOMStore.
*/
-public class DistributedDataStore implements DOMStore, SchemaContextListener, AutoCloseable {
+public class DistributedDataStore implements DistributedDataStoreInterface, SchemaContextListener,
+ DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher,
+ DOMDataTreeCommitCohortRegistry, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
+ private static final long READY_WAIT_FACTOR = 3;
+
private final ActorContext actorContext;
- private final ShardContext shardContext;
+ private final long waitTillReadyTimeInMillis;
- public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster,
- Configuration configuration, DistributedDataStoreProperties dataStoreProperties) {
+ private AutoCloseable closeable;
+
+ private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
+
+ private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+ private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+
+ private final ClientIdentifier identifier;
+ private final DistributedDataStoreClient client;
+
+ private final TransactionContextFactory txContextFactory;
+
+ @SuppressWarnings("checkstyle:IllegalCatch")
+ public DistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
+ final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
+ final DatastoreSnapshot restoreFromSnapshot) {
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
- Preconditions.checkNotNull(type, "type should not be null");
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
+ Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
-
- String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
+ String shardManagerId = ShardManagerIdentifier.builder()
+ .type(datastoreContextFactory.getBaseDatastoreContext().getDataStoreName()).build().toString();
LOG.info("Creating ShardManager : {}", shardManagerId);
- shardContext = new ShardContext(InMemoryDOMDataStoreConfigProperties.create(
- dataStoreProperties.getMaxShardDataChangeExecutorPoolSize(),
- dataStoreProperties.getMaxShardDataChangeExecutorQueueSize(),
- dataStoreProperties.getMaxShardDataChangeListenerQueueSize()),
- Duration.create(dataStoreProperties.getShardTransactionIdleTimeoutInMinutes(),
- TimeUnit.MINUTES));
+ String shardDispatcher =
+ new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
+
+ PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
+
+ ShardManagerCreator creator = new ShardManagerCreator().cluster(cluster).configuration(configuration)
+ .datastoreContextFactory(datastoreContextFactory)
+ .waitTillReadyCountdownLatch(waitTillReadyCountDownLatch)
+ .primaryShardInfoCache(primaryShardInfoCache)
+ .restoreFromSnapshot(restoreFromSnapshot);
+
+ actorContext = new ActorContext(actorSystem, createShardManager(actorSystem, creator, shardDispatcher,
+ shardManagerId), cluster, configuration, datastoreContextFactory.getBaseDatastoreContext(),
+ primaryShardInfoCache);
+
+ final Props clientProps = DistributedDataStoreClientActor.props(cluster.getCurrentMemberName(),
+ datastoreContextFactory.getBaseDatastoreContext().getDataStoreName(), actorContext);
+ final ActorRef clientActor = actorSystem.actorOf(clientProps);
+ try {
+ client = DistributedDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
+ } catch (Exception e) {
+ LOG.error("Failed to get actor for {}", clientProps, e);
+ clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ throw Throwables.propagate(e);
+ }
+
+ identifier = client.getIdentifier();
+ LOG.debug("Distributed data store client {} started", identifier);
+
+ this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
+ .duration().toMillis() * READY_WAIT_FACTOR;
+
+ this.txContextFactory = new TransactionContextFactory(actorContext, identifier);
- actorContext
- = new ActorContext(
- actorSystem, actorSystem.actorOf(
- ShardManager.props(type, cluster, configuration, shardContext).
- withMailbox(ActorContext.MAILBOX), shardManagerId ), cluster, configuration);
+ datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(
+ datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
+ datastoreConfigMXBean.setContext(datastoreContextFactory.getBaseDatastoreContext());
+ datastoreConfigMXBean.registerMBean();
+
+ datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContextFactory.getBaseDatastoreContext()
+ .getDataStoreMXBeanType(), actorContext);
+ datastoreInfoMXBean.registerMBean();
}
- public DistributedDataStore(ActorContext actorContext) {
+ @VisibleForTesting
+ DistributedDataStore(final ActorContext actorContext, final ClientIdentifier identifier) {
this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
- this.shardContext = new ShardContext();
+ this.client = null;
+ this.identifier = Preconditions.checkNotNull(identifier);
+ this.txContextFactory = new TransactionContextFactory(actorContext, identifier);
+ this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
+ .duration().toMillis() * READY_WAIT_FACTOR;
}
+ public void setCloseable(final AutoCloseable closeable) {
+ this.closeable = closeable;
+ }
@SuppressWarnings("unchecked")
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
ListenerRegistration<L> registerChangeListener(
- YangInstanceIdentifier path, L listener,
- AsyncDataBroker.DataChangeScope scope) {
+ final YangInstanceIdentifier path, final L listener,
+ final AsyncDataBroker.DataChangeScope scope) {
Preconditions.checkNotNull(path, "path should not be null");
Preconditions.checkNotNull(listener, "listener should not be null");
LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
- ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
- DataChangeListener.props(listener ));
+ String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
+
+ final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
+ new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
+ listenerRegistrationProxy.init(path, scope);
- String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
+ return listenerRegistrationProxy;
+ }
- Object result = actorContext.executeLocalShardOperation(shardName,
- new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
- ActorContext.ASK_DURATION);
+ @Override
+ public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
+ final YangInstanceIdentifier treeId, final L listener) {
+ Preconditions.checkNotNull(treeId, "treeId should not be null");
+ Preconditions.checkNotNull(listener, "listener should not be null");
- if (result != null) {
- RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
- return new DataChangeListenerRegistrationProxy(actorContext
- .actorSelection(reply.getListenerRegistrationPath()), listener,
- dataChangeListenerActor);
- }
+ final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+ LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
- LOG.debug(
- "No local shard for shardName {} was found so returning a noop registration",
- shardName);
+ final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
+ new DataTreeChangeListenerProxy<>(actorContext, listener);
+ listenerRegistrationProxy.init(shardName, treeId);
- return new NoOpDataChangeListenerRegistration(listener);
+ return listenerRegistrationProxy;
+ }
+
+
+ @Override
+ public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
+ final DOMDataTreeIdentifier subtree, final C cohort) {
+ YangInstanceIdentifier treeId =
+ Preconditions.checkNotNull(subtree, "subtree should not be null").getRootIdentifier();
+ Preconditions.checkNotNull(cohort, "cohort should not be null");
+
+
+ final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+ LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
+
+ DataTreeCohortRegistrationProxy<C> cohortProxy =
+ new DataTreeCohortRegistrationProxy<>(actorContext, subtree, cohort);
+ cohortProxy.init(shardName);
+ return cohortProxy;
}
@Override
public DOMStoreTransactionChain createTransactionChain() {
- return new TransactionChainProxy(actorContext);
+ return txContextFactory.createTransactionChain();
}
@Override
public DOMStoreReadTransaction newReadOnlyTransaction() {
- return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_ONLY);
+ return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- return new TransactionProxy(actorContext, TransactionProxy.TransactionType.WRITE_ONLY);
+ actorContext.acquireTxCreationPermit();
+ return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_WRITE);
+ actorContext.acquireTxCreationPermit();
+ return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
}
@Override
- public void onGlobalContextUpdated(SchemaContext schemaContext) {
+ public void onGlobalContextUpdated(final SchemaContext schemaContext) {
actorContext.setSchemaContext(schemaContext);
}
@Override
- public void close() throws Exception {
+ public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
+ LOG.info("DatastoreContext updated for data store {}", actorContext.getDataStoreName());
+
+ actorContext.setDatastoreContext(contextFactory);
+ datastoreConfigMXBean.setContext(contextFactory.getBaseDatastoreContext());
+ }
+
+ @Override
+ @SuppressWarnings("checkstyle:IllegalCatch")
+ public void close() {
+ LOG.info("Closing data store {}", identifier);
+
+ if (datastoreConfigMXBean != null) {
+ datastoreConfigMXBean.unregisterMBean();
+ }
+ if (datastoreInfoMXBean != null) {
+ datastoreInfoMXBean.unregisterMBean();
+ }
+
+ if (closeable != null) {
+ try {
+ closeable.close();
+ } catch (Exception e) {
+ LOG.debug("Error closing instance", e);
+ }
+ }
+
+ txContextFactory.close();
actorContext.shutdown();
+
+ if (client != null) {
+ client.close();
+ }
+ }
+
+ @Override
+ public ActorContext getActorContext() {
+ return actorContext;
+ }
+
+ public void waitTillReady() {
+ LOG.info("Beginning to wait for data store to become ready : {}", identifier);
+
+ try {
+ if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
+ LOG.debug("Data store {} is now ready", identifier);
+ } else {
+ LOG.error("Shard leaders failed to settle in {} seconds, giving up",
+ TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
+ }
+ } catch (InterruptedException e) {
+ LOG.error("Interrupted while waiting for shards to settle", e);
+ }
+ }
+
+ @SuppressWarnings("checkstyle:IllegalCatch")
+ private static ActorRef createShardManager(final ActorSystem actorSystem, final ShardManagerCreator creator,
+ final String shardDispatcher, final String shardManagerId) {
+ Exception lastException = null;
+
+ for (int i = 0; i < 100; i++) {
+ try {
+ return actorSystem.actorOf(creator.props().withDispatcher(shardDispatcher).withMailbox(
+ ActorContext.BOUNDED_MAILBOX), shardManagerId);
+ } catch (Exception e) {
+ lastException = e;
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ LOG.debug("Could not create actor {} because of {} - waiting for some time before retrying "
+ + "(retry count = {})", shardManagerId, e.getMessage(), i);
+ }
+ }
+
+ throw new IllegalStateException("Failed to create Shard Manager", lastException);
+ }
+
+ @VisibleForTesting
+ public CountDownLatch getWaitTillReadyCountDownLatch() {
+ return waitTillReadyCountDownLatch;
}
}