X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FDistributedDataStore.java;h=49f5388842d9b690609f063842fc60ce425ec78f;hp=51182deb1dcd6586ee1d421d477e3072363d4a83;hb=1a4a451d17d39b0dac45a70de9016bd4b0e6b249;hpb=a23ab6d60b7b57184a8fe59e282e46b448c86d6a

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
index 51182deb1d..49f5388842 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
@@ -8,20 +8,28 @@
 
 package org.opendaylight.controller.cluster.datastore;
 
+import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -35,17 +43,29 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class DistributedDataStore implements DOMStore, SchemaContextListener,
-        DatastoreContextConfigAdminOverlay.Listener, AutoCloseable {
+        DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
 
-    public static final int REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR = 24; // 24 times the usual operation timeout
+    private static final String UNKNOWN_TYPE = "unknown";
+
+    private static final long READY_WAIT_FACTOR = 3;
 
     private final ActorContext actorContext;
+    private final long waitTillReadyTimeInMillis;
+
     private AutoCloseable closeable;
 
     private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
 
+    private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+    private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+
+    private final String type;
+
+    private final TransactionContextFactory txContextFactory;
+
     public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
             Configuration configuration, DatastoreContext datastoreContext) {
         Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
@@ -53,7 +73,7 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         Preconditions.checkNotNull(configuration, "configuration should not be null");
         Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
 
-        String type = datastoreContext.getDataStoreType();
+        this.type = datastoreContext.getDataStoreType();
 
         String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
 
@@ -62,18 +82,31 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         String shardDispatcher =
                 new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
 
-        actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
-                ShardManager.props(cluster, configuration, datastoreContext)
-                        .withDispatcher(shardDispatcher).withMailbox(ActorContext.MAILBOX), shardManagerId ),
-                cluster, configuration, datastoreContext);
+        PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
+        actorContext = new ActorContext(actorSystem, createShardManager(actorSystem, cluster, configuration,
+                datastoreContext, shardDispatcher, shardManagerId, primaryShardInfoCache), cluster,
+                configuration, datastoreContext, primaryShardInfoCache);
+
+        this.waitTillReadyTimeInMillis =
+                actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
+
+        this.txContextFactory = TransactionContextFactory.create(actorContext);
 
         datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
         datastoreConfigMXBean.setContext(datastoreContext);
         datastoreConfigMXBean.registerMBean();
+
+        datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContext.getDataStoreMXBeanType(), actorContext);
+        datastoreInfoMXBean.registerMBean();
     }
 
-    public DistributedDataStore(ActorContext actorContext) {
+    @VisibleForTesting
+    DistributedDataStore(ActorContext actorContext) {
         this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+        this.txContextFactory = TransactionContextFactory.create(actorContext);
+        this.type = UNKNOWN_TYPE;
+        this.waitTillReadyTimeInMillis =
+                actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
     }
 
     public void setCloseable(AutoCloseable closeable) {
@@ -92,7 +125,7 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
 
         LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
 
-        String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
+        String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
 
         final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
                 new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
@@ -101,26 +134,41 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         return listenerRegistrationProxy;
     }
 
+    @Override
+    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(YangInstanceIdentifier treeId, L listener) {
+        Preconditions.checkNotNull(treeId, "treeId should not be null");
+        Preconditions.checkNotNull(listener, "listener should not be null");
+
+        final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+        LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
+
+        final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
+                new DataTreeChangeListenerProxy<>(actorContext, listener);
+        listenerRegistrationProxy.init(shardName, treeId);
+
+        return listenerRegistrationProxy;
+    }
+
     @Override
     public DOMStoreTransactionChain createTransactionChain() {
-        return new TransactionChainProxy(actorContext);
+        return txContextFactory.createTransactionChain();
     }
 
     @Override
     public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_ONLY);
+        return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
     }
 
     @Override
     public DOMStoreWriteTransaction newWriteOnlyTransaction() {
         actorContext.acquireTxCreationPermit();
-        return new TransactionProxy(actorContext, TransactionProxy.TransactionType.WRITE_ONLY);
+        return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
     }
 
     @Override
     public DOMStoreReadWriteTransaction newReadWriteTransaction() {
         actorContext.acquireTxCreationPermit();
-        return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_WRITE);
+        return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
     }
 
     @Override
@@ -138,21 +186,66 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
 
     @Override
     public void close() {
-        datastoreConfigMXBean.unregisterMBean();
+        if (datastoreConfigMXBean != null) {
+            datastoreConfigMXBean.unregisterMBean();
+        }
+        if (datastoreInfoMXBean != null) {
+            datastoreInfoMXBean.unregisterMBean();
+        }
 
-        if(closeable != null) {
+        if (closeable != null) {
             try {
                 closeable.close();
             } catch (Exception e) {
-                LOG.debug("Error closing insance", e);
+                LOG.debug("Error closing instance", e);
             }
         }
 
+        txContextFactory.close();
         actorContext.shutdown();
     }
 
-    @VisibleForTesting
-    ActorContext getActorContext() {
+    public ActorContext getActorContext() {
         return actorContext;
     }
+
+    public void waitTillReady(){
+        LOG.info("Beginning to wait for data store to become ready : {}", type);
+
+        try {
+            if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
+                LOG.debug("Data store {} is now ready", type);
+            } else {
+                LOG.error("Shard leaders failed to settle in {} seconds, giving up", TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
+            }
+        } catch (InterruptedException e) {
+            LOG.error("Interrupted while waiting for shards to settle", e);
+        }
+    }
+
+    private ActorRef createShardManager(ActorSystem actorSystem, ClusterWrapper cluster, Configuration configuration,
+                                        DatastoreContext datastoreContext, String shardDispatcher, String shardManagerId,
+                                        PrimaryShardInfoFutureCache primaryShardInfoCache){
+        Exception lastException = null;
+
+        for(int i=0;i<100;i++) {
+            try {
+                return actorSystem.actorOf(
+                        ShardManager.props(cluster, configuration, datastoreContext, waitTillReadyCountDownLatch,
+                                primaryShardInfoCache).withDispatcher(shardDispatcher).withMailbox(
+                                        ActorContext.MAILBOX), shardManagerId);
+            } catch (Exception e){
+                lastException = e;
+                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+                LOG.debug(String.format("Could not create actor %s because of %s - waiting for some time before retrying (retry count = %d)", shardManagerId, e.getMessage(), i));
+            }
+        }
+
+        throw new IllegalStateException("Failed to create Shard Manager", lastException);
+    }
+
+    @VisibleForTesting
+    public CountDownLatch getWaitTillReadyCountDownLatch() {
+        return waitTillReadyCountDownLatch;
+    }
 }
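
The readiness gate this patch introduces can be reduced to a small pattern: the data store creates a single-count CountDownLatch, hands it to the ShardManager (which counts it down once shard leaders have settled), and waitTillReady() blocks for at most READY_WAIT_FACTOR times the shard leader election timeout before giving up. The following standalone sketch illustrates only that pattern; it is not ODL code, and the class name ReadinessGateExample, the 500 ms stand-in election timeout and the simulated initializer thread are assumptions made for the example.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Standalone illustration of the readiness gate: one party signals readiness by
// counting the latch down, another waits for it with a bounded timeout.
public class ReadinessGateExample {

    private static final long READY_WAIT_FACTOR = 3;           // same factor as in the patch
    private static final long ELECTION_TIMEOUT_MILLIS = 500;   // stand-in for the shard leader election timeout

    private final CountDownLatch waitTillReadyLatch = new CountDownLatch(1);
    private final long waitTillReadyTimeInMillis = ELECTION_TIMEOUT_MILLIS * READY_WAIT_FACTOR;

    // In the real datastore the ShardManager counts the latch down once shard
    // leaders have settled; here a background thread plays that role.
    public void startBackgroundInitialization() {
        Thread initializer = new Thread(() -> {
            try {
                Thread.sleep(200);              // simulate leader election taking a while
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            waitTillReadyLatch.countDown();     // signal "ready"
        });
        initializer.setDaemon(true);
        initializer.start();
    }

    // Mirrors the shape of waitTillReady(): wait, but give up after the bound.
    public boolean waitTillReady() throws InterruptedException {
        return waitTillReadyLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        ReadinessGateExample store = new ReadinessGateExample();
        store.startBackgroundInitialization();
        System.out.println(store.waitTillReady() ? "store ready" : "gave up waiting");
    }
}

The bounded wait keeps startup from hanging indefinitely when shard leaders never settle: after the timeout the store logs an error and proceeds, leaving individual operations to fail against still-unavailable shards rather than blocking boot.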