X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FDistributedDataStore.java;h=49f5388842d9b690609f063842fc60ce425ec78f;hp=3029ef7e399a4db99c6ed2ad18a7e7701cb2f8ee;hb=1a4a451d17d39b0dac45a70de9016bd4b0e6b249;hpb=ceb41a4fe9c36a2036c5dcae0fc02359be8bdd66

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
index 3029ef7e39..49f5388842 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
@@ -8,22 +8,28 @@ package org.opendaylight.controller.cluster.datastore;
 
+import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -37,7 +43,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class DistributedDataStore implements DOMStore, SchemaContextListener,
-        DatastoreContextConfigAdminOverlay.Listener, AutoCloseable {
+        DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
     private static final String UNKNOWN_TYPE = "unknown";
@@ -52,10 +58,14 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
 
     private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
 
-    private CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+    private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+    private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
 
     private final String type;
 
+    private final TransactionContextFactory txContextFactory;
+
     public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
             Configuration configuration, DatastoreContext datastoreContext) {
         Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
@@ -72,26 +82,31 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         String shardDispatcher =
                 new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
 
-        actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
-                ShardManager.props(cluster, configuration, datastoreContext, waitTillReadyCountDownLatch)
-                        .withDispatcher(shardDispatcher).withMailbox(ActorContext.MAILBOX), shardManagerId ),
-                cluster, configuration, datastoreContext);
+        PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
+        actorContext = new ActorContext(actorSystem, createShardManager(actorSystem, cluster, configuration,
+                datastoreContext, shardDispatcher, shardManagerId, primaryShardInfoCache), cluster,
+                configuration, datastoreContext, primaryShardInfoCache);
 
         this.waitTillReadyTimeInMillis =
                 actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
 
+        this.txContextFactory = TransactionContextFactory.create(actorContext);
+
         datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
         datastoreConfigMXBean.setContext(datastoreContext);
         datastoreConfigMXBean.registerMBean();
+
+        datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContext.getDataStoreMXBeanType(), actorContext);
+        datastoreInfoMXBean.registerMBean();
     }
 
-    public DistributedDataStore(ActorContext actorContext) {
+    @VisibleForTesting
+    DistributedDataStore(ActorContext actorContext) {
         this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+        this.txContextFactory = TransactionContextFactory.create(actorContext);
         this.type = UNKNOWN_TYPE;
         this.waitTillReadyTimeInMillis =
                 actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
-
     }
 
     public void setCloseable(AutoCloseable closeable) {
@@ -110,7 +125,7 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
 
         LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
 
-        String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
+        String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
 
         final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
                 new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
@@ -119,26 +134,41 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         return listenerRegistrationProxy;
     }
 
+    @Override
+    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(YangInstanceIdentifier treeId, L listener) {
+        Preconditions.checkNotNull(treeId, "treeId should not be null");
+        Preconditions.checkNotNull(listener, "listener should not be null");
+
+        final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+        LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
+
+        final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
+                new DataTreeChangeListenerProxy<L>(actorContext, listener);
+        listenerRegistrationProxy.init(shardName, treeId);
+
+        return listenerRegistrationProxy;
+    }
+
     @Override
     public DOMStoreTransactionChain createTransactionChain() {
-        return new TransactionChainProxy(actorContext);
+        return txContextFactory.createTransactionChain();
     }
 
     @Override
     public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_ONLY);
+        return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
     }
 
     @Override
     public DOMStoreWriteTransaction newWriteOnlyTransaction() {
         actorContext.acquireTxCreationPermit();
-        return new TransactionProxy(actorContext, TransactionProxy.TransactionType.WRITE_ONLY);
+        return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
     }
 
     @Override
     public DOMStoreReadWriteTransaction newReadWriteTransaction() {
         actorContext.acquireTxCreationPermit();
-        return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_WRITE);
+        return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
     }
 
     @Override
@@ -156,21 +186,26 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
 
     @Override
     public void close() {
-        datastoreConfigMXBean.unregisterMBean();
+        if (datastoreConfigMXBean != null) {
+            datastoreConfigMXBean.unregisterMBean();
+        }
+        if (datastoreInfoMXBean != null) {
+            datastoreInfoMXBean.unregisterMBean();
+        }
 
-        if(closeable != null) {
+        if (closeable != null) {
             try {
                 closeable.close();
             } catch (Exception e) {
-                LOG.debug("Error closing insance", e);
+                LOG.debug("Error closing instance", e);
             }
         }
 
+        txContextFactory.close();
         actorContext.shutdown();
     }
 
-    @VisibleForTesting
-    ActorContext getActorContext() {
+    public ActorContext getActorContext() {
         return actorContext;
     }
 
@@ -178,12 +213,35 @@
         LOG.info("Beginning to wait for data store to become ready : {}", type);
 
         try {
-            waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS);
-
-            LOG.debug("Data store {} is now ready", type);
+            if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
+                LOG.debug("Data store {} is now ready", type);
+            } else {
+                LOG.error("Shard leaders failed to settle in {} seconds, giving up", TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
+            }
         } catch (InterruptedException e) {
-            LOG.error("Interrupted when trying to wait for shards to become leader in a reasonable amount of time - giving up");
+            LOG.error("Interrupted while waiting for shards to settle", e);
+        }
+    }
+
+    private ActorRef createShardManager(ActorSystem actorSystem, ClusterWrapper cluster, Configuration configuration,
+            DatastoreContext datastoreContext, String shardDispatcher, String shardManagerId,
+            PrimaryShardInfoFutureCache primaryShardInfoCache) {
+        Exception lastException = null;
+
+        for (int i = 0; i < 100; i++) {
+            try {
+                return actorSystem.actorOf(
+                        ShardManager.props(cluster, configuration, datastoreContext, waitTillReadyCountDownLatch,
+                                primaryShardInfoCache).withDispatcher(shardDispatcher).withMailbox(
+                                        ActorContext.MAILBOX), shardManagerId);
+            } catch (Exception e) {
+                lastException = e;
+                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+                LOG.debug("Could not create actor {} because of {} - waiting for some time before retrying (retry count = {})", shardManagerId, e.getMessage(), i);
+            }
+        }
+
+        throw new IllegalStateException("Failed to create Shard Manager", lastException);
     }
 
     @VisibleForTesting
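
The createShardManager() method added above is a bounded-retry pattern: actorSystem.actorOf() can fail transiently (e.g. if an actor with the same name from a previous incarnation is still terminating), so creation is attempted up to 100 times with a 100 ms sleep between attempts before giving up. A minimal, generic sketch of the same pattern follows; RetryingFactory, createWithRetries, and their parameters are illustrative names invented here, not part of this commit:

    import com.google.common.util.concurrent.Uninterruptibles;

    import java.util.concurrent.TimeUnit;
    import java.util.function.Supplier;

    public final class RetryingFactory {
        private RetryingFactory() {
        }

        // Invokes factory until it succeeds, retrying up to maxAttempts times and
        // sleeping sleepMillis between attempts; fails with the last cause attached.
        public static <T> T createWithRetries(Supplier<T> factory, int maxAttempts, long sleepMillis) {
            Exception lastException = null;
            for (int i = 0; i < maxAttempts; i++) {
                try {
                    return factory.get();
                } catch (Exception e) {
                    lastException = e;
                    // Sleeps without throwing InterruptedException; Guava restores the
                    // thread's interrupt flag afterwards if it was interrupted.
                    Uninterruptibles.sleepUninterruptibly(sleepMillis, TimeUnit.MILLISECONDS);
                }
            }
            throw new IllegalStateException("Failed after " + maxAttempts + " attempts", lastException);
        }
    }

Using Guava's Uninterruptibles.sleepUninterruptibly() rather than Thread.sleep() keeps the retry loop free of checked InterruptedException handling while still preserving the caller's interrupt status, which is presumably why the patch uses it in createShardManager().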