X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FDistributedDataStore.java;h=c79de945675a0f14d8a40fd6dc13f4007a3a9669;hp=51182deb1dcd6586ee1d421d477e3072363d4a83;hb=08dd5c2c443ff53f56af88a0e8dc8f34e36d2245;hpb=2292a31efc0f7779d5c62d6f2de05f57fa2fafc3

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
index 51182deb1d..c79de94567 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
@@ -11,8 +11,11 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorSystem;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
@@ -38,14 +41,24 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         DatastoreContextConfigAdminOverlay.Listener, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
-    public static final int REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR = 24; // 24 times the usual operation timeout
+    private static final String UNKNOWN_TYPE = "unknown";
+
+    private static final long READY_WAIT_FACTOR = 3;
 
     private final ActorContext actorContext;
+    private final long waitTillReadyTimeInMillis;
+
 
     private AutoCloseable closeable;
 
     private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
+    private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+    private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+
+    private final String type;
+
 
     public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
             Configuration configuration, DatastoreContext datastoreContext) {
         Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
@@ -53,7 +66,7 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         Preconditions.checkNotNull(configuration, "configuration should not be null");
         Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
 
-        String type = datastoreContext.getDataStoreType();
+        this.type = datastoreContext.getDataStoreType();
 
         String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
 
@@ -63,17 +76,28 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
                 new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
 
         actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
-                ShardManager.props(cluster, configuration, datastoreContext)
+                ShardManager.props(cluster, configuration, datastoreContext, waitTillReadyCountDownLatch)
                         .withDispatcher(shardDispatcher).withMailbox(ActorContext.MAILBOX), shardManagerId ),
                 cluster, configuration, datastoreContext);
 
+        this.waitTillReadyTimeInMillis =
+                actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
+
+
         datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
         datastoreConfigMXBean.setContext(datastoreContext);
         datastoreConfigMXBean.registerMBean();
+
+        datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContext.getDataStoreMXBeanType(), actorContext);
+        datastoreInfoMXBean.registerMBean();
     }
 
     public DistributedDataStore(ActorContext actorContext) {
         this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+        this.type = UNKNOWN_TYPE;
+        this.waitTillReadyTimeInMillis =
+                actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
+
+
     }
 
     public void setCloseable(AutoCloseable closeable) {
@@ -139,6 +163,7 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
     @Override
     public void close() {
         datastoreConfigMXBean.unregisterMBean();
+        datastoreInfoMXBean.unregisterMBean();
 
         if(closeable != null) {
             try {
@@ -155,4 +180,23 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
     ActorContext getActorContext() {
         return actorContext;
     }
+
+    public void waitTillReady(){
+        LOG.info("Beginning to wait for data store to become ready : {}", type);
+
+        try {
+            if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
+                LOG.debug("Data store {} is now ready", type);
+            } else {
+                LOG.error("Shard leaders failed to settle in {} seconds, giving up", TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
+            }
+        } catch (InterruptedException e) {
+            LOG.error("Interrupted while waiting for shards to settle", e);
+        }
+    }
+
+    @VisibleForTesting
+    public CountDownLatch getWaitTillReadyCountDownLatch() {
+        return waitTillReadyCountDownLatch;
+    }
 }
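The change wires a readiness gate through the datastore: ShardManager is handed waitTillReadyCountDownLatch (and, per the log messages in waitTillReady(), is expected to count it down once shard leaders have settled), while waitTillReady() blocks its caller for up to READY_WAIT_FACTOR (3) shard-leader-election timeouts before giving up. A minimal sketch of how a consumer and a test might exercise the new API follows; the DataStoreReadinessExample helper is an illustrative assumption, not part of this patch.

    // Illustrative sketch only: DataStoreReadinessExample is an assumed helper class;
    // only the DistributedDataStore methods shown in the patch above are real.
    package org.opendaylight.controller.cluster.datastore;

    import java.util.concurrent.CountDownLatch;

    public final class DataStoreReadinessExample {

        private DataStoreReadinessExample() {
        }

        // Production-style caller: block until shard leaders settle (or the
        // bounded wait expires) before handing the datastore out for use.
        public static DistributedDataStore waitForReady(DistributedDataStore dataStore) {
            dataStore.waitTillReady();
            return dataStore;
        }

        // Test-style caller: open the latch that ShardManager would normally
        // count down, so waitTillReady() returns without a running cluster.
        public static void simulateReady(DistributedDataStore dataStore) {
            CountDownLatch latch = dataStore.getWaitTillReadyCountDownLatch();
            latch.countDown();          // stands in for shard leaders settling
            dataStore.waitTillReady();  // returns once the latch reaches zero
        }
    }

The @VisibleForTesting accessor is what makes the second path possible: a test can release the latch directly instead of standing up a ShardManager.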