*/
package org.opendaylight.controller.cluster.datastore;
+import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.concurrent.CountDownLatch;
+import java.util.Collection;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
/**
* Base implementation of a distributed DOMStore.
*/
-public abstract class AbstractDataStore implements DistributedDataStoreInterface, SchemaContextListener,
+public abstract class AbstractDataStore implements DistributedDataStoreInterface, EffectiveModelContextListener,
DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher,
DOMDataTreeCommitCohortRegistry, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class);
- private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+ private final SettableFuture<Void> readinessFuture = SettableFuture.create();
private final ClientIdentifier identifier;
private final DataStoreClient client;
private final ActorUtils actorUtils;
AbstractShardManagerCreator<?> creator = getShardManagerCreator().cluster(cluster).configuration(configuration)
.datastoreContextFactory(datastoreContextFactory)
- .waitTillReadyCountDownLatch(waitTillReadyCountDownLatch)
+ .readinessFuture(readinessFuture)
.primaryShardInfoCache(primaryShardInfoCache)
.restoreFromSnapshot(restoreFromSnapshot)
.distributedDataStore(this);
requireNonNull(treeId, "treeId should not be null");
requireNonNull(listener, "listener should not be null");
+ /*
+ * We need to potentially deal with multi-shard composition for registration targeting the root of the data
+ * store. If that is the case, we delegate to a more complicated setup involving registration with all
+ */
+ if (treeId.isEmpty()) {
+ // User is targeting root of the datastore. If there is more than one shard, we have to register with them
+ // all and perform data composition.
+ final Set<String> shardNames = actorUtils.getConfiguration().getAllShardNames();
+ if (shardNames.size() > 1) {
+ checkArgument(listener instanceof ClusteredDOMDataTreeChangeListener,
+ "Cannot listen on root without non-clustered listener %s", listener);
+ return new RootDataTreeChangeListenerProxy<>(actorUtils, listener, shardNames);
+ }
+ }
+
final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
return listenerRegistrationProxy;
}
-
@Override
public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
final DOMDataTreeIdentifier subtree, final C cohort) {
}
@Override
- public void onGlobalContextUpdated(final SchemaContext schemaContext) {
- actorUtils.setSchemaContext(schemaContext);
+ public void onModelContextUpdated(final EffectiveModelContext newModelContext) {
+ actorUtils.setSchemaContext(newModelContext);
}
@Override
}
// TODO: consider removing this in favor of awaitReadiness()
+ @Deprecated
public void waitTillReady() {
LOG.info("Beginning to wait for data store to become ready : {}", identifier);
final Duration toWait = initialSettleTime();
try {
- if (toWait.isFinite()) {
- if (!waitTillReadyCountDownLatch.await(toWait.toNanos(), TimeUnit.NANOSECONDS)) {
- LOG.error("Shard leaders failed to settle in {}, giving up", toWait);
- return;
- }
- } else {
- waitTillReadyCountDownLatch.await();
+ if (!awaitReadiness(toWait)) {
+ LOG.error("Shard leaders failed to settle in {}, giving up", toWait);
+ return;
}
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for shards to settle", e);
}
@Beta
+ @Deprecated
+ public boolean awaitReadiness() throws InterruptedException {
+ return awaitReadiness(initialSettleTime());
+ }
+
+ @Beta
+ @Deprecated
+ public boolean awaitReadiness(final Duration toWait) throws InterruptedException {
+ try {
+ if (toWait.isFinite()) {
+ try {
+ readinessFuture.get(toWait.toNanos(), TimeUnit.NANOSECONDS);
+ } catch (TimeoutException e) {
+ LOG.debug("Timed out waiting for shards to settle", e);
+ return false;
+ }
+ } else {
+ readinessFuture.get();
+ }
+ } catch (ExecutionException e) {
+ LOG.warn("Unexpected readiness failure, assuming convergence", e);
+ }
+
+ return true;
+ }
+
+ @Beta
+ @Deprecated
public void awaitReadiness(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException {
- if (!waitTillReadyCountDownLatch.await(timeout, unit)) {
+ if (!awaitReadiness(Duration.create(timeout, unit))) {
throw new TimeoutException("Shard leaders failed to settle");
}
}
throw new IllegalStateException("Failed to create Shard Manager", lastException);
}
+ /**
+ * Future which completes when all shards settle for the first time.
+ *
+ * @return A Listenable future.
+ */
+ public final ListenableFuture<?> initialSettleFuture() {
+ return readinessFuture;
+ }
+
@VisibleForTesting
- public CountDownLatch getWaitTillReadyCountDownLatch() {
- return waitTillReadyCountDownLatch;
+ SettableFuture<Void> readinessFuture() {
+ return readinessFuture;
}
+ @Override
@SuppressWarnings("unchecked")
public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
- final YangInstanceIdentifier shardLookup,
- final YangInstanceIdentifier insideShard,
- final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener delegate) {
+ final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard,
+ final DOMDataTreeChangeListener delegate) {
requireNonNull(shardLookup, "shardLookup should not be null");
requireNonNull(insideShard, "insideShard should not be null");
LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
delegate,shardLookup, shardName, insideShard);
+ // wrap this in the ClusteredDOMDataTreeChangeLister interface
+ // since we always want clustered registration
final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
- new DataTreeChangeListenerProxy<>(actorUtils,
- // wrap this in the ClusteredDOMDataTreeChangeLister interface
- // since we always want clustered registration
- (ClusteredDOMDataTreeChangeListener) delegate::onDataTreeChanged, insideShard);
+ new DataTreeChangeListenerProxy<>(actorUtils, new ClusteredDOMDataTreeChangeListener() {
+ @Override
+ public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ delegate.onDataTreeChanged(changes);
+ }
+
+ @Override
+ public void onInitialData() {
+ delegate.onInitialData();
+ }
+ }, insideShard);
listenerRegistrationProxy.init(shardName);
return (ListenerRegistration<L>) listenerRegistrationProxy;
}
+ @Override
@SuppressWarnings("unchecked")
public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
- final YangInstanceIdentifier internalPath,
- final DOMDataTreeChangeListener delegate) {
+ final YangInstanceIdentifier internalPath, final DOMDataTreeChangeListener delegate) {
requireNonNull(delegate, "delegate should not be null");
LOG.debug("Registering a listener for the configuration shard: {}", internalPath);