/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.controller.cluster.datastore;
10 import static com.google.common.base.Preconditions.checkArgument;
11 import static java.util.Objects.requireNonNull;
13 import akka.actor.ActorRef;
14 import akka.actor.ActorSystem;
15 import akka.actor.PoisonPill;
16 import akka.actor.Props;
17 import com.google.common.annotations.Beta;
18 import com.google.common.annotations.VisibleForTesting;
19 import com.google.common.base.Throwables;
20 import com.google.common.util.concurrent.ListenableFuture;
21 import com.google.common.util.concurrent.SettableFuture;
22 import com.google.common.util.concurrent.Uninterruptibles;
24 import java.util.concurrent.ExecutionException;
25 import java.util.concurrent.TimeUnit;
26 import java.util.concurrent.TimeoutException;
27 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
28 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
29 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
30 import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClientActor;
31 import org.opendaylight.controller.cluster.datastore.config.Configuration;
32 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
33 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
34 import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardManagerCreator;
35 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
36 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
37 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
38 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
39 import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
40 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
41 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
42 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
43 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
44 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
45 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
46 import org.opendaylight.yangtools.concepts.ListenerRegistration;
47 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
48 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
49 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextListener;
50 import org.slf4j.Logger;
51 import org.slf4j.LoggerFactory;
52 import scala.concurrent.duration.Duration;
/**
 * Base implementation of a distributed DOMStore.
 */
57 public abstract class AbstractDataStore implements DistributedDataStoreInterface, EffectiveModelContextListener,
58 DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher,
59 DOMDataTreeCommitCohortRegistry, AutoCloseable {
61 private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class);
// Completes once all local shards have settled (see readinessFuture()/awaitReadiness()).
63 private final SettableFuture<Void> readinessFuture = SettableFuture.create();
// Identity of the frontend client talking to the shard backends.
64 private final ClientIdentifier identifier;
// Frontend client; may be null when the test-only constructors are used.
65 private final DataStoreClient client;
// Utilities wrapping the actor system, shard manager and configuration.
66 private final ActorUtils actorUtils;
// Optional external resource closed together with this datastore (see setCloseable()).
68 private AutoCloseable closeable;
69 private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
70 private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
/**
 * Creates the ShardManager actor and the frontend DataStoreClient for this datastore
 * instance, and registers the configuration/info MXBeans.
 *
 * @param actorSystem actor system to host the shard manager and client actors
 * @param cluster cluster membership wrapper
 * @param configuration shard/module configuration
 * @param datastoreContextFactory factory for per-shard datastore contexts
 * @param restoreFromSnapshot snapshot to restore shards from; may be null
 */
72 @SuppressWarnings("checkstyle:IllegalCatch")
73 protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
74 final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
75 final DatastoreSnapshot restoreFromSnapshot) {
76 requireNonNull(actorSystem, "actorSystem should not be null");
77 requireNonNull(cluster, "cluster should not be null");
78 requireNonNull(configuration, "configuration should not be null");
79 requireNonNull(datastoreContextFactory, "datastoreContextFactory should not be null");
// Shard manager actor name is derived from the datastore name (e.g. config/operational).
81 String shardManagerId = ShardManagerIdentifier.builder()
82 .type(datastoreContextFactory.getBaseDatastoreContext().getDataStoreName()).build().toString();
84 LOG.info("Creating ShardManager : {}", shardManagerId);
86 String shardDispatcher =
87 new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
89 PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
91 AbstractShardManagerCreator<?> creator = getShardManagerCreator().cluster(cluster).configuration(configuration)
92 .datastoreContextFactory(datastoreContextFactory)
93 .readinessFuture(readinessFuture)
94 .primaryShardInfoCache(primaryShardInfoCache)
95 .restoreFromSnapshot(restoreFromSnapshot)
96 .distributedDataStore(this);
// createShardManager() retries actor creation; ActorUtils is built around its result.
98 actorUtils = new ActorUtils(actorSystem, createShardManager(actorSystem, creator, shardDispatcher,
99 shardManagerId), cluster, configuration, datastoreContextFactory.getBaseDatastoreContext(),
100 primaryShardInfoCache);
102 final Props clientProps = DistributedDataStoreClientActor.props(cluster.getCurrentMemberName(),
103 datastoreContextFactory.getBaseDatastoreContext().getDataStoreName(), actorUtils);
104 final ActorRef clientActor = actorSystem.actorOf(clientProps);
// NOTE(review): the opening 'try {' matching the catch below is not visible in this chunk
// (the file appears to have dropped brace-only lines); verify against upstream.
106 client = DistributedDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
107 } catch (Exception e) {
// On failure, stop the client actor before propagating the original exception.
108 LOG.error("Failed to get actor for {}", clientProps, e);
109 clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
110 Throwables.throwIfUnchecked(e);
111 throw new RuntimeException(e);
114 identifier = client.getIdentifier();
115 LOG.debug("Distributed data store client {} started", identifier);
// Expose the effective DatastoreContext and datastore statistics via JMX.
117 datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(
118 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
119 datastoreConfigMXBean.setContext(datastoreContextFactory.getBaseDatastoreContext());
120 datastoreConfigMXBean.registerMBean();
122 datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContextFactory.getBaseDatastoreContext()
123 .getDataStoreMXBeanType(), actorUtils);
124 datastoreInfoMXBean.registerMBean();
/**
 * Constructor taking pre-built ActorUtils and identifier — presumably for testing;
 * confirm against upstream (a @VisibleForTesting annotation may have been dropped).
 * NOTE(review): no assignment to the final 'client' field is visible in this chunk
 * (original line 130 appears dropped); verify against upstream.
 */
128 protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
129 this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
131 this.identifier = requireNonNull(identifier);
/**
 * Constructor taking pre-built ActorUtils, identifier and client — presumably for
 * testing; confirm against upstream. Note 'clientActor' is not null-checked,
 * unlike the other two arguments.
 */
135 protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
136 final DataStoreClient clientActor) {
137 this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
138 this.client = clientActor;
139 this.identifier = requireNonNull(identifier);
// Factory hook: subclasses may supply a different ShardManager creator.
142 protected AbstractShardManagerCreator<?> getShardManagerCreator() {
143 return new ShardManagerCreator();
146 protected final DataStoreClient getClient() {
150 final ClientIdentifier getIdentifier() {
// Attaches an external resource which close() will also close (best-effort).
154 public void setCloseable(final AutoCloseable closeable) {
155 this.closeable = closeable;
/**
 * Registers a data tree change listener, routing the registration to the shard
 * responsible for {@code treeId}. Root registrations spanning multiple shards are
 * composed via a RootDataTreeChangeListenerProxy.
 */
159 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
160 final YangInstanceIdentifier treeId, final L listener) {
161 requireNonNull(treeId, "treeId should not be null");
162 requireNonNull(listener, "listener should not be null");
/*
 * We need to potentially deal with multi-shard composition for registration targeting the root of the data
 * store. If that is the case, we delegate to a more complicated setup involving a registration with each
 * shard and composition of the resulting notifications.
 */
168 if (treeId.isEmpty()) {
169 // User is targeting root of the datastore. If there is more than one shard, we have to register with them
170 // all and perform data composition.
171 final Set<String> shardNames = actorUtils.getConfiguration().getAllShardNames();
172 if (shardNames.size() > 1) {
// NOTE(review): the message wording looks inverted -- this check actually requires a
// ClusteredDOMDataTreeChangeListener; confirm the intended text against upstream.
173 checkArgument(listener instanceof ClusteredDOMDataTreeChangeListener,
174 "Cannot listen on root without non-clustered listener %s", listener);
175 return new RootDataTreeChangeListenerProxy<>(actorUtils, listener, shardNames);
// Single-shard case: resolve the owning shard via the configured shard strategy.
179 final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
180 LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
182 final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
183 new DataTreeChangeListenerProxy<>(actorUtils, listener, treeId);
184 listenerRegistrationProxy.init(shardName);
186 return listenerRegistrationProxy;
/**
 * Registers a commit cohort with the shard responsible for {@code subtree}.
 * NOTE(review): no 'return cohortProxy;' line is visible in this chunk (appears
 * dropped); verify against upstream.
 */
190 public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
191 final DOMDataTreeIdentifier subtree, final C cohort) {
192 YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").getRootIdentifier();
// NOTE(review): the message says "listener" but the argument is a cohort.
193 requireNonNull(cohort, "listener should not be null");
196 final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
197 LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
199 DataTreeCohortRegistrationProxy<C> cohortProxy =
200 new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
201 cohortProxy.init(shardName);
// EffectiveModelContextListener callback: propagates the new schema to ActorUtils.
206 public void onModelContextUpdated(final EffectiveModelContext newModelContext) {
207 actorUtils.setSchemaContext(newModelContext);
// DatastoreContextPropertiesUpdater.Listener callback: applies an updated context
// to the actor utilities and refreshes the JMX view.
211 public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
212 LOG.info("DatastoreContext updated for data store {}", actorUtils.getDataStoreName());
214 actorUtils.setDatastoreContext(contextFactory);
215 datastoreConfigMXBean.setContext(contextFactory.getBaseDatastoreContext());
/**
 * Shuts down the MXBeans, the optional attached closeable, the actor utilities
 * and (if present) the client. Several brace/try lines are not visible in this
 * chunk; the gaps are annotated below.
 */
219 @SuppressWarnings("checkstyle:IllegalCatch")
220 public void close() {
221 LOG.info("Closing data store {}", identifier);
223 if (datastoreConfigMXBean != null) {
224 datastoreConfigMXBean.unregisterMBean();
226 if (datastoreInfoMXBean != null) {
227 datastoreInfoMXBean.unregisterMBean();
230 if (closeable != null) {
// Best-effort close of the attached resource; failures are only logged at debug.
// NOTE(review): the 'try { closeable.close();' lines are not visible in this chunk.
233 } catch (Exception e) {
234 LOG.debug("Error closing instance", e);
238 actorUtils.shutdown();
// NOTE(review): the client shutdown call expected inside this guard is not visible here.
240 if (client != null) {
246 public ActorUtils getActorUtils() {
250 // TODO: consider removing this in favor of awaitReadiness()
// Blocks until all shard leaders settle, up to the initial settle timeout; only logs
// (does not throw) when the timeout elapses or the wait is interrupted.
252 public void waitTillReady() {
253 LOG.info("Beginning to wait for data store to become ready : {}", identifier);
255 final Duration toWait = initialSettleTime();
257 if (!awaitReadiness(toWait)) {
258 LOG.error("Shard leaders failed to settle in {}, giving up", toWait);
// NOTE(review): the 'try {' for the catch below and a 'return;' after the error are
// not visible in this chunk; verify against upstream.
261 } catch (InterruptedException e) {
262 LOG.error("Interrupted while waiting for shards to settle", e);
// NOTE(review): InterruptedException is logged without Thread.currentThread().interrupt();
// confirm whether upstream restores the interrupt flag here.
266 LOG.debug("Data store {} is now ready", identifier);
// Waits for the initial shard settle using the configured settle timeout.
271 public boolean awaitReadiness() throws InterruptedException {
272 return awaitReadiness(initialSettleTime());
/**
 * Waits on the readiness future for up to {@code toWait}; an infinite Duration
 * waits without a timeout. An ExecutionException is treated as convergence.
 * NOTE(review): the 'try {' lines and the 'return true/false' statements are not
 * visible in this chunk; verify the return paths against upstream.
 */
277 public boolean awaitReadiness(final Duration toWait) throws InterruptedException {
279 if (toWait.isFinite()) {
281 readinessFuture.get(toWait.toNanos(), TimeUnit.NANOSECONDS);
282 } catch (TimeoutException e) {
283 LOG.debug("Timed out waiting for shards to settle", e);
// Infinite wait: block until the future completes.
287 readinessFuture.get();
289 } catch (ExecutionException e) {
// Deliberately tolerant: an unexpected failure is logged and treated as converged.
290 LOG.warn("Unexpected readiness failure, assuming convergence", e);
// Timed readiness wait which throws TimeoutException instead of returning false.
298 public void awaitReadiness(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException {
299 if (!awaitReadiness(Duration.create(timeout, unit))) {
300 throw new TimeoutException("Shard leaders failed to settle");
/**
 * Creates the ShardManager actor, retrying up to 100 times at 100ms intervals
 * (actor creation can fail transiently, e.g. while a previous incarnation with
 * the same name is still terminating).
 */
304 @SuppressWarnings("checkstyle:IllegalCatch")
305 private static ActorRef createShardManager(final ActorSystem actorSystem,
306 final AbstractShardManagerCreator<?> creator, final String shardDispatcher,
307 final String shardManagerId) {
308 Exception lastException = null;
310 for (int i = 0; i < 100; i++) {
// NOTE(review): the 'try {' for the catch below and the 'lastException = e;' line
// are not visible in this chunk; verify against upstream.
312 return actorSystem.actorOf(creator.props().withDispatcher(shardDispatcher), shardManagerId);
313 } catch (Exception e) {
315 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
316 LOG.debug("Could not create actor {} because of {} - waiting for sometime before retrying "
317 + "(retry count = {})", shardManagerId, e.getMessage(), i);
// All retries exhausted: surface the last failure cause.
319 throw new IllegalStateException("Failed to create Shard Manager", lastException);
/**
 * Future which completes when all shards settle for the first time.
 *
 * @return A Listenable future.
 */
329 public final ListenableFuture<?> initialSettleFuture() {
330 return readinessFuture;
// Package-private accessor to the raw readiness future -- presumably for tests;
// a @VisibleForTesting annotation may have been dropped from this chunk.
334 SettableFuture<Void> readinessFuture() {
335 return readinessFuture;
/**
 * Registers a proxy listener: the owning shard is resolved from {@code shardLookup}
 * while the registration listens on {@code insideShard} within that shard. The
 * delegate is always wrapped as a clustered listener.
 */
339 @SuppressWarnings("unchecked")
340 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
341 final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard,
342 final DOMDataTreeChangeListener delegate) {
344 requireNonNull(shardLookup, "shardLookup should not be null");
345 requireNonNull(insideShard, "insideShard should not be null");
346 requireNonNull(delegate, "delegate should not be null");
348 final String shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
349 LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
350 delegate,shardLookup, shardName, insideShard);
352 final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
353 new DataTreeChangeListenerProxy<>(actorUtils,
354 // wrap this in the ClusteredDOMDataTreeChangeListener interface
355 // since we always want clustered registration
356 (ClusteredDOMDataTreeChangeListener) delegate::onDataTreeChanged, insideShard);
357 listenerRegistrationProxy.init(shardName);
359 return (ListenerRegistration<L>) listenerRegistrationProxy;
/**
 * Registers a listener on the prefix-configuration shard at {@code internalPath}.
 */
363 @SuppressWarnings("unchecked")
364 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
365 final YangInstanceIdentifier internalPath, final DOMDataTreeChangeListener delegate) {
366 requireNonNull(delegate, "delegate should not be null");
368 LOG.debug("Registering a listener for the configuration shard: {}", internalPath);
370 final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
371 new DataTreeChangeListenerProxy<>(actorUtils, delegate, internalPath);
// Always targets the well-known prefix configuration shard, not a strategy lookup.
372 proxy.init(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
374 return (ListenerRegistration<L>) proxy;
// Initial settle timeout = shard leader election timeout x configured multiplier;
// a multiplier of 0 means wait indefinitely (infinite Duration).
377 private Duration initialSettleTime() {
378 final DatastoreContext context = actorUtils.getDatastoreContext();
379 final int multiplier = context.getInitialSettleTimeoutMultiplier();
380 return multiplier == 0 ? Duration.Inf() : context.getShardLeaderElectionTimeout().duration().$times(multiplier);