2 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
9 package org.opendaylight.controller.cluster.datastore;
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSystem;
13 import akka.actor.PoisonPill;
14 import akka.actor.Props;
15 import com.google.common.annotations.VisibleForTesting;
16 import com.google.common.base.Preconditions;
17 import com.google.common.base.Throwables;
18 import com.google.common.util.concurrent.Uninterruptibles;
19 import java.util.concurrent.CountDownLatch;
20 import java.util.concurrent.TimeUnit;
21 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
22 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
23 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
24 import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClientActor;
25 import org.opendaylight.controller.cluster.datastore.config.Configuration;
26 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
27 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
28 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
29 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
30 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
31 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
32 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
33 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
34 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
35 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
36 import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
37 import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
38 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
39 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
40 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
41 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
42 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
43 import org.opendaylight.yangtools.concepts.ListenerRegistration;
44 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
45 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
46 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
47 import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
48 import org.slf4j.Logger;
49 import org.slf4j.LoggerFactory;
52 * Base implementation of a distributed DOMStore.
54 public abstract class AbstractDataStore implements DistributedDataStoreInterface, SchemaContextListener,
55 DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher,
56 DOMDataTreeCommitCohortRegistry, AutoCloseable {
58 private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class);
// Multiplier applied to the shard-leader-election timeout to derive how long
// waitTillReady() blocks for the shard leaders to settle.
60 private static final long READY_WAIT_FACTOR = 3;
// Wraps the ActorSystem, the ShardManager actor and datastore configuration;
// also provides shard-strategy lookups used by the register* methods below.
62 private final ActorContext actorContext;
// Computed in every constructor as leader-election timeout (ms) * READY_WAIT_FACTOR.
63 private final long waitTillReadyTimeInMillis;
// Optional extra resource closed best-effort during close(); injected via setCloseable().
65 private AutoCloseable closeable;
// JMX beans; registered in the primary constructor, unregistered (null-guarded) in close().
67 private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
69 private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
// Counted down by the ShardManager (wired via ShardManagerCreator) once shards are
// ready; awaited in waitTillReady().
71 private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
// Identifier of this frontend instance; taken from the DataStoreClient or passed in.
73 private final ClientIdentifier identifier;
// Frontend client actor handle; close() null-checks it, so it may legitimately be
// absent depending on which constructor was used.
74 private final DataStoreClient client;
/**
 * Primary production constructor. Creates the ShardManager actor (with retries), wraps
 * it in an ActorContext, spawns the frontend client actor and blocks up to 30 seconds
 * for its DataStoreClient handle, then registers the configuration/info JMX beans.
 *
 * @param actorSystem the ActorSystem to host the datastore actors, must not be null
 * @param cluster cluster membership wrapper, must not be null
 * @param configuration shard/module configuration, must not be null
 * @param datastoreContextFactory source of the base DatastoreContext, must not be null
 * @param restoreFromSnapshot optional snapshot to restore shards from (may be null)
 */
76 @SuppressWarnings("checkstyle:IllegalCatch")
77 protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
78 final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
79 final DatastoreSnapshot restoreFromSnapshot) {
80 Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
81 Preconditions.checkNotNull(cluster, "cluster should not be null");
82 Preconditions.checkNotNull(configuration, "configuration should not be null");
83 Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
// ShardManager actor name is derived from the datastore name (e.g. config/operational).
85 String shardManagerId = ShardManagerIdentifier.builder()
86 .type(datastoreContextFactory.getBaseDatastoreContext().getDataStoreName()).build().toString();
88 LOG.info("Creating ShardManager : {}", shardManagerId);
// Dedicated dispatcher for shard actors so they don't starve the default dispatcher.
90 String shardDispatcher =
91 new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
93 PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
// The creator carries everything the ShardManager needs, including the latch it
// counts down when shards are ready (see waitTillReady()).
95 ShardManagerCreator creator = new ShardManagerCreator().cluster(cluster).configuration(configuration)
96 .datastoreContextFactory(datastoreContextFactory)
97 .waitTillReadyCountDownLatch(waitTillReadyCountDownLatch)
98 .primaryShardInfoCache(primaryShardInfoCache)
99 .restoreFromSnapshot(restoreFromSnapshot)
100 .distributedDataStore(this);
102 actorContext = new ActorContext(actorSystem, createShardManager(actorSystem, creator, shardDispatcher,
103 shardManagerId), cluster, configuration, datastoreContextFactory.getBaseDatastoreContext(),
104 primaryShardInfoCache);
106 final Props clientProps = DistributedDataStoreClientActor.props(cluster.getCurrentMemberName(),
107 datastoreContextFactory.getBaseDatastoreContext().getDataStoreName(), actorContext);
108 final ActorRef clientActor = actorSystem.actorOf(clientProps);
// Blocks up to 30 seconds resolving the client handle.
// NOTE(review): the opening "try {" matching the catch below is not visible in this
// excerpt of the file.
110 client = DistributedDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
111 } catch (Exception e) {
112 LOG.error("Failed to get actor for {}", clientProps, e);
// Kill the half-initialized client actor before propagating the failure.
113 clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
114 Throwables.throwIfUnchecked(e);
115 throw new RuntimeException(e);
118 identifier = client.getIdentifier();
119 LOG.debug("Distributed data store client {} started", identifier);
121 this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
122 .duration().toMillis() * READY_WAIT_FACTOR;
// Register JMX beans; unregistered (null-guarded) in close().
124 datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(
125 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
126 datastoreConfigMXBean.setContext(datastoreContextFactory.getBaseDatastoreContext());
127 datastoreConfigMXBean.registerMBean();
129 datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContextFactory.getBaseDatastoreContext()
130 .getDataStoreMXBeanType(), actorContext);
131 datastoreInfoMXBean.registerMBean();
/**
 * Alternate constructor taking a pre-built ActorContext and identifier; no client
 * actor or JMX beans are created here. Presumably intended for tests — confirm
 * against callers.
 *
 * @param actorContext pre-built actor context, must not be null
 * @param identifier client identifier to report, must not be null
 */
135 protected AbstractDataStore(final ActorContext actorContext, final ClientIdentifier identifier) {
136 this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
138 this.identifier = Preconditions.checkNotNull(identifier);
// Same ready-wait computation as the primary constructor.
139 this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
140 .duration().toMillis() * READY_WAIT_FACTOR;
/**
 * Alternate constructor that additionally injects a pre-built DataStoreClient.
 *
 * @param actorContext pre-built actor context, must not be null
 * @param identifier client identifier to report, must not be null
 * @param clientActor client handle to use (not null-checked here; close() guards for null)
 */
144 protected AbstractDataStore(final ActorContext actorContext, final ClientIdentifier identifier,
145 final DataStoreClient clientActor) {
146 this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
147 this.client = clientActor;
148 this.identifier = Preconditions.checkNotNull(identifier);
// Same ready-wait computation as the primary constructor.
149 this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
150 .duration().toMillis() * READY_WAIT_FACTOR;
// Accessor for the frontend DataStoreClient. (Method body not visible in this excerpt.)
153 protected final DataStoreClient getClient() {
// Package-private accessor for this datastore's client identifier.
// (Method body not visible in this excerpt.)
157 final ClientIdentifier getIdentifier() {
/**
 * Attaches an additional resource to be closed (best-effort) when this datastore
 * is closed; see close().
 *
 * @param closeable resource to close alongside this datastore
 */
161 public void setCloseable(final AutoCloseable closeable) {
162 this.closeable = closeable;
/**
 * Registers a data-change listener. The target shard is resolved from the path via
 * the configured shard strategy, and registration is delegated to a proxy that
 * manages the remote listener actor.
 *
 * @param path subtree to listen on, must not be null
 * @param listener listener to invoke, must not be null
 * @param scope granularity of change notifications
 * @return the proxy acting as the listener registration handle
 */
165 @SuppressWarnings("unchecked")
167 public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
168 ListenerRegistration<L> registerChangeListener(
169 final YangInstanceIdentifier path, final L listener,
170 final AsyncDataBroker.DataChangeScope scope) {
172 Preconditions.checkNotNull(path, "path should not be null");
173 Preconditions.checkNotNull(listener, "listener should not be null");
175 LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
// Resolve which shard hosts this path.
177 String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
179 final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
180 new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
181 listenerRegistrationProxy.init(path, scope);
183 return listenerRegistrationProxy;
/**
 * Registers a DOM data-tree change listener on the shard resolved from {@code treeId}.
 *
 * @param treeId subtree to listen on, must not be null
 * @param listener listener to invoke, must not be null
 * @return the proxy acting as the listener registration handle
 */
187 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
188 final YangInstanceIdentifier treeId, final L listener) {
189 Preconditions.checkNotNull(treeId, "treeId should not be null");
190 Preconditions.checkNotNull(listener, "listener should not be null");
// Resolve which shard hosts this subtree.
192 final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
193 LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
195 final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
196 new DataTreeChangeListenerProxy<>(actorContext, listener, treeId);
197 listenerRegistrationProxy.init(shardName);
199 return listenerRegistrationProxy;
/**
 * Registers a commit cohort for the shard hosting the given subtree's root.
 *
 * @param subtree identifier whose root determines the target shard, must not be null
 * @param cohort cohort to register, must not be null
 */
204 public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
205 final DOMDataTreeIdentifier subtree, final C cohort) {
206 YangInstanceIdentifier treeId =
207 Preconditions.checkNotNull(subtree, "subtree should not be null").getRootIdentifier();
// NOTE(review): message says "listener" but the checked argument is the cohort —
// consider correcting the message text.
208 Preconditions.checkNotNull(cohort, "listener should not be null");
211 final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
212 LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
214 DataTreeCohortRegistrationProxy<C> cohortProxy =
215 new DataTreeCohortRegistrationProxy<>(actorContext, subtree, cohort);
216 cohortProxy.init(shardName);
// SchemaContextListener callback: propagates the updated global schema to the
// actor context (and hence to the shards it manages).
221 public void onGlobalContextUpdated(final SchemaContext schemaContext) {
222 actorContext.setSchemaContext(schemaContext);
// DatastoreContextConfigAdminOverlay.Listener callback: applies updated datastore
// configuration to the actor context and refreshes the config JMX bean.
226 public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
227 LOG.info("DatastoreContext updated for data store {}", actorContext.getDataStoreName());
229 actorContext.setDatastoreContext(contextFactory);
230 datastoreConfigMXBean.setContext(contextFactory.getBaseDatastoreContext());
/**
 * Shuts this datastore down: unregisters JMX beans (null-guarded), closes the
 * injected closeable best-effort (failures only logged at debug), shuts down the
 * actor context, and finally handles the client if one was created.
 */
234 @SuppressWarnings("checkstyle:IllegalCatch")
235 public void close() {
236 LOG.info("Closing data store {}", identifier);
238 if (datastoreConfigMXBean != null) {
239 datastoreConfigMXBean.unregisterMBean();
241 if (datastoreInfoMXBean != null) {
242 datastoreInfoMXBean.unregisterMBean();
// NOTE(review): the try { closeable.close(); } lines between this guard and the
// catch below are not visible in this excerpt.
245 if (closeable != null) {
248 } catch (Exception e) {
// Best-effort close: failures are deliberately swallowed (logged at debug only).
249 LOG.debug("Error closing instance", e);
253 actorContext.shutdown();
// Client may be null depending on which constructor was used.
255 if (client != null) {
// Accessor for the underlying ActorContext. (Method body not visible in this excerpt.)
261 public ActorContext getActorContext() {
/**
 * Blocks until the ShardManager counts down the ready latch, or until
 * waitTillReadyTimeInMillis (leader-election timeout * READY_WAIT_FACTOR) elapses.
 * Timeout and interruption are logged but NOT propagated to the caller.
 */
265 public void waitTillReady() {
266 LOG.info("Beginning to wait for data store to become ready : {}", identifier);
269 if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
270 LOG.debug("Data store {} is now ready", identifier);
// Timed out: shard leaders did not settle in time; proceed anyway after logging.
272 LOG.error("Shard leaders failed to settle in {} seconds, giving up",
273 TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
275 } catch (InterruptedException e) {
// NOTE(review): the interrupt flag is not restored here; consider calling
// Thread.currentThread().interrupt() so callers can observe the interruption.
276 LOG.error("Interrupted while waiting for shards to settle", e);
/**
 * Creates the ShardManager actor, retrying up to 100 times with a 100 ms sleep
 * between attempts (actor creation can fail transiently, e.g. if a previous
 * incarnation of the named actor has not yet terminated).
 *
 * @param actorSystem system to create the actor in
 * @param creator pre-configured ShardManager creator
 * @param shardDispatcher dispatcher path for the actor
 * @param shardManagerId name to register the actor under
 * @return the created ShardManager ActorRef
 * @throws IllegalStateException if all attempts fail; carries the last exception as cause
 */
280 @SuppressWarnings("checkstyle:IllegalCatch")
281 private static ActorRef createShardManager(final ActorSystem actorSystem, final ShardManagerCreator creator,
282 final String shardDispatcher, final String shardManagerId) {
283 Exception lastException = null;
285 for (int i = 0; i < 100; i++) {
287 return actorSystem.actorOf(creator.props().withDispatcher(shardDispatcher), shardManagerId);
288 } catch (Exception e) {
// NOTE(review): the assignment of lastException (and the opening "try {") are not
// visible in this excerpt.
290 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
291 LOG.debug("Could not create actor {} because of {} - waiting for sometime before retrying "
292 + "(retry count = {})", shardManagerId, e.getMessage(), i);
296 throw new IllegalStateException("Failed to create Shard Manager", lastException);
// Exposes the ready latch (counted down by the ShardManager); used by waitTillReady()
// and, presumably, by tests — confirm against callers.
300 public CountDownLatch getWaitTillReadyCountDownLatch() {
301 return waitTillReadyCountDownLatch;
/**
 * Registers an mdsal DOMDataTreeChangeListener via a proxy. The shard is resolved
 * from {@code shardLookup} while the actual registration path inside that shard is
 * {@code insideShard}. The delegate is wrapped as a ClusteredDOMDataTreeChangeListener
 * so the registration is always clustered.
 *
 * @param shardLookup path used only to resolve the target shard, must not be null
 * @param insideShard path within the shard to listen on, must not be null
 * @param delegate listener to forward change notifications to, must not be null
 * @return the proxy acting as the listener registration handle
 */
304 @SuppressWarnings("unchecked")
305 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
306 final YangInstanceIdentifier shardLookup,
307 final YangInstanceIdentifier insideShard,
308 final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener delegate) {
310 Preconditions.checkNotNull(shardLookup, "shardLookup should not be null");
311 Preconditions.checkNotNull(insideShard, "insideShard should not be null");
312 Preconditions.checkNotNull(delegate, "delegate should not be null");
314 final String shardName = actorContext.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
315 LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
316 delegate,shardLookup, shardName, insideShard);
318 final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
319 new DataTreeChangeListenerProxy<>(actorContext,
// Wrap the delegate in the ClusteredDOMDataTreeChangeListener interface
// since we always want clustered registration.
322 (ClusteredDOMDataTreeChangeListener) delegate::onDataTreeChanged, insideShard);
323 listenerRegistrationProxy.init(shardName);
325 return (ListenerRegistration<L>) listenerRegistrationProxy;
/**
 * Registers a change listener on the prefix-configuration shard
 * (ClusterUtils.PREFIX_CONFIG_SHARD_ID) for the given internal path.
 *
 * @param internalPath path within the configuration shard to listen on
 * @param delegate listener to forward change notifications to, must not be null
 * @return the proxy acting as the listener registration handle
 */
328 @SuppressWarnings("unchecked")
329 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
330 final YangInstanceIdentifier internalPath,
331 final DOMDataTreeChangeListener delegate) {
332 Preconditions.checkNotNull(delegate, "delegate should not be null");
334 LOG.debug("Registering a listener for the configuration shard: {}", internalPath);
336 final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
337 new DataTreeChangeListenerProxy<>(actorContext, delegate, internalPath);
// Target the well-known prefix-configuration shard rather than resolving by strategy.
338 proxy.init(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
340 return (ListenerRegistration<L>) proxy;