/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
9 package org.opendaylight.controller.cluster.datastore;
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSystem;
13 import akka.actor.PoisonPill;
14 import akka.actor.Props;
15 import com.google.common.annotations.VisibleForTesting;
16 import com.google.common.base.Preconditions;
17 import com.google.common.base.Throwables;
18 import com.google.common.util.concurrent.Uninterruptibles;
19 import java.util.concurrent.CountDownLatch;
20 import java.util.concurrent.TimeUnit;
21 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
22 import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClient;
23 import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClientActor;
24 import org.opendaylight.controller.cluster.datastore.config.Configuration;
25 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
26 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
27 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
28 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
29 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
30 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
31 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
32 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
33 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
34 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
35 import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
36 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
37 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
38 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
39 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
40 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
41 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
42 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
43 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
44 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
45 import org.opendaylight.yangtools.concepts.ListenerRegistration;
46 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
47 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
48 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
49 import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
50 import org.slf4j.Logger;
51 import org.slf4j.LoggerFactory;
56 public class DistributedDataStore implements DistributedDataStoreInterface, SchemaContextListener,
57 DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher, DOMDataTreeCommitCohortRegistry, AutoCloseable {
    private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);

    // Multiplier applied to the shard leader election timeout to derive the total
    // time waitTillReady() is willing to block.
    private static final long READY_WAIT_FACTOR = 3;

    private final ActorContext actorContext;

    // Maximum wait in waitTillReady(): election timeout * READY_WAIT_FACTOR.
    private final long waitTillReadyTimeInMillis;

    // Optional externally-supplied resource, closed (best-effort) in close(); see setCloseable().
    private AutoCloseable closeable;

    private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;

    private DatastoreInfoMXBeanImpl datastoreInfoMXBean;

    // Counted down once the shards have settled; awaited by waitTillReady().
    private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);

    private final ClientIdentifier identifier;

    // Frontend client; may be null when constructed via the testing constructor.
    private final DistributedDataStoreClient client;

    private final TransactionContextFactory txContextFactory;
79 public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
80 Configuration configuration, DatastoreContextFactory datastoreContextFactory,
81 DatastoreSnapshot restoreFromSnapshot) {
82 Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
83 Preconditions.checkNotNull(cluster, "cluster should not be null");
84 Preconditions.checkNotNull(configuration, "configuration should not be null");
85 Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
87 final Props clientProps = DistributedDataStoreClientActor.props(cluster.getCurrentMemberName(),
88 datastoreContextFactory.getBaseDatastoreContext().getDataStoreName());
89 final ActorRef clientActor = actorSystem.actorOf(clientProps);
91 client = DistributedDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
92 } catch (Exception e) {
93 LOG.error("Failed to get actor for {}", clientProps, e);
94 clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
95 throw Throwables.propagate(e);
98 identifier = client.getIdentifier();
99 LOG.debug("Distributed data store client {} started", identifier);
101 String shardManagerId = ShardManagerIdentifier.builder()
102 .type(datastoreContextFactory.getBaseDatastoreContext().getDataStoreName()).build().toString();
104 LOG.info("Creating ShardManager : {}", shardManagerId);
106 String shardDispatcher =
107 new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
109 PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
111 ShardManagerCreator creator = new ShardManagerCreator().cluster(cluster).configuration(configuration).
112 datastoreContextFactory(datastoreContextFactory).waitTillReadyCountdownLatch(waitTillReadyCountDownLatch).
113 primaryShardInfoCache(primaryShardInfoCache).restoreFromSnapshot(restoreFromSnapshot);
115 actorContext = new ActorContext(actorSystem, createShardManager(actorSystem, creator, shardDispatcher,
116 shardManagerId), cluster, configuration, datastoreContextFactory.getBaseDatastoreContext(), primaryShardInfoCache);
118 this.waitTillReadyTimeInMillis =
119 actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
121 this.txContextFactory = new TransactionContextFactory(actorContext, identifier);
123 datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(
124 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
125 datastoreConfigMXBean.setContext(datastoreContextFactory.getBaseDatastoreContext());
126 datastoreConfigMXBean.registerMBean();
128 datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContextFactory.getBaseDatastoreContext().
129 getDataStoreMXBeanType(), actorContext);
130 datastoreInfoMXBean.registerMBean();
134 DistributedDataStore(ActorContext actorContext, ClientIdentifier identifier) {
135 this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
137 this.identifier = Preconditions.checkNotNull(identifier);
138 this.txContextFactory = new TransactionContextFactory(actorContext, identifier);
139 this.waitTillReadyTimeInMillis =
140 actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().toMillis() * READY_WAIT_FACTOR;
143 public void setCloseable(AutoCloseable closeable) {
144 this.closeable = closeable;
147 @SuppressWarnings("unchecked")
149 public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
150 ListenerRegistration<L> registerChangeListener(
151 final YangInstanceIdentifier path, L listener,
152 AsyncDataBroker.DataChangeScope scope) {
154 Preconditions.checkNotNull(path, "path should not be null");
155 Preconditions.checkNotNull(listener, "listener should not be null");
157 LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
159 String shardName = actorContext.getShardStrategyFactory().getStrategy(path).findShard(path);
161 final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
162 new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
163 listenerRegistrationProxy.init(path, scope);
165 return listenerRegistrationProxy;
169 public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(YangInstanceIdentifier treeId, L listener) {
170 Preconditions.checkNotNull(treeId, "treeId should not be null");
171 Preconditions.checkNotNull(listener, "listener should not be null");
173 final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
174 LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
176 final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
177 new DataTreeChangeListenerProxy<L>(actorContext, listener);
178 listenerRegistrationProxy.init(shardName, treeId);
180 return listenerRegistrationProxy;
185 public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
186 DOMDataTreeIdentifier subtree, C cohort) {
187 YangInstanceIdentifier treeId =
188 Preconditions.checkNotNull(subtree, "subtree should not be null").getRootIdentifier();
189 Preconditions.checkNotNull(cohort, "listener should not be null");
192 final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
193 LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
195 DataTreeCohortRegistrationProxy<C> cohortProxy = new DataTreeCohortRegistrationProxy<C>(actorContext, subtree, cohort);
196 cohortProxy.init(shardName);
201 public DOMStoreTransactionChain createTransactionChain() {
202 return txContextFactory.createTransactionChain();
206 public DOMStoreReadTransaction newReadOnlyTransaction() {
207 return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
211 public DOMStoreWriteTransaction newWriteOnlyTransaction() {
212 actorContext.acquireTxCreationPermit();
213 return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
217 public DOMStoreReadWriteTransaction newReadWriteTransaction() {
218 actorContext.acquireTxCreationPermit();
219 return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
223 public void onGlobalContextUpdated(SchemaContext schemaContext) {
224 actorContext.setSchemaContext(schemaContext);
228 public void onDatastoreContextUpdated(DatastoreContextFactory contextFactory) {
229 LOG.info("DatastoreContext updated for data store {}", actorContext.getDataStoreName());
231 actorContext.setDatastoreContext(contextFactory);
232 datastoreConfigMXBean.setContext(contextFactory.getBaseDatastoreContext());
236 public void close() {
237 LOG.info("Closing data store {}", identifier);
239 if (datastoreConfigMXBean != null) {
240 datastoreConfigMXBean.unregisterMBean();
242 if (datastoreInfoMXBean != null) {
243 datastoreInfoMXBean.unregisterMBean();
246 if (closeable != null) {
249 } catch (Exception e) {
250 LOG.debug("Error closing instance", e);
254 txContextFactory.close();
255 actorContext.shutdown();
257 if (client != null) {
263 public ActorContext getActorContext() {
267 public void waitTillReady(){
268 LOG.info("Beginning to wait for data store to become ready : {}", identifier);
271 if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
272 LOG.debug("Data store {} is now ready", identifier);
274 LOG.error("Shared leaders failed to settle in {} seconds, giving up", TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
276 } catch (InterruptedException e) {
277 LOG.error("Interrupted while waiting for shards to settle", e);
281 private static ActorRef createShardManager(ActorSystem actorSystem, ShardManagerCreator creator,
282 String shardDispatcher, String shardManagerId) {
283 Exception lastException = null;
285 for(int i=0;i<100;i++) {
287 return actorSystem.actorOf(creator.props().withDispatcher(shardDispatcher).withMailbox(
288 ActorContext.BOUNDED_MAILBOX), shardManagerId);
289 } catch (Exception e){
291 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
292 LOG.debug("Could not create actor {} because of {} - waiting for sometime before retrying (retry count = {})",
293 shardManagerId, e.getMessage(), i);
297 throw new IllegalStateException("Failed to create Shard Manager", lastException);
301 public CountDownLatch getWaitTillReadyCountDownLatch() {
302 return waitTillReadyCountDownLatch;