package org.opendaylight.controller.cluster.datastore;
-import java.util.concurrent.Executors;
-
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
-
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
/**
 * A distributed implementation of the DOM data store that delegates shard
 * operations to actors created under a {@code ShardManager}. Change-listener
 * registrations are forwarded to the local shard via the actor system.
 */
private static final Logger
LOG = LoggerFactory.getLogger(DistributedDataStore.class);
// Diff note: the single fixed pool-size constant is replaced by two
// system-property-tunable limits (max threads and max queue depth),
// resolving the FIXME removed below.
- private static final int DEFAULT_EXECUTOR_POOL_SIZE = 10;
+ private static final String EXECUTOR_MAX_POOL_SIZE_PROP =
+ "mdsal.dist-datastore-executor-pool.size";
+ private static final int DEFAULT_EXECUTOR_MAX_POOL_SIZE = 10;
+
+ private static final String EXECUTOR_MAX_QUEUE_SIZE_PROP =
+ "mdsal.dist-datastore-executor-queue.size";
+ private static final int DEFAULT_EXECUTOR_MAX_QUEUE_SIZE = 5000;
// Diff note: the 'type' field is removed; the shard type now only flows
// through the ActorSystem-based constructor into the ShardManager identifier.
- private final String type;
private final ActorContext actorContext;
// NOTE(review): schemaContext is mutable and not volatile; presumably set via
// the UpdateSchemaContext message handling — confirm the update path is
// safely published before listener registration reads it.
private SchemaContext schemaContext;
-
-
/**
* Executor used to run FutureTask's
*
* This is typically used when we need to make a request to an actor and
* wait for its response and the consumer needs to be provided a Future.
- *
- * FIXME : Make the thread pool size configurable.
*/
// Diff note: the unbounded-queue fixed pool is replaced by a bounded,
// blocking fast thread pool ("DistDataStore" thread-name prefix) whose pool
// and queue sizes come from the system properties declared above.
private final ListeningExecutorService executor =
- MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(DEFAULT_EXECUTOR_POOL_SIZE));
+ MoreExecutors.listeningDecorator(
+ SpecialExecutors.newBlockingBoundedFastThreadPool(
+ PropertyUtils.getIntSystemProperty(
+ EXECUTOR_MAX_POOL_SIZE_PROP,
+ DEFAULT_EXECUTOR_MAX_POOL_SIZE),
+ PropertyUtils.getIntSystemProperty(
+ EXECUTOR_MAX_QUEUE_SIZE_PROP,
+ DEFAULT_EXECUTOR_MAX_QUEUE_SIZE), "DistDataStore"));
/**
 * Creates a data store that spins up its own ShardManager actor.
 *
 * Diff note: instead of delegating to the (removed) two-arg constructor,
 * this now null-checks all arguments, derives the ShardManager actor name
 * from a ShardManagerIdentifier (replacing the ad-hoc "shardmanager-" + type
 * string), and builds the ActorContext directly.
 *
 * @param actorSystem   actor system hosting the ShardManager; must not be null
 * @param type          store type used to name the ShardManager; must not be null
 * @param cluster       cluster wrapper passed to ShardManager and ActorContext; must not be null
 * @param configuration shard configuration; must not be null
 */
public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster, Configuration configuration) {
- this(new ActorContext(actorSystem, actorSystem
+ Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
+ Preconditions.checkNotNull(type, "type should not be null");
+ Preconditions.checkNotNull(cluster, "cluster should not be null");
+ Preconditions.checkNotNull(configuration, "configuration should not be null");
+
+
+ String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
+
+ LOG.info("Creating ShardManager : {}", shardManagerId);
+
+ this.actorContext = new ActorContext(actorSystem, actorSystem
.actorOf(ShardManager.props(type, cluster, configuration),
- "shardmanager-" + type), cluster, configuration), type);
+ shardManagerId ), cluster, configuration);
}
/**
 * Creates a data store around an externally supplied ActorContext
 * (primarily for tests / wiring done elsewhere).
 *
 * Diff note: the 'type' parameter is dropped along with the 'type' field —
 * NOTE(review): this is a source-incompatible signature change; confirm all
 * callers of the old two-arg constructor have been updated.
 *
 * @param actorContext the actor context to use; must not be null
 */
- public DistributedDataStore(ActorContext actorContext, String type) {
- this.type = type;
- this.actorContext = actorContext;
+ public DistributedDataStore(ActorContext actorContext) {
+ this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
}
// NOTE(review): method signature begins before this view and the body
// continues past it — comments below annotate only the visible portion.
YangInstanceIdentifier path, L listener,
AsyncDataBroker.DataChangeScope scope) {
+ Preconditions.checkNotNull(path, "path should not be null");
+ Preconditions.checkNotNull(listener, "listener should not be null");
+
+
+ LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+
// Spawn an actor that will receive change notifications for this listener.
// NOTE(review): this actor is created before the registration call succeeds;
// if 'result' below is null the actor appears to be left running — confirm
// whether it should be stopped on the failure path.
ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
DataChangeListener.props(schemaContext,listener,path ));
// Ask the local shard (shardName resolved before this view) to register
// the listener, waiting up to ASK_DURATION for a reply.
Object result = actorContext.executeLocalShardOperation(shardName,
new RegisterChangeListener(path, dataChangeListenerActor.path(),
// Diff note: the message is now sent as the plain Java object rather than
// its serializable form — NOTE(review): verify both sides of this protocol
// change ship together, or mixed-version clusters will fail to decode it.
- scope).toSerializable(),
+ scope),
ActorContext.ASK_DURATION
);
if (result != null) {
// Diff note: direct cast replaces fromSerializable(), matching the
// protocol change above.
- RegisterChangeListenerReply reply = RegisterChangeListenerReply
- .fromSerializable(actorContext.getActorSystem(), result);
+ RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
return new DataChangeListenerRegistrationProxy(actorContext
.actorSelection(reply.getListenerRegistrationPath()), listener,
dataChangeListenerActor);