<configfile finalname="configuration/initial/akka.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf</configfile>
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
+ <configfile finalname="etc/org.opendaylight.controller.cluster.datastore.cfg">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore</configfile>
</feature>
<feature name='odl-clustering-test-app' version='${project.version}'>
<type>xml</type>
<classifier>moduleconf</classifier>
</artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/datastore.cfg</file>
+ <type>cfg</type>
+ <classifier>datastore</classifier>
+ </artifact>
</artifacts>
</configuration>
</execution>
--- /dev/null
+# This file specifies property settings for the clustered data store to control its behavior. A
+# property may be applied to every data store type ("config" and "operational") or can be customized
+# differently for each data store type by prefixing the data store type + '.'. For example, specifying
+# the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
+# "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
+# specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+
+# The multiplication factor to be used to determine shard election timeout. The shard election timeout
+# is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
+#shard-election-timeout-factor=2
+
+# The interval at which a shard will send a heart beat message to its remote shard.
+#shard-heartbeat-interval-in-millis=500
+
+# The maximum amount of time to wait for a shard to elect a leader before failing an operation (eg transaction create).
+#shard-leader-election-timeout-in-seconds=30
+
+# Enable or disable data persistence.
+#persistent=true
+
+# Disable persistence for the operational data store by default.
+operational.persistent=false
+
+# The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
+#shard-transaction-idle-timeout-in-minutes=10
+
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the
+# next messages before it aborts the transaction.
+#shard-transaction-commit-timeout-in-seconds=30
+
+# The maximum allowed capacity for each shard's transaction commit queue.
+#shard-transaction-commit-queue-capacity=20000
+
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before
+# failing an operation (eg transaction create and change listener registration).
+#shard-initialization-timeout-in-seconds=300
+
+# The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.
+#shard-journal-recovery-log-batch-size=5000
+
+# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-batch-count=20000
+
+# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-data-threshold-percentage=12
+
+# The interval at which the leader of the shard will check whether its majority of followers are
+# active and, if not, mark itself as isolated.
+#shard-isolated-leader-check-interval-in-millis=5000
+
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance as less modifications messages are sent to the
+# actor and thus lessens the chance that the transaction actor's mailbox queue could get full.
+#shard-batched-modification-count=100
+
+# The maximum amount of time for akka operations (remote or local) to complete before failing.
+#operation-timeout-in-seconds=5
+
+# The initial number of transactions per second that are allowed before the data store should begin
+# applying back pressure. This number is only used as an initial guidance, subsequently the datastore
+# measures the latency for a commit and auto-adjusts the rate limit.
+#transaction-creation-initial-rate-limit=100
+
+# The maximum thread pool size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-pool-size=20
+
+# The maximum queue size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-queue-size=1000
+
+# The maximum queue size for each shard's data store data change listener.
+#max-shard-data-change-listener-queue-size=1000
+
+# The maximum queue size for each shard's data store executor.
+#max-shard-data-store-executor-queue-size=5000
+
private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
- private DatastoreContext(){
+ private DatastoreContext() {
setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
+ setElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR);
+ }
+
+ private DatastoreContext(DatastoreContext other) {
+ this.dataStoreProperties = other.dataStoreProperties;
+ this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
+ this.operationTimeoutInSeconds = other.operationTimeoutInSeconds;
+ this.dataStoreMXBeanType = other.dataStoreMXBeanType;
+ this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
+ this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
+ this.shardInitializationTimeout = other.shardInitializationTimeout;
+ this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
+ this.persistent = other.persistent;
+ this.configurationReader = other.configurationReader;
+ this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
+ this.dataStoreType = other.dataStoreType;
+ this.shardBatchedModificationCount = other.shardBatchedModificationCount;
+
+ setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
+ setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
+ setHeartbeatInterval(other.raftConfig.getHeartBeatInterval().toMillis());
+ setIsolatedLeaderCheckInterval(other.raftConfig.getIsolatedCheckIntervalInMillis());
+ setSnapshotDataThresholdPercentage(other.raftConfig.getSnapshotDataThresholdPercentage());
+ setElectionTimeoutFactor(other.raftConfig.getElectionTimeoutFactor());
}
public static Builder newBuilder() {
- return new Builder();
+ return new Builder(new DatastoreContext());
+ }
+
+ public static Builder newBuilderFrom(DatastoreContext context) {
+ return new Builder(new DatastoreContext(context));
}
public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
}
- private void setSnapshotBatchCount(int shardSnapshotBatchCount) {
+ private void setSnapshotBatchCount(long shardSnapshotBatchCount) {
raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
}
}
public static class Builder {
- private final DatastoreContext datastoreContext = new DatastoreContext();
+ private final DatastoreContext datastoreContext;
+ private int maxShardDataChangeExecutorPoolSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE;
+ private int maxShardDataChangeExecutorQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE;
+ private int maxShardDataChangeListenerQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE;
+ private int maxShardDataStoreExecutorQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE;
+
+ private Builder(DatastoreContext datastoreContext) {
+ this.datastoreContext = datastoreContext;
+
+ if(datastoreContext.getDataStoreProperties() != null) {
+ maxShardDataChangeExecutorPoolSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
+ maxShardDataChangeExecutorQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
+ maxShardDataChangeListenerQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
+ maxShardDataStoreExecutorQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
+ }
+ }
- public Builder shardTransactionIdleTimeout(Duration shardTransactionIdleTimeout) {
- datastoreContext.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ public Builder boundedMailboxCapacity(int boundedMailboxCapacity) {
+ // TODO - this is defined in the yang DataStoreProperties but not currently used.
return this;
}
+ public Builder enableMetricCapture(boolean enableMetricCapture) {
+ // TODO - this is defined in the yang DataStoreProperties but not currently used.
+ return this;
+ }
+
+
+ public Builder shardTransactionIdleTimeout(long timeout, TimeUnit unit) {
+ datastoreContext.shardTransactionIdleTimeout = Duration.create(timeout, unit);
+ return this;
+ }
+
+ public Builder shardTransactionIdleTimeoutInMinutes(long timeout) {
+ return shardTransactionIdleTimeout(timeout, TimeUnit.MINUTES);
+ }
+
public Builder operationTimeoutInSeconds(int operationTimeoutInSeconds) {
datastoreContext.operationTimeoutInSeconds = operationTimeoutInSeconds;
return this;
return this;
}
- public Builder dataStoreProperties(InMemoryDOMDataStoreConfigProperties dataStoreProperties) {
- datastoreContext.dataStoreProperties = dataStoreProperties;
- return this;
- }
-
public Builder shardTransactionCommitTimeoutInSeconds(int shardTransactionCommitTimeoutInSeconds) {
datastoreContext.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
return this;
return this;
}
+ public Builder shardInitializationTimeoutInSeconds(long timeout) {
+ return shardInitializationTimeout(timeout, TimeUnit.SECONDS);
+ }
+
public Builder shardLeaderElectionTimeout(long timeout, TimeUnit unit) {
datastoreContext.shardLeaderElectionTimeout = new Timeout(timeout, unit);
return this;
}
+ public Builder shardLeaderElectionTimeoutInSeconds(long timeout) {
+ return shardLeaderElectionTimeout(timeout, TimeUnit.SECONDS);
+ }
+
public Builder configurationReader(ConfigurationReader configurationReader){
datastoreContext.configurationReader = configurationReader;
return this;
return this;
}
+ public Builder maxShardDataChangeExecutorPoolSize(int maxShardDataChangeExecutorPoolSize) {
+ this.maxShardDataChangeExecutorPoolSize = maxShardDataChangeExecutorPoolSize;
+ return this;
+ }
+
+ public Builder maxShardDataChangeExecutorQueueSize(int maxShardDataChangeExecutorQueueSize) {
+ this.maxShardDataChangeExecutorQueueSize = maxShardDataChangeExecutorQueueSize;
+ return this;
+ }
+
+ public Builder maxShardDataChangeListenerQueueSize(int maxShardDataChangeListenerQueueSize) {
+ this.maxShardDataChangeListenerQueueSize = maxShardDataChangeListenerQueueSize;
+ return this;
+ }
+
+ public Builder maxShardDataStoreExecutorQueueSize(int maxShardDataStoreExecutorQueueSize) {
+ this.maxShardDataStoreExecutorQueueSize = maxShardDataStoreExecutorQueueSize;
+ return this;
+ }
+
public DatastoreContext build() {
+ datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
+ maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
+ maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
return datastoreContext;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.io.IOException;
+import java.util.Dictionary;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class that overlays DatastoreContext settings with settings obtained from an OSGi Config Admin
+ * service.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextConfigAdminOverlay implements AutoCloseable {
+ public static final String CONFIG_ID = "org.opendaylight.controller.cluster.datastore";
+
+ private static final Logger LOG = LoggerFactory.getLogger(DatastoreContextConfigAdminOverlay.class);
+
+ private final DatastoreContextIntrospector introspector;
+ private final BundleContext bundleContext;
+
+ /**
+ * Constructs an instance and immediately overlays any persisted Config Admin settings for
+ * {@link #CONFIG_ID} onto the DatastoreContext managed by the given introspector.
+ *
+ * @param introspector applies the property values to the DatastoreContext via reflection
+ * @param bundleContext used to look up the ConfigurationAdmin OSGi service
+ */
+ public DatastoreContextConfigAdminOverlay(DatastoreContextIntrospector introspector, BundleContext bundleContext) {
+ this.introspector = introspector;
+ this.bundleContext = bundleContext;
+
+ ServiceReference<ConfigurationAdmin> configAdminServiceReference =
+ bundleContext.getServiceReference(ConfigurationAdmin.class);
+ if(configAdminServiceReference == null) {
+ LOG.warn("No ConfigurationAdmin service found");
+ } else {
+ overlaySettings(configAdminServiceReference);
+ }
+ }
+
+ /**
+ * Reads the Configuration for {@link #CONFIG_ID} from the ConfigurationAdmin service and
+ * applies its properties to the DatastoreContext via the introspector. The service reference
+ * is always released in the finally block, even on failure.
+ */
+ private void overlaySettings(ServiceReference<ConfigurationAdmin> configAdminServiceReference) {
+ try {
+ ConfigurationAdmin configAdmin = bundleContext.getService(configAdminServiceReference);
+
+ Configuration config = configAdmin.getConfiguration(CONFIG_ID);
+ if(config != null) {
+ Dictionary<String, Object> properties = config.getProperties();
+
+ LOG.debug("Overlaying settings: {}", properties);
+
+ introspector.update(properties);
+ } else {
+ LOG.debug("No Configuration found for {}", CONFIG_ID);
+ }
+ } catch (IOException e) {
+ LOG.error("Error obtaining Configuration for pid {}", CONFIG_ID, e);
+ } catch(IllegalStateException e) {
+ // Ignore - indicates the bundleContext has been closed.
+ } finally {
+ try {
+ bundleContext.ungetService(configAdminServiceReference);
+ } catch (Exception e) {
+ LOG.debug("Error from ungetService", e);
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ // Nothing to clean up currently - the Config Admin service is looked up and released
+ // within the constructor.
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.primitives.Primitives;
+import java.beans.BeanInfo;
+import java.beans.ConstructorProperties;
+import java.beans.IntrospectionException;
+import java.beans.Introspector;
+import java.beans.MethodDescriptor;
+import java.beans.PropertyDescriptor;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Dictionary;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.text.WordUtils;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Introspects on a DatastoreContext instance to set its properties via reflection.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextIntrospector {
+ private static final Logger LOG = LoggerFactory.getLogger(DatastoreContextIntrospector.class);
+
+ // Maps property name -> type as declared on the yang-generated DataStoreProperties interface.
+ private static final Map<String, Class<?>> dataStorePropTypes = new HashMap<>();
+
+ // Maps property type -> single-argument constructor used to build (and validate) instances.
+ private static final Map<Class<?>, Constructor<?>> constructors = new HashMap<>();
+
+ // Maps yang-generated type -> getter that unwraps the underlying primitive value.
+ private static final Map<Class<?>, Method> yangTypeGetters = new HashMap<>();
+
+ // Maps property name -> corresponding setter method on DatastoreContext.Builder.
+ private static final Map<String, Method> builderSetters = new HashMap<>();
+
+ static {
+ try {
+ introspectDatastoreContextBuilder();
+ introspectDataStoreProperties();
+ introspectPrimitiveTypes();
+ } catch (IntrospectionException e) {
+ LOG.error("Error initializing DatastoreContextIntrospector", e);
+ }
+ }
+
+ /**
+ * Introspects each primitive wrapper (ie Integer, Long etc) and String type to find the
+ * constructor that takes a single String argument. For primitive wrappers, this constructor
+ * converts from a String representation.
+ */
+ private static void introspectPrimitiveTypes() {
+
+ Set<Class<?>> primitives = ImmutableSet.<Class<?>>builder().addAll(
+ Primitives.allWrapperTypes()).add(String.class).build();
+ for(Class<?> primitive: primitives) {
+ try {
+ processPropertyType(primitive);
+ } catch (Exception e) {
+ // Ignore primitives that can't be constructed from a String, eg Character and Void.
+ }
+ }
+ }
+
+ /**
+ * Introspects the DatastoreContext.Builder class to find all its setter methods that we will
+ * invoke via reflection. We can't use the bean Introspector here as the Builder setters don't
+ * follow the bean property naming convention, ie setter prefixed with "set", so look for all
+ * the methods that return Builder.
+ */
+ private static void introspectDatastoreContextBuilder() {
+ for(Method method: Builder.class.getMethods()) {
+ if(Builder.class.equals(method.getReturnType())) {
+ builderSetters.put(method.getName(), method);
+ }
+ }
+ }
+
+ /**
+ * Introspects the DataStoreProperties interface that is generated from the DataStoreProperties
+ * yang grouping. We use the bean Introspector to find the types of all the properties defined
+ * in the interface (this is the type returned from the getter method). For each type, we find
+ * the appropriate constructor that we will use.
+ */
+ private static void introspectDataStoreProperties() throws IntrospectionException {
+ BeanInfo beanInfo = Introspector.getBeanInfo(DataStoreProperties.class);
+ for(PropertyDescriptor desc: beanInfo.getPropertyDescriptors()) {
+ processDataStoreProperty(desc.getName(), desc.getPropertyType());
+ }
+
+ // Getter methods that return Boolean and start with "is" instead of "get" aren't recognized as
+ // properties and thus aren't returned from getPropertyDescriptors. A getter starting with
+ // "is" is only supported if it returns primitive boolean. So we'll check for these via
+ // getMethodDescriptors.
+ for(MethodDescriptor desc: beanInfo.getMethodDescriptors()) {
+ String methodName = desc.getName();
+ if(Boolean.class.equals(desc.getMethod().getReturnType()) && methodName.startsWith("is")) {
+ String propertyName = WordUtils.uncapitalize(methodName.substring(2));
+ processDataStoreProperty(propertyName, Boolean.class);
+ }
+ }
+ }
+
+ /**
+ * Processes a property defined on the DataStoreProperties interface.
+ */
+ private static void processDataStoreProperty(String name, Class<?> propertyType) {
+ Preconditions.checkArgument(builderSetters.containsKey(name), String.format(
+ "DataStoreProperties property \"%s\" does not have corresponding setter in DatastoreContext.Builder", name));
+ try {
+ processPropertyType(propertyType);
+ dataStorePropTypes.put(name, propertyType);
+ } catch (Exception e) {
+ LOG.error("Error finding constructor for type {}", propertyType, e);
+ }
+ }
+
+ /**
+ * Finds the appropriate constructor for the specified type that we will use to construct
+ * instances.
+ */
+ private static void processPropertyType(Class<?> propertyType) throws Exception {
+ Class<?> wrappedType = Primitives.wrap(propertyType);
+ if(constructors.containsKey(wrappedType)) {
+ return;
+ }
+
+ // If the type is a primitive (or String type), we look for the constructor that takes a
+ // single String argument, which, for primitives, validates and converts from a String
+ // representation which is the form we get on ingress.
+ if(propertyType.isPrimitive() || Primitives.isWrapperType(propertyType) ||
+ propertyType.equals(String.class))
+ {
+ constructors.put(wrappedType, propertyType.getConstructor(String.class));
+ } else {
+ // This must be a yang-defined type. We need to find the constructor that takes a
+ // primitive as the only argument. This will be used to construct instances to perform
+ // validation (eg range checking). The yang-generated types have a couple single-argument
+ // constructors but the one we want has the bean ConstructorProperties annotation.
+ for(Constructor<?> ctor: propertyType.getConstructors()) {
+ ConstructorProperties ctorPropsAnnotation = ctor.getAnnotation(ConstructorProperties.class);
+ if(ctor.getParameterTypes().length == 1 && ctorPropsAnnotation != null) {
+ findYangTypeGetter(propertyType, ctorPropsAnnotation.value()[0]);
+ constructors.put(propertyType, ctor);
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * Finds the getter method on a yang-generated type for the specified property name.
+ */
+ private static void findYangTypeGetter(Class<?> type, String propertyName)
+ throws Exception {
+ for(PropertyDescriptor desc: Introspector.getBeanInfo(type).getPropertyDescriptors()) {
+ if(desc.getName().equals(propertyName)) {
+ yangTypeGetters.put(type, desc.getReadMethod());
+ return;
+ }
+ }
+
+ throw new IllegalArgumentException(String.format(
+ "Getter method for constructor property %s not found for YANG type %s",
+ propertyName, type));
+ }
+
+ // The current effective context; replaced wholesale by update() when settings change.
+ private DatastoreContext context;
+
+ public DatastoreContextIntrospector(DatastoreContext context) {
+ this.context = context;
+ }
+
+ public DatastoreContext getContext() {
+ return context;
+ }
+
+ /**
+ * Applies the given properties to the cached DatastoreContext and yields a new DatastoreContext
+ * instance which can be obtained via {@link #getContext()}.
+ *
+ * @param properties the properties to apply
+ * @return true if the cached DatastoreContext was updated, false otherwise.
+ */
+ public boolean update(Dictionary<String, Object> properties) {
+ if(properties == null || properties.isEmpty()) {
+ return false;
+ }
+
+ Builder builder = DatastoreContext.newBuilderFrom(context);
+
+ final String dataStoreTypePrefix = context.getDataStoreType() + '.';
+
+ // Sort the property keys by putting the names prefixed with the data store type last. This
+ // is done so data store specific settings are applied after global settings.
+ // NOTE(review): this comparator is not a valid total order - when both keys carry the
+ // prefix it returns 1 regardless of argument order. Harmless for small property sets but
+ // it can trip TimSort's contract check; consider comparing the booleans first, then names.
+ ArrayList<String> keys = Collections.list(properties.keys());
+ Collections.sort(keys, new Comparator<String>() {
+ @Override
+ public int compare(String key1, String key2) {
+ return key1.startsWith(dataStoreTypePrefix) ? 1 :
+ key2.startsWith(dataStoreTypePrefix) ? -1 : key1.compareTo(key2);
+ }
+ });
+
+ boolean updated = false;
+ for(String key: keys) {
+ Object value = properties.get(key);
+ try {
+ // If the key is prefixed with the data store type, strip it off.
+ // NOTE(review): replaceFirst treats the prefix as a regex - the '.' is a wildcard.
+ // Benign for "config."/"operational." but a plain substring would be safer.
+ if(key.startsWith(dataStoreTypePrefix)) {
+ key = key.replaceFirst(dataStoreTypePrefix, "");
+ }
+
+ key = convertToCamelCase(key);
+
+ // Convert the value to the right type.
+ value = convertValue(key, value);
+ if(value == null) {
+ continue;
+ }
+
+ LOG.debug("Converted value for property {}: {} ({})",
+ key, value, value.getClass().getSimpleName());
+
+ // Call the setter method on the Builder instance. The setter is guaranteed non-null
+ // here: convertValue returned non-null only for names present in dataStorePropTypes,
+ // and processDataStoreProperty requires a matching Builder setter for each of those.
+ Method setter = builderSetters.get(key);
+ setter.invoke(builder, constructorValueRecursively(
+ Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+
+ updated = true;
+
+ } catch (Exception e) {
+ LOG.error("Error converting value ({}) for property {}", value, key, e);
+ }
+ }
+
+ if(updated) {
+ context = builder.build();
+ }
+
+ return updated;
+ }
+
+ /**
+ * Converts a lower-hyphen property name (eg "shard-election-timeout-factor") to lowerCamelCase
+ * to match the Builder setter names. Names containing no '-' or space are simply uncapitalized.
+ */
+ private String convertToCamelCase(String inString) {
+ String str = inString.trim();
+ if(StringUtils.contains(str, '-') || StringUtils.contains(str, ' ')) {
+ str = inString.replace('-', ' ');
+ str = WordUtils.capitalizeFully(str);
+ str = StringUtils.deleteWhitespace(str);
+ }
+
+ return StringUtils.uncapitalize(str);
+ }
+
+ /**
+ * Converts the given raw value to the declared type of the named DataStoreProperties property.
+ * Returns null (caller skips the property) if the name is unknown.
+ */
+ private Object convertValue(String name, Object from) throws Exception {
+ Class<?> propertyType = dataStorePropTypes.get(name);
+ if(propertyType == null) {
+ LOG.debug("Property not found for {}", name);
+ return null;
+ }
+
+ LOG.debug("Type for property {}: {}, converting value {} ({})",
+ name, propertyType.getSimpleName(), from, from.getClass().getSimpleName());
+
+ // Recurse the chain of constructors depth-first to get the resulting value. Eg, if the
+ // property type is the yang-generated NonZeroUint32Type, it's constructor takes a Long so
+ // we have to first construct a Long instance from the input value.
+ Object converted = constructorValueRecursively(propertyType, from.toString());
+
+ // If the converted type is a yang-generated type, call the getter to obtain the actual value.
+ Method getter = yangTypeGetters.get(converted.getClass());
+ if(getter != null) {
+ converted = getter.invoke(converted);
+ }
+
+ return converted;
+ }
+
+ /**
+ * Recursively constructs a value of the given type from the String form, walking the chain of
+ * cached single-argument constructors (eg a yang type wrapping a Long wrapping a String) until
+ * a String-taking constructor terminates the recursion.
+ */
+ private Object constructorValueRecursively(Class<?> toType, Object fromValue) throws Exception {
+ LOG.debug("convertValueRecursively - toType: {}, fromValue {} ({})",
+ toType.getSimpleName(), fromValue, fromValue.getClass().getSimpleName());
+
+ Constructor<?> ctor = constructors.get(toType);
+
+ LOG.debug("Found {}", ctor);
+
+ if(ctor == null) {
+ throw new IllegalArgumentException(String.format("Constructor not found for type %s", toType));
+ }
+
+ Object value = fromValue;
+
+ // Since the original input type is a String, once we find a constructor that takes a String
+ // argument, we're done recursing.
+ if(!ctor.getParameterTypes()[0].equals(String.class)) {
+ value = constructorValueRecursively(ctor.getParameterTypes()[0], fromValue);
+ }
+
+ return ctor.newInstance(value);
+ }
+}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
private final ActorContext actorContext;
+ private AutoCloseable closeable;
+
+ private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
+
public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
Configuration configuration, DatastoreContext datastoreContext) {
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
ShardManager.props(cluster, configuration, datastoreContext)
.withDispatcher(shardDispatcher).withMailbox(ActorContext.MAILBOX), shardManagerId ),
cluster, configuration, datastoreContext);
+
+ datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
+ datastoreConfigMXBean.setContext(datastoreContext);
+ datastoreConfigMXBean.registerMBean();
}
public DistributedDataStore(ActorContext actorContext) {
this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
}
+ public void setCloseable(AutoCloseable closeable) {
+ this.closeable = closeable;
+ }
+
@SuppressWarnings("unchecked")
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
}
@Override
- public void close() throws Exception {
+ public void close() {
+ datastoreConfigMXBean.unregisterMBean();
+
+ if(closeable != null) {
+ try {
+ closeable.close();
+ } catch (Exception e) {
+ LOG.debug("Error closing insance", e);
+ }
+ }
+
actorContext.shutdown();
}
private static volatile ActorSystem persistentActorSystem = null;
public static DistributedDataStore createInstance(SchemaService schemaService,
- DatastoreContext datastoreContext, BundleContext bundleContext) {
+ DatastoreContext datastoreContext, BundleContext bundleContext) {
+
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(datastoreContext);
+ DatastoreContextConfigAdminOverlay overlay = new DatastoreContextConfigAdminOverlay(
+ introspector, bundleContext);
ActorSystem actorSystem = getOrCreateInstance(bundleContext, datastoreContext.getConfigurationReader());
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
- final DistributedDataStore dataStore =
- new DistributedDataStore(actorSystem, new ClusterWrapperImpl(actorSystem),
- config, datastoreContext);
+ final DistributedDataStore dataStore = new DistributedDataStore(actorSystem,
+ new ClusterWrapperImpl(actorSystem), config, introspector.getContext());
ShardStrategyFactory.setConfiguration(config);
schemaService.registerSchemaContextListener(dataStore);
+
+ dataStore.setCloseable(overlay);
return dataStore;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+
+/**
+ * MXBean interface for data store configuration. Each getter exposes the corresponding setting
+ * from the DatastoreContext in effect for the data store.
+ *
+ * @author Thomas Pantelis
+ */
+public interface DatastoreConfigurationMXBean {
+ /** Idle time after which a shard transaction self-destructs, in seconds. */
+ long getShardTransactionIdleTimeoutInSeconds();
+
+ /** Timeout for akka operations (remote or local), in seconds. */
+ long getOperationTimeoutInSeconds();
+
+ /** Interval at which a shard sends heart beat messages, in milliseconds. */
+ long getShardHeartbeatIntervalInMillis();
+
+ /** Number of journal log entries batched during shard recovery. */
+ int getShardJournalRecoveryLogBatchSize();
+
+ /** Interval at which the shard leader checks for isolation, in milliseconds. */
+ long getShardIsolatedLeaderCheckIntervalInMillis();
+
+ /** Multiplier applied to the heartbeat interval to derive the election timeout. */
+ long getShardElectionTimeoutFactor();
+
+ /** Percentage of Runtime.totalMemory() the in-memory journal may use before snapshotting. */
+ int getShardSnapshotDataThresholdPercentage();
+
+ /** Minimum number of in-memory journal entries before a snapshot is taken. */
+ long getShardSnapshotBatchCount();
+
+ /** Idle timeout for a shard transaction's three-phase commit, in seconds. */
+ long getShardTransactionCommitTimeoutInSeconds();
+
+ /** Maximum capacity of each shard's transaction commit queue. */
+ int getShardTransactionCommitQueueCapacity();
+
+ /** Timeout for a shard to initialize from persistence on startup, in seconds. */
+ long getShardInitializationTimeoutInSeconds();
+
+ /** Timeout for a shard to elect a leader, in seconds. */
+ long getShardLeaderElectionTimeoutInSeconds();
+
+ /** Whether data persistence is enabled for this data store. */
+ boolean isPersistent();
+
+ /** Initial transactions/second allowed before back pressure is applied. */
+ long getTransactionCreationInitialRateLimit();
+
+ /** Maximum thread pool size for each shard's data change notification executor. */
+ int getMaxShardDataChangeExecutorPoolSize();
+
+ /** Maximum queue size for each shard's data change notification executor. */
+ int getMaxShardDataChangeExecutorQueueSize();
+
+ /** Maximum queue size for each shard's data change listener. */
+ int getMaxShardDataChangeListenerQueueSize();
+
+ /** Maximum queue size for each shard's data store executor. */
+ int getMaxShardDataStoreExecutorQueueSize();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+
+/**
+ * Implementation of DatastoreConfigurationMXBean. All getters delegate to the DatastoreContext
+ * supplied via {@link #setContext}.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
+ public static final String JMX_CATEGORY_CONFIGURATION = "Configuration";
+
+ // NOTE(review): mutable but not volatile - if setContext may be called after JMX
+ // registration from another thread, readers could see a stale reference; confirm that
+ // initialization is single-threaded.
+ private DatastoreContext context;
+
+ /**
+ * @param mxBeanType the data store MXBean type used to form the bean's ObjectName.
+ */
+ public DatastoreConfigurationMXBeanImpl(String mxBeanType) {
+ super("Datastore", mxBeanType, JMX_CATEGORY_CONFIGURATION);
+ }
+
+ /**
+ * Sets the DatastoreContext whose settings this bean exposes. Must be called before the bean
+ * is queried, otherwise the getters will throw NullPointerException.
+ */
+ public void setContext(DatastoreContext context) {
+ this.context = context;
+ }
+
+ @Override
+ public long getShardTransactionIdleTimeoutInSeconds() {
+ return context.getShardTransactionIdleTimeout().toSeconds();
+ }
+
+ @Override
+ public long getOperationTimeoutInSeconds() {
+ return context.getOperationTimeoutInSeconds();
+ }
+
+ @Override
+ public long getShardHeartbeatIntervalInMillis() {
+ return context.getShardRaftConfig().getHeartBeatInterval().toMillis();
+ }
+
+ @Override
+ public int getShardJournalRecoveryLogBatchSize() {
+ return context.getShardRaftConfig().getJournalRecoveryLogBatchSize();
+ }
+
+ @Override
+ public long getShardIsolatedLeaderCheckIntervalInMillis() {
+ return context.getShardRaftConfig().getIsolatedCheckIntervalInMillis();
+ }
+
+ @Override
+ public long getShardElectionTimeoutFactor() {
+ return context.getShardRaftConfig().getElectionTimeoutFactor();
+ }
+
+ @Override
+ public int getShardSnapshotDataThresholdPercentage() {
+ return context.getShardRaftConfig().getSnapshotDataThresholdPercentage();
+ }
+
+ @Override
+ public long getShardSnapshotBatchCount() {
+ return context.getShardRaftConfig().getSnapshotBatchCount();
+ }
+
+ @Override
+ public long getShardTransactionCommitTimeoutInSeconds() {
+ return context.getShardTransactionCommitTimeoutInSeconds();
+ }
+
+ @Override
+ public int getShardTransactionCommitQueueCapacity() {
+ return context.getShardTransactionCommitQueueCapacity();
+ }
+
+ @Override
+ public long getShardInitializationTimeoutInSeconds() {
+ return context.getShardInitializationTimeout().duration().toSeconds();
+ }
+
+ @Override
+ public long getShardLeaderElectionTimeoutInSeconds() {
+ return context.getShardLeaderElectionTimeout().duration().toSeconds();
+ }
+
+ @Override
+ public boolean isPersistent() {
+ return context.isPersistent();
+ }
+
+ @Override
+ public long getTransactionCreationInitialRateLimit() {
+ return context.getTransactionCreationInitialRateLimit();
+ }
+
+ @Override
+ public int getMaxShardDataChangeExecutorPoolSize() {
+ return context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
+ }
+
+ @Override
+ public int getMaxShardDataChangeExecutorQueueSize() {
+ return context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
+ }
+
+ @Override
+ public int getMaxShardDataChangeListenerQueueSize() {
+ return context.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
+ }
+
+ @Override
+ public int getMaxShardDataStoreExecutorQueueSize() {
+ return context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
+ }
+}
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
-import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.osgi.framework.BundleContext;
-import scala.concurrent.duration.Duration;
public class DistributedConfigDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedConfigDataStoreProviderModule {
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
.dataStoreType("config")
- .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
- .shardTransactionIdleTimeout(Duration.create(
- props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
+ .maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
+ .maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
+ .maxShardDataStoreExecutorQueueSize(props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue())
+ .shardTransactionIdleTimeoutInMinutes(props.getShardTransactionIdleTimeoutInMinutes().getValue())
.operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
.shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardSnapshotDataThresholdPercentage(props.getShardSnapshotDataThresholdPercentage().getValue().intValue())
- .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
- .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
- .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
+ .shardHeartbeatIntervalInMillis(props.getShardHeartbeatIntervalInMillis().getValue())
+ .shardInitializationTimeoutInSeconds(props.getShardInitializationTimeoutInSeconds().getValue())
+ .shardLeaderElectionTimeoutInSeconds(props.getShardLeaderElectionTimeoutInSeconds().getValue())
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
- .transactionCreationInitialRateLimit(props.getTxCreationInitialRateLimit().getValue())
+ .transactionCreationInitialRateLimit(props.getTransactionCreationInitialRateLimit().getValue())
.shardBatchedModificationCount(props.getShardBatchedModificationCount().getValue().intValue())
.build();
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
-import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.osgi.framework.BundleContext;
-import scala.concurrent.duration.Duration;
public class DistributedOperationalDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedOperationalDataStoreProviderModule {
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
.dataStoreType("operational")
- .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
- .shardTransactionIdleTimeout(Duration.create(
- props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
+ .maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
+ .maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
+ .maxShardDataStoreExecutorQueueSize(props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue())
+ .shardTransactionIdleTimeoutInMinutes(props.getShardTransactionIdleTimeoutInMinutes().getValue())
.operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
.shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardSnapshotDataThresholdPercentage(props.getShardSnapshotDataThresholdPercentage().getValue().intValue())
- .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
- .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
- .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
+ .shardHeartbeatIntervalInMillis(props.getShardHeartbeatIntervalInMillis().getValue())
+ .shardInitializationTimeoutInSeconds(props.getShardInitializationTimeoutInSeconds().getValue())
+ .shardLeaderElectionTimeoutInSeconds(props.getShardLeaderElectionTimeoutInSeconds().getValue())
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
- .transactionCreationInitialRateLimit(props.getTxCreationInitialRateLimit().getValue())
+ .transactionCreationInitialRateLimit(props.getTransactionCreationInitialRateLimit().getValue())
.shardBatchedModificationCount(props.getShardBatchedModificationCount().getValue().intValue())
.build();
}
- leaf shard-hearbeat-interval-in-millis {
+ leaf shard-heartbeat-interval-in-millis {
default 500;
type heartbeat-interval-type;
description "The interval at which a shard will send a heart beat message to its remote shard.";
followers are active and term itself as isolated";
}
- leaf tx-creation-initial-rate-limit {
+ leaf transaction-creation-initial-rate-limit {
default 100;
type non-zero-uint32-type;
description "The initial number of transactions per second that are allowed before the data store
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import java.io.IOException;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import org.junit.Test;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
+
+/**
+ * Unit tests for DatastoreContextConfigAdminOverlay.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextConfigAdminOverlayTest {
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void test() throws IOException {
+ BundleContext mockBundleContext = mock(BundleContext.class);
+ ServiceReference<ConfigurationAdmin> mockServiceRef = mock(ServiceReference.class);
+ ConfigurationAdmin mockConfigAdmin = mock(ConfigurationAdmin.class);
+ Configuration mockConfig = mock(Configuration.class);
+ DatastoreContextIntrospector mockIntrospector = mock(DatastoreContextIntrospector.class);
+
+ doReturn(mockServiceRef).when(mockBundleContext).getServiceReference(ConfigurationAdmin.class);
+ doReturn(mockConfigAdmin).when(mockBundleContext).getService(mockServiceRef);
+
+ doReturn(mockConfig).when(mockConfigAdmin).getConfiguration(DatastoreContextConfigAdminOverlay.CONFIG_ID);
+
+ doReturn(DatastoreContextConfigAdminOverlay.CONFIG_ID).when(mockConfig).getPid();
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("property", "value");
+ doReturn(properties).when(mockConfig).getProperties();
+
+ try(DatastoreContextConfigAdminOverlay overlay = new DatastoreContextConfigAdminOverlay(
+ mockIntrospector, mockBundleContext)) {
+ }
+
+ verify(mockIntrospector).update(properties);
+
+ verify(mockBundleContext).ungetService(mockServiceRef);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+
+/**
+ * Unit tests for DatastoreContextIntrospector.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextIntrospectorTest {
+
+ @Test
+ public void testUpdate() {
+ DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context);
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "31");
+ properties.put("operation-timeout-in-seconds", "26");
+ properties.put("shard-transaction-commit-timeout-in-seconds", "100");
+ properties.put("shard-journal-recovery-log-batch-size", "199");
+ properties.put("shard-snapshot-batch-count", "212");
+ properties.put("shard-heartbeat-interval-in-millis", "101");
+ properties.put("shard-transaction-commit-queue-capacity", "567");
+ properties.put("shard-initialization-timeout-in-seconds", "82");
+ properties.put("shard-leader-election-timeout-in-seconds", "66");
+ properties.put("shard-isolated-leader-check-interval-in-millis", "123");
+ properties.put("shard-snapshot-data-threshold-percentage", "100");
+ properties.put("shard-election-timeout-factor", "21");
+ properties.put("shard-batched-modification-count", "901");
+ properties.put("transactionCreationInitialRateLimit", "200");
+ properties.put("MaxShardDataChangeExecutorPoolSize", "41");
+ properties.put("Max-Shard-Data-Change Executor-Queue Size", "1111");
+ properties.put(" max shard data change listener queue size", "2222");
+ properties.put("mAx-shaRd-data-STORE-executor-quEUe-size", "3333");
+ properties.put("persistent", "false");
+
+ boolean updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(31, context.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(26, context.getOperationTimeoutInSeconds());
+ assertEquals(100, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(101, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
+ assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+ assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(21, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(901, context.getShardBatchedModificationCount());
+ assertEquals(200, context.getTransactionCreationInitialRateLimit());
+ assertEquals(41, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(3333, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ assertEquals(false, context.isPersistent());
+
+ properties.put("shard-transaction-idle-timeout-in-minutes", "32");
+ properties.put("operation-timeout-in-seconds", "27");
+ properties.put("shard-heartbeat-interval-in-millis", "102");
+ properties.put("shard-election-timeout-factor", "22");
+ properties.put("max-shard-data-change-executor-pool-size", "42");
+ properties.put("max-shard-data-store-executor-queue-size", "4444");
+ properties.put("persistent", "true");
+
+ updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(32, context.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(27, context.getOperationTimeoutInSeconds());
+ assertEquals(100, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(102, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
+ assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+ assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(22, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(200, context.getTransactionCreationInitialRateLimit());
+ assertEquals(42, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(4444, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ assertEquals(true, context.isPersistent());
+
+ updated = introspector.update(null);
+ assertEquals("updated", false, updated);
+
+ updated = introspector.update(new Hashtable<String, Object>());
+ assertEquals("updated", false, updated);
+ }
+
+
+ @Test
+ public void testUpdateWithInvalidValues() {
+ DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context);
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "0"); // bad - must be > 0
+ properties.put("shard-journal-recovery-log-batch-size", "199");
+ properties.put("shard-transaction-commit-timeout-in-seconds", "bogus"); // bad - NaN
+ properties.put("shard-snapshot-batch-count", "212"); // good
+ properties.put("operation-timeout-in-seconds", "4"); // bad - must be >= 5
+ properties.put("shard-heartbeat-interval-in-millis", "99"); // bad - must be >= 100
+ properties.put("shard-transaction-commit-queue-capacity", "567"); // good
+ properties.put("shard-snapshot-data-threshold-percentage", "101"); // bad - must be 0-100
+ properties.put("shard-initialization-timeout-in-seconds", "-1"); // bad - must be > 0
+ properties.put("max-shard-data-change-executor-pool-size", "bogus"); // bad - NaN
+ properties.put("unknownProperty", "1"); // bad - invalid property name
+
+ boolean updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT, context.getShardTransactionIdleTimeout());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
+ context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT, context.getShardInitializationTimeout());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ }
+
+ @Test
+ public void testUpdateWithDatastoreTypeSpecificProperties() {
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "22"); // global setting
+ properties.put("operational.shard-transaction-idle-timeout-in-minutes", "33"); // operational override
+ properties.put("config.shard-transaction-idle-timeout-in-minutes", "44"); // config override
+
+ properties.put("max-shard-data-change-executor-pool-size", "222"); // global setting
+ properties.put("operational.max-shard-data-change-executor-pool-size", "333"); // operational override
+ properties.put("config.max-shard-data-change-executor-pool-size", "444"); // config override
+
+ properties.put("persistent", "false"); // global setting
+ properties.put("operational.Persistent", "true"); // operational override
+
+ DatastoreContext operContext = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector operIntrospector = new DatastoreContextIntrospector(operContext);
+ boolean updated = operIntrospector.update(properties);
+ assertEquals("updated", true, updated);
+ operContext = operIntrospector.getContext();
+
+ assertEquals(33, operContext.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(true, operContext.isPersistent());
+ assertEquals(333, operContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+
+ DatastoreContext configContext = DatastoreContext.newBuilder().dataStoreType("config").build();
+ DatastoreContextIntrospector configIntrospector = new DatastoreContextIntrospector(configContext);
+ updated = configIntrospector.update(properties);
+ assertEquals("updated", true, updated);
+ configContext = configIntrospector.getContext();
+
+ assertEquals(44, configContext.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(false, configContext.isPersistent());
+ assertEquals(444, configContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ }
+}
package org.opendaylight.controller.cluster.datastore;
import static org.junit.Assert.assertEquals;
-import org.junit.Before;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_CONFIGURATION_READER;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_PERSISTENT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SNAPSHOT_BATCH_COUNT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
public class DatastoreContextTest {
- private DatastoreContext.Builder builder;
+ @Test
+ public void testNewBuilderWithDefaultSettings() {
+ DatastoreContext context = DatastoreContext.newBuilder().build();
- @Before
- public void setUp(){
- builder = new DatastoreContext.Builder();
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT, context.getShardTransactionIdleTimeout());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis(),
+ context.getShardInitializationTimeout().duration().toMillis());
+ assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis(),
+ context.getShardLeaderElectionTimeout().duration().toMillis());
+ assertEquals(DEFAULT_PERSISTENT, context.isPersistent());
+ assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
+ assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, context.getTransactionCreationInitialRateLimit());
+ assertEquals(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT, context.getShardBatchedModificationCount());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
}
@Test
- public void testDefaults(){
- DatastoreContext build = builder.build();
-
- assertEquals(DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT , build.getShardTransactionIdleTimeout());
- assertEquals(DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, build.getOperationTimeoutInSeconds());
- assertEquals(DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, build.getShardTransactionCommitTimeoutInSeconds());
- assertEquals(DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE, build.getShardRaftConfig().getJournalRecoveryLogBatchSize());
- assertEquals(DatastoreContext.DEFAULT_SNAPSHOT_BATCH_COUNT, build.getShardRaftConfig().getSnapshotBatchCount());
- assertEquals(DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, build.getShardRaftConfig().getHeartBeatInterval().length());
- assertEquals(DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY, build.getShardTransactionCommitQueueCapacity());
- assertEquals(DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT, build.getShardInitializationTimeout());
- assertEquals(DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT, build.getShardLeaderElectionTimeout());
- assertEquals(DatastoreContext.DEFAULT_PERSISTENT, build.isPersistent());
- assertEquals(DatastoreContext.DEFAULT_CONFIGURATION_READER, build.getConfigurationReader());
- assertEquals(DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS, build.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
- assertEquals(DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE, build.getShardRaftConfig().getSnapshotDataThresholdPercentage());
- assertEquals(DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, build.getShardRaftConfig().getElectionTimeoutFactor());
- assertEquals(DatastoreContext.DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, build.getTransactionCreationInitialRateLimit());
- assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT, build.getShardBatchedModificationCount());
+ public void testNewBuilderWithCustomSettings() {
+ DatastoreContext.Builder builder = DatastoreContext.newBuilder();
+
+ builder.shardTransactionIdleTimeout(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.operationTimeoutInSeconds(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS + 1);
+ builder.shardTransactionCommitTimeoutInSeconds(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1);
+ builder.shardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1);
+ builder.shardSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT + 1);
+ builder.shardHeartbeatIntervalInMillis(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1);
+ builder.shardTransactionCommitQueueCapacity(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1);
+ builder.shardInitializationTimeout(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.shardLeaderElectionTimeout(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.persistent(!DEFAULT_PERSISTENT);
+ builder.shardIsolatedLeaderCheckIntervalInMillis(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1);
+ builder.shardSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1);
+ builder.shardElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1);
+ builder.transactionCreationInitialRateLimit(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1);
+ builder.shardBatchedModificationCount(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1);
+ builder.maxShardDataChangeExecutorPoolSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1);
+ builder.maxShardDataChangeExecutorQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1);
+ builder.maxShardDataChangeListenerQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1);
+ builder.maxShardDataStoreExecutorQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1);
+
+ DatastoreContext context = builder.build();
+
+ verifyCustomSettings(context);
+
+ builder = DatastoreContext.newBuilderFrom(context);
+
+ DatastoreContext newContext = builder.build();
+
+ verifyCustomSettings(newContext);
+
+ Assert.assertNotSame(context, newContext);
}
+ private void verifyCustomSettings(DatastoreContext context) {
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
+ context.getShardTransactionIdleTimeout().toMillis());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS + 1, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1,
+ context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1,
+ context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT + 1, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1,
+ context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis() + 1,
+ context.getShardInitializationTimeout().duration().toMillis());
+ assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
+ context.getShardLeaderElectionTimeout().duration().toMillis());
+ assertEquals(!DEFAULT_PERSISTENT, context.isPersistent());
+ assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
+ assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1,
+ context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1,
+ context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1, context.getTransactionCreationInitialRateLimit());
+ assertEquals(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1,
+ context.getShardBatchedModificationCount());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ }
}
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.Duration;
public class ShardTransactionTest extends AbstractActorTest {
public void testShardTransactionInactivity() {
datastoreContext = DatastoreContext.newBuilder().shardTransactionIdleTimeout(
- Duration.create(500, TimeUnit.MILLISECONDS)).build();
+ 500, TimeUnit.MILLISECONDS).build();
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),