import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.PeerAddressResolver;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
public static final long DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS = TimeUnit.MILLISECONDS.convert(2, TimeUnit.MINUTES);
public static final int DEFAULT_SHARD_SNAPSHOT_CHUNK_SIZE = 2048000;
- private static Set<String> globalDatastoreTypes = Sets.newConcurrentHashSet();
+ private static final Set<String> globalDatastoreTypes = Sets.newConcurrentHashSet();
private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
private AkkaConfigurationReader configurationReader = DEFAULT_CONFIGURATION_READER;
private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
- private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
+ private String dataStoreName = UNKNOWN_DATA_STORE_TYPE;
+ private LogicalDatastoreType logicalStoreType = LogicalDatastoreType.OPERATIONAL;
private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
private boolean writeOnlyTransactionOptimizationsEnabled = true;
private long shardCommitQueueExpiryTimeoutInMillis = DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS;
this.persistent = other.persistent;
this.configurationReader = other.configurationReader;
this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
- this.dataStoreType = other.dataStoreType;
+ this.dataStoreName = other.dataStoreName;
+ this.logicalStoreType = other.logicalStoreType;
this.shardBatchedModificationCount = other.shardBatchedModificationCount;
this.writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
this.shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
return raftConfig.getElectionTimeoutFactor();
}
+ /**
+  * Returns the name of this datastore instance (e.g. "config" or "operational");
+  * this is the value recorded in {@code globalDatastoreTypes} on build.
+  */
+ public String getDataStoreName(){
+ return dataStoreName;
+ }
+
+ /**
+  * Returns the {@link LogicalDatastoreType} of this datastore
+  * (defaults to {@code OPERATIONAL} unless set via the builder).
+  */
+ public LogicalDatastoreType getLogicalStoreType() {
+ return logicalStoreType;
+ }
+
+ /**
+  * Returns the datastore name; retained for backwards compatibility.
+  *
+  * @deprecated Use {@link #getDataStoreName()} or {@link #getLogicalStoreType()} instead.
+  */
+ @Deprecated
public String getDataStoreType(){
- return dataStoreType;
+ return getDataStoreName();
}
public long getTransactionCreationInitialRateLimit() {
return this;
}
+ /**
+  * Sets the datastore name; retained for backwards compatibility and simply
+  * delegates to {@link #dataStoreName(String)}.
+  *
+  * @deprecated Use {@link #logicalStoreType(LogicalDatastoreType)} or {@link #dataStoreName(String)}.
+  */
+ @Deprecated
public Builder dataStoreType(String dataStoreType){
- datastoreContext.dataStoreType = dataStoreType;
- datastoreContext.dataStoreMXBeanType = "Distributed" + WordUtils.capitalize(dataStoreType) + "Datastore";
+ return dataStoreName(dataStoreType);
+ }
+
+ /**
+  * Sets the logical datastore type and derives a backwards-compatible datastore
+  * name from it ("config" for CONFIGURATION, "operational" for OPERATIONAL,
+  * otherwise the enum constant's name).
+  *
+  * @throws NullPointerException if logicalStoreType is null
+  */
+ public Builder logicalStoreType(LogicalDatastoreType logicalStoreType){
+ datastoreContext.logicalStoreType = Preconditions.checkNotNull(logicalStoreType);
+
+ // Retain compatible naming so existing configuration keys (e.g. "operational.*"
+ // overrides) and MXBean names keep working.
+ switch (logicalStoreType) {
+ case CONFIGURATION:
+ dataStoreName("config");
+ break;
+ case OPERATIONAL:
+ dataStoreName("operational");
+ break;
+ default:
+ dataStoreName(logicalStoreType.name());
+ }
+
+ return this;
+ }
+
+ /**
+  * Sets the datastore name and derives the MXBean type name from it
+  * ("Distributed&lt;Name&gt;Datastore").
+  *
+  * @throws NullPointerException if dataStoreName is null
+  */
+ public Builder dataStoreName(String dataStoreName){
+ datastoreContext.dataStoreName = Preconditions.checkNotNull(dataStoreName);
+ datastoreContext.dataStoreMXBeanType = "Distributed" + WordUtils.capitalize(dataStoreName) + "Datastore";
return this;
}
maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
- if(datastoreContext.dataStoreType != null) {
- globalDatastoreTypes.add(datastoreContext.dataStoreType);
+ if(datastoreContext.dataStoreName != null) {
+ globalDatastoreTypes.add(datastoreContext.dataStoreName);
}
return datastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreSnapshotRestore;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.osgi.framework.BundleContext;
public class DistributedConfigDataStoreProviderModule extends
}
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
- .dataStoreType("config")
+ .logicalStoreType(LogicalDatastoreType.CONFIGURATION)
.maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
.maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
.maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreSnapshotRestore;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.osgi.framework.BundleContext;
public class DistributedOperationalDataStoreProviderModule extends
}
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
- .dataStoreType("operational")
+ .logicalStoreType(LogicalDatastoreType.OPERATIONAL)
.maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
.maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
.maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
import java.util.Dictionary;
import java.util.Hashtable;
import org.junit.Test;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
/**
@Test
public void testUpdate() {
- DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContext context = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.OPERATIONAL).build();
DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context );
Dictionary<String, Object> properties = new Hashtable<>();
@Test
public void testUpdateWithInvalidValues() {
- DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContext context = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.OPERATIONAL).build();
DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context );
Dictionary<String, Object> properties = new Hashtable<>();
properties.put("persistent", "false"); // global setting
properties.put("operational.Persistent", "true"); // operational override
- DatastoreContext operContext = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContext operContext = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.OPERATIONAL).build();
DatastoreContextIntrospector operIntrospector = new DatastoreContextIntrospector(operContext);
boolean updated = operIntrospector.update(properties);
assertEquals("updated", true, updated);
assertEquals(true, operContext.isPersistent());
assertEquals(333, operContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
- DatastoreContext configContext = DatastoreContext.newBuilder().dataStoreType("config").build();
+ DatastoreContext configContext = DatastoreContext.newBuilder()
+ .logicalStoreType(LogicalDatastoreType.CONFIGURATION).build();
DatastoreContextIntrospector configIntrospector = new DatastoreContextIntrospector(configContext);
updated = configIntrospector.update(properties);
assertEquals("updated", true, updated);
properties.put("config.shard-transaction-idle-timeout-in-minutes", "44"); // config override
properties.put("topology.shard-transaction-idle-timeout-in-minutes", "55"); // global shard override
- DatastoreContext operContext = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContext operContext = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.OPERATIONAL).build();
DatastoreContextIntrospector operIntrospector = new DatastoreContextIntrospector(operContext);
DatastoreContext shardContext = operIntrospector.newContextFactory().getShardDatastoreContext("topology");
shardContext = operIntrospector.newContextFactory().getShardDatastoreContext("topology");
assertEquals(55, shardContext.getShardTransactionIdleTimeout().toMinutes());
- DatastoreContext configContext = DatastoreContext.newBuilder().dataStoreType("config").build();
+ DatastoreContext configContext = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.CONFIGURATION).build();
DatastoreContextIntrospector configIntrospector = new DatastoreContextIntrospector(configContext);
configIntrospector.update(properties);
configContext = configIntrospector.getContext();
ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
Configuration config = new ConfigurationImpl(moduleShardsConfig, "modules.conf");
- datastoreContextBuilder.dataStoreType(typeName);
+ datastoreContextBuilder.dataStoreName(typeName);
DatastoreContext datastoreContext = datastoreContextBuilder.build();
DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
private static String mockShardName;
private final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder().
- dataStoreType(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS)
+ dataStoreName(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS)
.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(6);
private final Collection<ActorSystem> actorSystems = new ArrayList<>();
static int ID_COUNTER = 1;
static final QName QNAME = QName.create("test", "2015-08-11", "foo");
- private final String dataStoreType = "config" + ID_COUNTER++;
+ private final String dataStoreName = "config" + ID_COUNTER++;
private DistributedDataStore dataStore;
@Before
public void setUp() {
- DatastoreContext datastoreContext = DatastoreContext.newBuilder().dataStoreType(dataStoreType).
+ DatastoreContext datastoreContext = DatastoreContext.newBuilder().dataStoreName(dataStoreName).
shardInitializationTimeout(10, TimeUnit.SECONDS).build();
Configuration configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider()) {
import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
import org.opendaylight.controller.cluster.raft.utils.EchoActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
TestActorRef<MessageCollectorActor> shardManager =
TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
- DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.CONFIGURATION).
shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
final String expPrimaryPath = "akka://test-system/find-primary-shard";
TestActorRef<MessageCollectorActor> shardManager =
TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
- DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.CONFIGURATION).
shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
final DataTree mockDataTree = Mockito.mock(DataTree.class);
TestActorRef<MessageCollectorActor> shardManager =
TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
- DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
+ logicalStoreType(LogicalDatastoreType.CONFIGURATION).
shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
ActorContext actorContext =