X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FDatastoreContext.java;h=98afd7f4e02fc76354fd22765e75ed2ba8b98123;hb=refs%2Fchanges%2F33%2F78433%2F4;hp=02f2768fbb2a74f3fce79994800dbfb88da774d9;hpb=b25ae9347455b1bae8f25424a9ceffc017f2f0db;p=controller.git

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
index 02f2768fbb..98afd7f4e0 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.util.Timeout;
@@ -14,16 +13,19 @@ import com.google.common.base.Preconditions;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.text.WordUtils;
+import org.apache.commons.text.WordUtils;
+import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
+import org.opendaylight.controller.cluster.access.client.ClientActorConfig;
 import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
 import org.opendaylight.controller.cluster.common.actor.FileAkkaConfigurationReader;
 import org.opendaylight.controller.cluster.raft.ConfigParams;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.PeerAddressResolver;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreConfigProperties;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import scala.concurrent.duration.Duration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -31,10 +33,12 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Thomas Pantelis
  */
-public class DatastoreContext {
+// Non-final for mocking
+public class DatastoreContext implements ClientActorConfig {
     public static final String METRICS_DOMAIN = "org.opendaylight.controller.cluster.datastore";
 
-    public static final Duration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = Duration.create(10, TimeUnit.MINUTES);
+    public static final FiniteDuration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = FiniteDuration.create(10,
+            TimeUnit.MINUTES);
     public static final int DEFAULT_OPERATION_TIMEOUT_IN_MS = 5000;
     public static final int DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS = 30;
     public static final int DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE = 1;
@@ -54,16 +58,19 @@ public class DatastoreContext {
     public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT = 1000;
     public static final long DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS =
             TimeUnit.MILLISECONDS.convert(2, TimeUnit.MINUTES);
-    public static final int DEFAULT_SHARD_SNAPSHOT_CHUNK_SIZE = 2048000;
+    public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 2048 * 1000; // 2MB
+    public static final int DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY = 512;
     public static final long DEFAULT_SYNC_INDEX_THRESHOLD = 10;
 
+    private static final Logger LOG = LoggerFactory.getLogger(DatastoreContext.class);
+
     private static final Set<String> GLOBAL_DATASTORE_NAMES = ConcurrentHashMap.newKeySet();
 
     private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
     private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
-    private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+    private FiniteDuration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
     private long operationTimeoutInMillis = DEFAULT_OPERATION_TIMEOUT_IN_MS;
     private String dataStoreMXBeanType;
     private int shardTransactionCommitTimeoutInSeconds = DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
@@ -82,20 +89,25 @@ public class DatastoreContext {
     private boolean useTellBasedProtocol = false;
     private boolean transactionDebugContextEnabled = false;
     private String shardManagerPersistenceId;
+    private int maximumMessageSliceSize = DEFAULT_MAX_MESSAGE_SLICE_SIZE;
+    private long backendAlivenessTimerInterval = AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS;
+    private long requestTimeout = AbstractClientConnection.DEFAULT_REQUEST_TIMEOUT_NANOS;
+    private long noProgressTimeout = AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS;
+    private int initialPayloadSerializedBufferCapacity = DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY;
 
     public static Set<String> getGlobalDatastoreNames() {
         return GLOBAL_DATASTORE_NAMES;
     }
 
-    private DatastoreContext() {
+    DatastoreContext() {
         setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
         setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
         setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
         setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
         setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
         setElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR);
-        setShardSnapshotChunkSize(DEFAULT_SHARD_SNAPSHOT_CHUNK_SIZE);
         setSyncIndexThreshold(DEFAULT_SYNC_INDEX_THRESHOLD);
+        setMaximumMessageSliceSize(DEFAULT_MAX_MESSAGE_SLICE_SIZE);
     }
 
     private DatastoreContext(final DatastoreContext other) {
@@ -119,6 +131,10 @@ public class DatastoreContext {
         this.transactionDebugContextEnabled = other.transactionDebugContextEnabled;
         this.shardManagerPersistenceId = other.shardManagerPersistenceId;
         this.useTellBasedProtocol = other.useTellBasedProtocol;
+        this.backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
+        this.requestTimeout = other.requestTimeout;
+        this.noProgressTimeout = other.noProgressTimeout;
+        this.initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
 
         setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
         setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
@@ -127,6 +143,7 @@ public class DatastoreContext {
         setSnapshotDataThresholdPercentage(other.raftConfig.getSnapshotDataThresholdPercentage());
         setElectionTimeoutFactor(other.raftConfig.getElectionTimeoutFactor());
         setCustomRaftPolicyImplementation(other.raftConfig.getCustomRaftPolicyImplementationClass());
+        setMaximumMessageSliceSize(other.getMaximumMessageSliceSize());
         setShardSnapshotChunkSize(other.raftConfig.getSnapshotChunkSize());
         setPeerAddressResolver(other.raftConfig.getPeerAddressResolver());
         setTempFileDirectory(other.getTempFileDirectory());
@@ -146,7 +163,7 @@ public class DatastoreContext {
         return dataStoreProperties;
     }
 
-    public Duration getShardTransactionIdleTimeout() {
+    public FiniteDuration getShardTransactionIdleTimeout() {
         return shardTransactionIdleTimeout;
     }
 
@@ -210,6 +227,7 @@ public class DatastoreContext {
         return shardManagerPersistenceId;
     }
 
+    @Override
     public String getTempFileDirectory() {
         return raftConfig.getTempFileDirectory();
     }
@@ -218,6 +236,7 @@ public class DatastoreContext {
         raftConfig.setTempFileDirectory(tempFileDirectory);
     }
 
+    @Override
    public int getFileBackedStreamingThreshold() {
         return raftConfig.getFileBackedStreamingThreshold();
     }
@@ -263,8 +282,18 @@ public class DatastoreContext {
         raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
     }
 
+    @Deprecated
     private void setShardSnapshotChunkSize(final int shardSnapshotChunkSize) {
-        raftConfig.setSnapshotChunkSize(shardSnapshotChunkSize);
+        // We'll honor the shardSnapshotChunkSize setting for backwards compatibility but only if it doesn't exceed
+        // maximumMessageSliceSize.
+        if (shardSnapshotChunkSize < maximumMessageSliceSize) {
+            raftConfig.setSnapshotChunkSize(shardSnapshotChunkSize);
+        }
+    }
+
+    private void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
+        raftConfig.setSnapshotChunkSize(maximumMessageSliceSize);
+        this.maximumMessageSliceSize = maximumMessageSliceSize;
     }
 
     private void setSyncIndexThreshold(final long syncIndexThreshold) {
@@ -291,8 +320,28 @@ public class DatastoreContext {
         return useTellBasedProtocol;
     }
 
-    public int getShardSnapshotChunkSize() {
-        return raftConfig.getSnapshotChunkSize();
+    @Override
+    public int getMaximumMessageSliceSize() {
+        return maximumMessageSliceSize;
+    }
+
+    @Override
+    public long getBackendAlivenessTimerInterval() {
+        return backendAlivenessTimerInterval;
+    }
+
+    @Override
+    public long getRequestTimeout() {
+        return requestTimeout;
+    }
+
+    @Override
+    public long getNoProgressTimeout() {
+        return noProgressTimeout;
+    }
+
+    public int getInitialPayloadSerializedBufferCapacity() {
+        return initialPayloadSerializedBufferCapacity;
     }
 
     public static class Builder implements org.opendaylight.yangtools.concepts.Builder<DatastoreContext> {
@@ -306,7 +355,7 @@ public class DatastoreContext {
         private int maxShardDataStoreExecutorQueueSize =
                 InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE;
 
-        private Builder(final DatastoreContext datastoreContext) {
+        Builder(final DatastoreContext datastoreContext) {
            this.datastoreContext = datastoreContext;
 
             if (datastoreContext.getDataStoreProperties() != null) {
@@ -333,7 +382,7 @@ public class DatastoreContext {
 
         public Builder shardTransactionIdleTimeout(final long timeout, final TimeUnit unit) {
-            datastoreContext.shardTransactionIdleTimeout = Duration.create(timeout, unit);
+            datastoreContext.shardTransactionIdleTimeout = FiniteDuration.create(timeout, unit);
             return this;
         }
 
@@ -484,23 +533,23 @@ public class DatastoreContext {
             return this;
         }
 
-        public Builder maxShardDataChangeExecutorPoolSize(final int maxShardDataChangeExecutorPoolSize) {
-            this.maxShardDataChangeExecutorPoolSize = maxShardDataChangeExecutorPoolSize;
+        public Builder maxShardDataChangeExecutorPoolSize(final int newMaxShardDataChangeExecutorPoolSize) {
+            this.maxShardDataChangeExecutorPoolSize = newMaxShardDataChangeExecutorPoolSize;
             return this;
         }
 
-        public Builder maxShardDataChangeExecutorQueueSize(final int maxShardDataChangeExecutorQueueSize) {
-            this.maxShardDataChangeExecutorQueueSize = maxShardDataChangeExecutorQueueSize;
+        public Builder maxShardDataChangeExecutorQueueSize(final int newMaxShardDataChangeExecutorQueueSize) {
+            this.maxShardDataChangeExecutorQueueSize = newMaxShardDataChangeExecutorQueueSize;
             return this;
         }
 
-        public Builder maxShardDataChangeListenerQueueSize(final int maxShardDataChangeListenerQueueSize) {
-            this.maxShardDataChangeListenerQueueSize = maxShardDataChangeListenerQueueSize;
+        public Builder maxShardDataChangeListenerQueueSize(final int newMaxShardDataChangeListenerQueueSize) {
+            this.maxShardDataChangeListenerQueueSize = newMaxShardDataChangeListenerQueueSize;
             return this;
         }
 
-        public Builder maxShardDataStoreExecutorQueueSize(final int maxShardDataStoreExecutorQueueSize) {
-            this.maxShardDataStoreExecutorQueueSize = maxShardDataStoreExecutorQueueSize;
+        public Builder maxShardDataStoreExecutorQueueSize(final int newMaxShardDataStoreExecutorQueueSize) {
+            this.maxShardDataStoreExecutorQueueSize = newMaxShardDataStoreExecutorQueueSize;
             return this;
         }
 
@@ -523,11 +572,19 @@ public class DatastoreContext {
             return this;
         }
 
+        @Deprecated
         public Builder shardSnapshotChunkSize(final int shardSnapshotChunkSize) {
+            LOG.warn("The shard-snapshot-chunk-size configuration parameter is deprecated - "
+                    + "use maximum-message-slice-size instead");
             datastoreContext.setShardSnapshotChunkSize(shardSnapshotChunkSize);
             return this;
         }
 
+        public Builder maximumMessageSliceSize(final int maximumMessageSliceSize) {
+            datastoreContext.setMaximumMessageSliceSize(maximumMessageSliceSize);
+            return this;
+        }
+
         public Builder shardPeerAddressResolver(final PeerAddressResolver resolver) {
             datastoreContext.setPeerAddressResolver(resolver);
             return this;
@@ -538,7 +595,7 @@ public class DatastoreContext {
             return this;
         }
 
-        public Builder fileBackedStreamingThresholdInMegabytes(final int fileBackedStreamingThreshold) {
+        public Builder fileBackedStreamingThresholdInMegabytes(final int fileBackedStreamingThreshold) {
             datastoreContext.setFileBackedStreamingThreshold(fileBackedStreamingThreshold * ConfigParams.MEGABYTE);
             return this;
         }
@@ -548,11 +605,34 @@ public class DatastoreContext {
             return this;
         }
 
+        public Builder backendAlivenessTimerIntervalInSeconds(final long interval) {
+            datastoreContext.backendAlivenessTimerInterval = TimeUnit.SECONDS.toNanos(interval);
+            return this;
+        }
+
+        public Builder frontendRequestTimeoutInSeconds(final long timeout) {
+            datastoreContext.requestTimeout = TimeUnit.SECONDS.toNanos(timeout);
+            return this;
+        }
+
+        public Builder frontendNoProgressTimeoutInSeconds(final long timeout) {
+            datastoreContext.noProgressTimeout = TimeUnit.SECONDS.toNanos(timeout);
+            return this;
+        }
+
+        public Builder initialPayloadSerializedBufferCapacity(final int capacity) {
+            datastoreContext.initialPayloadSerializedBufferCapacity = capacity;
+            return this;
+        }
+
         @Override
         public DatastoreContext build() {
-            datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
-                    maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
-                    maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
+            datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.builder()
+                    .maxDataChangeExecutorPoolSize(maxShardDataChangeExecutorPoolSize)
+                    .maxDataChangeExecutorQueueSize(maxShardDataChangeExecutorQueueSize)
+                    .maxDataChangeListenerQueueSize(maxShardDataChangeListenerQueueSize)
+                    .maxDataStoreExecutorQueueSize(maxShardDataStoreExecutorQueueSize)
+                    .build();
 
             if (datastoreContext.dataStoreName != null) {
                 GLOBAL_DATASTORE_NAMES.add(datastoreContext.dataStoreName);
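
This change replaces the shard-snapshot-chunk-size knob with maximum-message-slice-size and exposes the tell-based frontend/backend timers through the Builder. As a rough illustration only, the sketch below shows how those new options might be wired together; DatastoreContext.newBuilder(), dataStoreName() and all values shown are assumptions for illustration and are not part of this diff:

    // Hypothetical usage sketch: newBuilder() and dataStoreName() come from the existing
    // class and are not shown in this hunk; the values are illustrative only.
    DatastoreContext context = DatastoreContext.newBuilder()
            .dataStoreName("config")
            .maximumMessageSliceSize(2048 * 1000)        // supersedes the deprecated shardSnapshotChunkSize()
            .backendAlivenessTimerIntervalInSeconds(30)  // stored internally as nanoseconds
            .frontendRequestTimeoutInSeconds(120)
            .frontendNoProgressTimeoutInSeconds(900)
            .initialPayloadSerializedBufferCapacity(512)
            .build();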