import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
-import com.google.common.base.Ticker;
import com.google.common.base.Verify;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
-import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(Duration.create(5, TimeUnit.SECONDS));
private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);
- private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
+ /**
+ * Process this many transactions in a single batched run. If we exceed this limit, we need to schedule later
+ * execution to finish up the batch. This is necessary in case of a long list of transactions which progress
+ * immediately through their preCommit phase -- if that happens, their completion eats up stack frames and could
+ * result in StackOverflowError.
+ */
+ private static final int MAX_TRANSACTION_BATCH = 100;
+ private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
private final Queue<CommitEntry> pendingTransactions = new ArrayDeque<>();
private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
private SchemaContext schemaContext;
+ private int currentTransactionBatch;
+
ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TipProducingDataTree dataTree,
final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
final ShardDataChangeListenerPublisher dataChangeListenerPublisher, final String logContext,
@VisibleForTesting
public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
this(shard, schemaContext, treeType, YangInstanceIdentifier.EMPTY,
- new DefaultShardDataTreeChangeListenerPublisher(), new DefaultShardDataChangeListenerPublisher(), "");
+ new DefaultShardDataTreeChangeListenerPublisher(""),
+ new DefaultShardDataChangeListenerPublisher(""), "");
}
final String logContext() {
return logContext;
}
- final Ticker ticker() {
- return shard.ticker();
+ final long readTime() {
+ return shard.ticker().read();
}
public TipProducingDataTree getDataTree() {
this.schemaContext = Preconditions.checkNotNull(newSchemaContext);
}
+ void resetTransactionBatch() {
+ currentTransactionBatch = 0;
+ }
+
/**
* Take a snapshot of current state for later recovery.
*
dataTree.commit(candidate);
notifyListeners(candidate);
- LOG.debug("{}: state snapshot applied in %s", logContext, elapsed);
+ LOG.debug("{}: state snapshot applied in {}", logContext, elapsed);
}
/**
* pre-Boron state -- which limits the number of options here.
*/
if (payload instanceof CommitTransactionPayload) {
+ final TransactionIdentifier txId;
if (identifier == null) {
final Entry<TransactionIdentifier, DataTreeCandidate> e =
((CommitTransactionPayload) payload).getCandidate();
- applyReplicatedCandidate(e.getKey(), e.getValue());
- allMetadataCommittedTransaction(e.getKey());
+ txId = e.getKey();
+ applyReplicatedCandidate(txId, e.getValue());
} else {
Verify.verify(identifier instanceof TransactionIdentifier);
- payloadReplicationComplete((TransactionIdentifier) identifier);
+ txId = (TransactionIdentifier) identifier;
+ payloadReplicationComplete(txId);
}
+ allMetadataCommittedTransaction(txId);
} else if (payload instanceof AbortTransactionPayload) {
if (identifier != null) {
payloadReplicationComplete((AbortTransactionPayload) payload);
- } else {
- allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
}
+ allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
} else if (payload instanceof PurgeTransactionPayload) {
if (identifier != null) {
payloadReplicationComplete((PurgeTransactionPayload) payload);
- } else {
- allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
}
+ allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
} else if (payload instanceof CloseLocalHistoryPayload) {
if (identifier != null) {
payloadReplicationComplete((CloseLocalHistoryPayload) payload);
- } else {
- allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
}
+ allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
} else if (payload instanceof CreateLocalHistoryPayload) {
if (identifier != null) {
payloadReplicationComplete((CreateLocalHistoryPayload)payload);
- } else {
- allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
}
+ allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
} else if (payload instanceof PurgeLocalHistoryPayload) {
if (identifier != null) {
payloadReplicationComplete((PurgeLocalHistoryPayload)payload);
- } else {
- allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
}
+ allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
} else {
LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
}
return ret;
}
- ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId) {
+ ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
+ @Nullable final Runnable callback) {
ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
if (chain == null) {
chain = new ShardDataTreeTransactionChain(historyId, this);
transactionChains.put(historyId, chain);
- shard.persistPayload(historyId, CreateLocalHistoryPayload.create(historyId), true);
+ replicatePayload(historyId, CreateLocalHistoryPayload.create(historyId), callback);
+ } else if (callback != null) {
+ callback.run();
}
return chain;
return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
}
- return ensureTransactionChain(txId.getHistoryId()).newReadOnlyTransaction(txId);
+ return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
}
ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
.newModification());
}
- return ensureTransactionChain(txId.getHistoryId()).newReadWriteTransaction(txId);
+ return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
}
@VisibleForTesting
public void notifyListeners(final DataTreeCandidate candidate) {
- treeChangeListenerPublisher.publishChanges(candidate, logContext);
- dataChangeListenerPublisher.publishChanges(candidate, logContext);
- }
-
- void notifyOfInitialData(final DataChangeListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> listenerReg, final Optional<DataTreeCandidate> currentState) {
- if (currentState.isPresent()) {
- ShardDataChangeListenerPublisher localPublisher = dataChangeListenerPublisher.newInstance();
- localPublisher.registerDataChangeListener(listenerReg.getPath(), listenerReg.getInstance(),
- listenerReg.getScope());
- localPublisher.publishChanges(currentState.get(), logContext);
- }
- }
-
- void notifyOfInitialData(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
- final Optional<DataTreeCandidate> currentState) {
- if (currentState.isPresent()) {
- ShardDataTreeChangeListenerPublisher localPublisher = treeChangeListenerPublisher.newInstance();
- localPublisher.registerTreeChangeListener(path, listener);
- localPublisher.publishChanges(currentState.get(), logContext);
- }
+ treeChangeListenerPublisher.publishChanges(candidate);
+ dataChangeListenerPublisher.publishChanges(candidate);
}
/**
replicatePayload(id, PurgeLocalHistoryPayload.create(id), callback);
}
- Entry<DataChangeListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>,
- Optional<DataTreeCandidate>> registerChangeListener(final YangInstanceIdentifier path,
- final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener,
- final DataChangeScope scope) {
- DataChangeListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> reg =
- dataChangeListenerPublisher.registerDataChangeListener(path, listener, scope);
-
- return new SimpleEntry<>(reg, readCurrentData());
+ void registerDataChangeListener(final YangInstanceIdentifier path,
+ final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener,
+ final DataChangeScope scope, final Optional<DataTreeCandidate> initialState,
+ final Consumer<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>>
+ onRegistration) {
+ dataChangeListenerPublisher.registerDataChangeListener(path, listener, scope, initialState, onRegistration);
}
- private Optional<DataTreeCandidate> readCurrentData() {
+ Optional<DataTreeCandidate> readCurrentData() {
final Optional<NormalizedNode<?, ?>> currentState =
dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY);
return currentState.isPresent() ? Optional.of(DataTreeCandidates.fromNormalizedNode(
YangInstanceIdentifier.EMPTY, currentState.get())) : Optional.<DataTreeCandidate>absent();
}
- public Entry<ListenerRegistration<DOMDataTreeChangeListener>, Optional<DataTreeCandidate>>
- registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener) {
- final ListenerRegistration<DOMDataTreeChangeListener> reg =
- treeChangeListenerPublisher.registerTreeChangeListener(path, listener);
-
- return new SimpleEntry<>(reg, readCurrentData());
+ public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
+ final Optional<DataTreeCandidate> initialState,
+ final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
}
int getQueueSize() {
return dataTree.takeSnapshot().newModification();
}
- /**
- * Commits a modification.
- *
- * @deprecated This method violates DataTree containment and will be removed.
- */
- @VisibleForTesting
- @Deprecated
- public DataTreeCandidate commit(final DataTreeModification modification) throws DataValidationFailedException {
- // Direct modification commit is a utility, which cannot be used while we have transactions in-flight
- Preconditions.checkState(tip == dataTree, "Cannot modify data tree while transacgitons are pending");
-
- modification.ready();
- dataTree.validate(modification);
- DataTreeCandidate candidate = dataTree.prepare(modification);
- dataTree.commit(candidate);
- return candidate;
- }
-
public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());
return ret;
}
+ /**
+ * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
+ */
+ void resumeNextPendingTransaction() {
+ LOG.debug("{}: attempting to resume transaction processing", logContext);
+ processNextPending();
+ }
+
@SuppressWarnings("checkstyle:IllegalCatch")
private void processNextPendingTransaction() {
+ ++currentTransactionBatch;
+ if (currentTransactionBatch > MAX_TRANSACTION_BATCH) {
+ LOG.debug("{}: Already processed {}, scheduling continuation", logContext, currentTransactionBatch);
+ shard.scheduleNextPendingTransaction();
+ return;
+ }
+
processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
final SimpleShardDataTreeCohort cohort = entry.cohort;
final DataTreeModification modification = cohort.getDataTreeModification();
LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
Exception cause;
try {
+ cohort.throwCanCommitFailure();
+
tip.validate(modification);
LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
cohort.successfulCanCommit();
- entry.lastAccess = ticker().read();
+ entry.lastAccess = readTime();
return;
} catch (ConflictingModificationAppliedException e) {
LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
// Set the tip of the data tree.
tip = Verify.verifyNotNull(candidate);
- entry.lastAccess = ticker().read();
+ entry.lastAccess = readTime();
pendingTransactions.remove();
pendingCommits.add(entry);
cohortRegistry.process(sender, message);
}
+ @Override
+ ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+ final Exception failure) {
+ SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort.DeadOnArrival(this, mod, txId, failure);
+ pendingTransactions.add(new CommitEntry(cohort, readTime()));
+ return cohort;
+ }
+
@Override
ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId,
- final DataTreeModification modification) {
- SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, modification, txId,
+ final DataTreeModification mod) {
+ SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort.Normal(this, mod, txId,
cohortRegistry.createCohort(schemaContext, txId, COMMIT_STEP_TIMEOUT));
- pendingTransactions.add(new CommitEntry(cohort, ticker().read()));
+ pendingTransactions.add(new CommitEntry(cohort, readTime()));
return cohort;
}
+ // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care about
+ // them); this mimics newReadWriteTransaction().
+ ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod) {
+ if (txId.getHistoryId().getHistoryId() == 0) {
+ return createReadyCohort(txId, mod);
+ }
+
+ return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod);
+ }
+
@SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
void checkForExpiredTransactions(final long transactionCommitTimeoutMillis) {
final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
- final long now = ticker().read();
+ final long now = readTime();
final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
!pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
runOnPendingTransactionsComplete = null;
}
}
+
+ ShardStats getStats() {
+ return shard.getShardMBean();
+ }
}