import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
-import com.google.common.base.Ticker;
import com.google.common.base.Verify;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
-import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(Duration.create(5, TimeUnit.SECONDS));
private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);
- private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
+ /**
+ * Process this many transactions in a single batched run. If we exceed this limit, we need to schedule later
+ * execution to finish up the batch. This is necessary in case of a long list of transactions which progress
+ * immediately through their preCommit phase -- if that happens, their completion eats up stack frames and could
+ * result in StackOverflowError.
+ */
+ private static final int MAX_TRANSACTION_BATCH = 100;
+ private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
private final Queue<CommitEntry> pendingTransactions = new ArrayDeque<>();
private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
private SchemaContext schemaContext;
+ private int currentTransactionBatch;
+
ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TipProducingDataTree dataTree,
final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
final ShardDataChangeListenerPublisher dataChangeListenerPublisher, final String logContext,
@VisibleForTesting
public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
this(shard, schemaContext, treeType, YangInstanceIdentifier.EMPTY,
- new DefaultShardDataTreeChangeListenerPublisher(), new DefaultShardDataChangeListenerPublisher(), "");
+ new DefaultShardDataTreeChangeListenerPublisher(""),
+ new DefaultShardDataChangeListenerPublisher(""), "");
}
final String logContext() {
return logContext;
}
- final Ticker ticker() {
- return shard.ticker();
+ final long readTime() {
+ return shard.ticker().read();
}
public TipProducingDataTree getDataTree() {
this.schemaContext = Preconditions.checkNotNull(newSchemaContext);
}
+ void resetTransactionBatch() {
+ currentTransactionBatch = 0;
+ }
+
/**
* Take a snapshot of current state for later recovery.
*
dataTree.commit(candidate);
notifyListeners(candidate);
- LOG.debug("{}: state snapshot applied in %s", logContext, elapsed);
+ LOG.debug("{}: state snapshot applied in {}", logContext, elapsed);
}
/**
* pre-Boron state -- which limits the number of options here.
*/
if (payload instanceof CommitTransactionPayload) {
+ final TransactionIdentifier txId;
if (identifier == null) {
final Entry<TransactionIdentifier, DataTreeCandidate> e =
((CommitTransactionPayload) payload).getCandidate();
- applyReplicatedCandidate(e.getKey(), e.getValue());
- allMetadataCommittedTransaction(e.getKey());
+ txId = e.getKey();
+ applyReplicatedCandidate(txId, e.getValue());
} else {
Verify.verify(identifier instanceof TransactionIdentifier);
- payloadReplicationComplete((TransactionIdentifier) identifier);
+ txId = (TransactionIdentifier) identifier;
+ payloadReplicationComplete(txId);
}
+ allMetadataCommittedTransaction(txId);
} else if (payload instanceof AbortTransactionPayload) {
if (identifier != null) {
payloadReplicationComplete((AbortTransactionPayload) payload);
- } else {
- allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
}
+ allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
} else if (payload instanceof PurgeTransactionPayload) {
if (identifier != null) {
payloadReplicationComplete((PurgeTransactionPayload) payload);
- } else {
- allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
}
+ allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
} else if (payload instanceof CloseLocalHistoryPayload) {
if (identifier != null) {
payloadReplicationComplete((CloseLocalHistoryPayload) payload);
- } else {
- allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
- }
- } else if (payload instanceof CloseLocalHistoryPayload) {
- if (identifier != null) {
- payloadReplicationComplete((CloseLocalHistoryPayload) payload);
- } else {
- allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
}
+ allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
} else if (payload instanceof CreateLocalHistoryPayload) {
if (identifier != null) {
payloadReplicationComplete((CreateLocalHistoryPayload)payload);
- } else {
- allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
}
+ allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
} else if (payload instanceof PurgeLocalHistoryPayload) {
if (identifier != null) {
payloadReplicationComplete((PurgeLocalHistoryPayload)payload);
- } else {
- allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
}
+ allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
} else {
LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
}
return ret;
}
- ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId) {
+ ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
+ @Nullable final Runnable callback) {
ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
if (chain == null) {
chain = new ShardDataTreeTransactionChain(historyId, this);
transactionChains.put(historyId, chain);
- shard.persistPayload(historyId, CreateLocalHistoryPayload.create(historyId), true);
+ replicatePayload(historyId, CreateLocalHistoryPayload.create(historyId), callback);
+ } else if (callback != null) {
+ callback.run();
}
return chain;
return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
}
- return ensureTransactionChain(txId.getHistoryId()).newReadOnlyTransaction(txId);
+ return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
}
ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
.newModification());
}
- return ensureTransactionChain(txId.getHistoryId()).newReadWriteTransaction(txId);
+ return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
}
@VisibleForTesting
public void notifyListeners(final DataTreeCandidate candidate) {
- treeChangeListenerPublisher.publishChanges(candidate, logContext);
- dataChangeListenerPublisher.publishChanges(candidate, logContext);
- }
-
- void notifyOfInitialData(final DataChangeListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> listenerReg, final Optional<DataTreeCandidate> currentState) {
- if (currentState.isPresent()) {
- ShardDataChangeListenerPublisher localPublisher = dataChangeListenerPublisher.newInstance();
- localPublisher.registerDataChangeListener(listenerReg.getPath(), listenerReg.getInstance(),
- listenerReg.getScope());
- localPublisher.publishChanges(currentState.get(), logContext);
- }
- }
-
- void notifyOfInitialData(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
- final Optional<DataTreeCandidate> currentState) {
- if (currentState.isPresent()) {
- ShardDataTreeChangeListenerPublisher localPublisher = treeChangeListenerPublisher.newInstance();
- localPublisher.registerTreeChangeListener(path, listener);
- localPublisher.publishChanges(currentState.get(), logContext);
- }
+ treeChangeListenerPublisher.publishChanges(candidate);
+ dataChangeListenerPublisher.publishChanges(candidate);
}
/**
replicatePayload(id, PurgeLocalHistoryPayload.create(id), callback);
}
- Entry<DataChangeListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>,
- Optional<DataTreeCandidate>> registerChangeListener(final YangInstanceIdentifier path,
- final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener,
- final DataChangeScope scope) {
- DataChangeListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> reg =
- dataChangeListenerPublisher.registerDataChangeListener(path, listener, scope);
-
- return new SimpleEntry<>(reg, readCurrentData());
+ void registerDataChangeListener(final YangInstanceIdentifier path,
+ final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener,
+ final DataChangeScope scope, final Optional<DataTreeCandidate> initialState,
+ final Consumer<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>>
+ onRegistration) {
+ dataChangeListenerPublisher.registerDataChangeListener(path, listener, scope, initialState, onRegistration);
}
- private Optional<DataTreeCandidate> readCurrentData() {
+ Optional<DataTreeCandidate> readCurrentData() {
final Optional<NormalizedNode<?, ?>> currentState =
dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY);
return currentState.isPresent() ? Optional.of(DataTreeCandidates.fromNormalizedNode(
YangInstanceIdentifier.EMPTY, currentState.get())) : Optional.<DataTreeCandidate>absent();
}
- public Entry<ListenerRegistration<DOMDataTreeChangeListener>, Optional<DataTreeCandidate>>
- registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener) {
- final ListenerRegistration<DOMDataTreeChangeListener> reg =
- treeChangeListenerPublisher.registerTreeChangeListener(path, listener);
-
- return new SimpleEntry<>(reg, readCurrentData());
+ public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
+ final Optional<DataTreeCandidate> initialState,
+ final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
}
int getQueueSize() {
return ret;
}
+ /**
+ * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
+ */
+ void resumeNextPendingTransaction() {
+ LOG.debug("{}: attempting to resume transaction processing", logContext);
+ processNextPending();
+ }
+
@SuppressWarnings("checkstyle:IllegalCatch")
private void processNextPendingTransaction() {
+ ++currentTransactionBatch;
+ if (currentTransactionBatch > MAX_TRANSACTION_BATCH) {
+ LOG.debug("{}: Already processed {}, scheduling continuation", logContext, currentTransactionBatch);
+ shard.scheduleNextPendingTransaction();
+ return;
+ }
+
processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
final SimpleShardDataTreeCohort cohort = entry.cohort;
final DataTreeModification modification = cohort.getDataTreeModification();
tip.validate(modification);
LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
cohort.successfulCanCommit();
- entry.lastAccess = ticker().read();
+ entry.lastAccess = readTime();
return;
} catch (ConflictingModificationAppliedException e) {
LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
}
void startCanCommit(final SimpleShardDataTreeCohort cohort) {
- final SimpleShardDataTreeCohort current = pendingTransactions.peek().cohort;
- if (!cohort.equals(current)) {
+ final CommitEntry head = pendingTransactions.peek();
+ if (head == null) {
+ LOG.warn("{}: No transactions enqueued while attempting to start canCommit on {}", logContext, cohort);
+ return;
+ }
+ if (!cohort.equals(head.cohort)) {
LOG.debug("{}: Transaction {} scheduled for canCommit step", logContext, cohort.getIdentifier());
return;
}
// Set the tip of the data tree.
tip = Verify.verifyNotNull(candidate);
- entry.lastAccess = ticker().read();
+ entry.lastAccess = readTime();
pendingTransactions.remove();
pendingCommits.add(entry);
}
@Override
- ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId,
- final DataTreeModification modification) {
- SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, modification, txId,
+ ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+ final Exception failure) {
+ final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
+ pendingTransactions.add(new CommitEntry(cohort, readTime()));
+ return cohort;
+ }
+
+ @Override
+ ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod) {
+ SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
cohortRegistry.createCohort(schemaContext, txId, COMMIT_STEP_TIMEOUT));
- pendingTransactions.add(new CommitEntry(cohort, ticker().read()));
+ pendingTransactions.add(new CommitEntry(cohort, readTime()));
return cohort;
}
+ // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care); this
+ // mimics newReadWriteTransaction().
+ ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod) {
+ if (txId.getHistoryId().getHistoryId() == 0) {
+ return createReadyCohort(txId, mod);
+ }
+
+ return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod);
+ }
+
@SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
void checkForExpiredTransactions(final long transactionCommitTimeoutMillis) {
final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
- final long now = ticker().read();
+ final long now = readTime();
final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
!pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
final CommitEntry currentTx = currentQueue.peek();
if (currentTx != null && currentTx.lastAccess + timeout < now) {
+ final State state = currentTx.cohort.getState();
LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
- currentTx.cohort.getIdentifier(), transactionCommitTimeoutMillis, currentTx.cohort.getState());
+ currentTx.cohort.getIdentifier(), transactionCommitTimeoutMillis, state);
boolean processNext = true;
- switch (currentTx.cohort.getState()) {
+ final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
+ + transactionCommitTimeoutMillis + "ms");
+
+ switch (state) {
case CAN_COMMIT_PENDING:
- currentQueue.remove().cohort.failedCanCommit(new TimeoutException());
+ currentQueue.remove().cohort.failedCanCommit(cohortFailure);
break;
case CAN_COMMIT_COMPLETE:
// The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
// whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
// in PRE_COMMIT_COMPLETE is changed.
- currentQueue.remove().cohort.reportFailure(new TimeoutException());
+ currentQueue.remove().cohort.reportFailure(cohortFailure);
break;
case PRE_COMMIT_PENDING:
- currentQueue.remove().cohort.failedPreCommit(new TimeoutException());
+ currentQueue.remove().cohort.failedPreCommit(cohortFailure);
break;
case PRE_COMMIT_COMPLETE:
// FIXME: this is a legacy behavior problem. Three-phase commit protocol specifies that after we
// In order to make the pre-commit timer working across failovers, though, we need
// a per-shard cluster-wide monotonic time, so a follower becoming the leader can accurately
// restart the timer.
- currentQueue.remove().cohort.reportFailure(new TimeoutException());
+ currentQueue.remove().cohort.reportFailure(cohortFailure);
break;
case COMMIT_PENDING:
LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
currentTx.lastAccess = now;
processNext = false;
return;
+ case READY:
+ currentQueue.remove().cohort.reportFailure(cohortFailure);
+ break;
case ABORTED:
case COMMITTED:
case FAILED:
- case READY:
default:
currentQueue.remove();
}
runOnPendingTransactionsComplete = null;
}
}
+
+ ShardStats getStats() {
+ return shard.getShardMBean();
+ }
+
+ Iterator<SimpleShardDataTreeCohort> cohortIterator() {
+ return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
+ e -> e.cohort).iterator();
+ }
+
+ void removeTransactionChain(final LocalHistoryIdentifier id) {
+ if (transactionChains.remove(id) != null) {
+ LOG.debug("{}: Removed transaction chain {}", logContext, id);
+ }
+ }
}