<feature name='odl-netconf-util' version='${project.version}'>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<bundle>mvn:org.opendaylight.yangtools/yang-model-api/${yangtools.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools/yang-data-api/${yangtools.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-util/${project.version}</bundle>
</feature>
<feature name='odl-netconf-impl' version='${project.version}' description="OpenDaylight :: Netconf :: Impl">
hosttracker.keyscheme=IP
# LISP Flow Mapping configuration
-# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings (default: true)
lisp.mappingOverwrite = true
-# Enable the Solicit-Map-Request (SMR) mechanism
-lisp.smr = false
+# Enable the Solicit-Map-Request (SMR) mechanism (default: true)
+lisp.smr = true
+# Choose policy for Explicit Locator Path (ELP) handling
+# There are three options:
+# default: don't add or remove locator records, return mapping as-is
+# both: keep the ELP, but add the next hop as a standalone non-LCAF locator with a lower priority
+# replace: remove the ELP, add the next hop as a standalone non-LCAF locator
+lisp.elpPolicy = default
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
/**
* A sample actor showing how the RaftActor is to be extended
*/
-public class ExampleActor extends RaftActor {
+public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
- private final Map<String, String> state = new HashMap();
+ private final Map<String, String> state = new HashMap<>();
private long persistIdentifier = 1;
private final Optional<ActorRef> roleChangeNotifier;
}
}
- @Override protected void createSnapshot() {
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
ByteString bs = null;
try {
bs = fromObject(state);
getSelf().tell(new CaptureSnapshotReply(bs.toByteArray()), null);
}
- @Override protected void applySnapshot(byte [] snapshot) {
+ @Override
+ public void applySnapshot(byte [] snapshot) {
state.clear();
try {
- state.putAll((HashMap) toObject(snapshot));
+ state.putAll((HashMap<String, String>) toObject(snapshot));
} catch (Exception e) {
LOG.error("Exception in applying snapshot", e);
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
+ LOG.debug("Snapshot applied to state : {}", ((HashMap<?, ?>) state).size());
}
}
}
@Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return this;
+ }
+
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ public void appendRecoveredLogEntry(Payload data) {
}
@Override
- protected void appendRecoveredLogEntry(Payload data) {
+ public void applyCurrentLogRecoveryBatch() {
}
@Override
- protected void applyCurrentLogRecoveryBatch() {
+ public void onRecoveryComplete() {
}
@Override
- protected void onRecoveryComplete() {
+ public void applyRecoverySnapshot(byte[] snapshot) {
}
@Override
- protected void applyRecoverySnapshot(byte[] snapshot) {
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return this;
}
}
}
// override this method to return the protobuff related extension fields and their values
- @Override public Map<GeneratedMessage.GeneratedExtension, String> encode() {
- Map<GeneratedMessage.GeneratedExtension, String> map = new HashMap<>();
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, String> encode() {
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, String> map = new HashMap<>();
map.put(KeyValueMessages.key, getKey());
map.put(KeyValueMessages.value, getValue());
return map;
*/
package org.opendaylight.controller.cluster.raft;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
// We define this as ArrayList so we can use ensureCapacity.
- protected ArrayList<ReplicatedLogEntry> journal;
+ private ArrayList<ReplicatedLogEntry> journal;
private long snapshotIndex = -1;
private long snapshotTerm = -1;
private ArrayList<ReplicatedLogEntry> snapshottedJournal;
private long previousSnapshotIndex = -1;
private long previousSnapshotTerm = -1;
- protected int dataSize = 0;
+ private int dataSize = 0;
public AbstractReplicatedLogImpl(long snapshotIndex,
long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries) {
this.snapshotIndex = snapshotIndex;
this.snapshotTerm = snapshotTerm;
this.journal = new ArrayList<>(unAppliedEntries);
+
+ for(ReplicatedLogEntry entry: journal) {
+ dataSize += entry.size();
+ }
}
public AbstractReplicatedLogImpl() {
}
@Override
- public void removeFrom(long logEntryIndex) {
+ public long removeFrom(long logEntryIndex) {
int adjustedIndex = adjustedIndex(logEntryIndex);
if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
// physical index should be less than list size and >= 0
- return;
+ return -1;
+ }
+
+ for(int i = adjustedIndex; i < journal.size(); i++) {
+ dataSize -= journal.get(i).size();
}
+
journal.subList(adjustedIndex , journal.size()).clear();
+
+ return adjustedIndex;
}
@Override
public void append(ReplicatedLogEntry replicatedLogEntry) {
journal.add(replicatedLogEntry);
+ dataSize += replicatedLogEntry.size();
}
@Override
List<ReplicatedLogEntry> snapshotJournalEntries = journal.subList(0, (int) (snapshotCapturedIndex - snapshotIndex));
snapshottedJournal.addAll(snapshotJournalEntries);
- clear(0, (int) (snapshotCapturedIndex - snapshotIndex));
+ snapshotJournalEntries.clear();
previousSnapshotIndex = snapshotIndex;
setSnapshotIndex(snapshotCapturedIndex);
snapshotTerm = previousSnapshotTerm;
previousSnapshotTerm = -1;
}
+
+ @VisibleForTesting
+ ReplicatedLogEntry getAtPhysicalIndex(int index) {
+ return journal.get(index);
+ }
}
/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.japi.Procedure;
-import akka.persistence.RecoveryCompleted;
-import akka.persistence.SaveSnapshotFailure;
-import akka.persistence.SaveSnapshotSuccess;
-import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Optional;
-import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
import org.apache.commons.lang3.time.DurationFormatUtils;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
import org.opendaylight.controller.cluster.raft.behaviors.DelegatingRaftActorBehavior;
private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
- private static final String COMMIT_SNAPSHOT = "commit_snapshot";
-
protected final Logger LOG = LoggerFactory.getLogger(getClass());
/**
private final DelegatingPersistentDataProvider delegatingPersistenceProvider = new DelegatingPersistentDataProvider(null);
- private final Procedure<Void> createSnapshotProcedure = new CreateSnapshotProcedure();
-
- private Stopwatch recoveryTimer;
+ private RaftActorRecoverySupport raftRecovery;
- private int currentRecoveryBatchCount;
+ private RaftActorSnapshotMessageSupport snapshotSupport;
private final BehaviorStateHolder reusableBehaviorStateHolder = new BehaviorStateHolder();
context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, delegatingPersistenceProvider, currentBehavior));
}
- private void initRecoveryTimer() {
- if(recoveryTimer == null) {
- recoveryTimer = Stopwatch.createStarted();
- }
- }
-
@Override
public void preStart() throws Exception {
LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
@Override
public void postStop() {
- if(currentBehavior != null) {
+ if(currentBehavior.getDelegate() != null) {
try {
currentBehavior.close();
} catch (Exception e) {
@Override
public void handleRecover(Object message) {
- if(persistence().isRecoveryApplicable()) {
- if (message instanceof SnapshotOffer) {
- onRecoveredSnapshot((SnapshotOffer) message);
- } else if (message instanceof ReplicatedLogEntry) {
- onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
- } else if (message instanceof ApplyLogEntries) {
- // Handle this message for backwards compatibility with pre-Lithium versions.
- onRecoveredApplyLogEntries(((ApplyLogEntries) message).getToIndex());
- } else if (message instanceof ApplyJournalEntries) {
- onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
- } else if (message instanceof DeleteEntries) {
- replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
- } else if (message instanceof RecoveryCompleted) {
- onRecoveryCompletedMessage();
- }
- } else {
- if (message instanceof RecoveryCompleted) {
+ if(raftRecovery == null) {
+ raftRecovery = newRaftActorRecoverySupport();
+ }
+
+ boolean recoveryComplete = raftRecovery.handleRecoveryMessage(message);
+ if(recoveryComplete) {
+ if(!persistence().isRecoveryApplicable()) {
// Delete all the messages from the akka journal so that we do not end up with consistency issues
// Note I am not using the dataPersistenceProvider and directly using the akka api here
deleteMessages(lastSequenceNr());
// Delete all the akka snapshots as they will not be needed
deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), scala.Long.MaxValue()));
-
- onRecoveryComplete();
-
- initializeBehavior();
}
- }
- }
-
- private void onRecoveredSnapshot(SnapshotOffer offer) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: SnapshotOffer called..", persistenceId());
- }
-
- initRecoveryTimer();
-
- Snapshot snapshot = (Snapshot) offer.snapshot();
-
- // Create a replicated log with the snapshot information
- // The replicated log can be used later on to retrieve this snapshot
- // when we need to install it on a peer
-
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
- currentBehavior));
- context.setLastApplied(snapshot.getLastAppliedIndex());
- context.setCommitIndex(snapshot.getLastAppliedIndex());
-
- Stopwatch timer = Stopwatch.createStarted();
-
- // Apply the snapshot to the actors state
- applyRecoverySnapshot(snapshot.getState());
-
- timer.stop();
- LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
- replicatedLog().size(), persistenceId(), timer.toString(),
- replicatedLog().getSnapshotIndex(), replicatedLog().getSnapshotTerm());
- }
- private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received ReplicatedLogEntry for recovery: {}", persistenceId(), logEntry.getIndex());
- }
-
- replicatedLog().append(logEntry);
- }
-
- private void onRecoveredApplyLogEntries(long toIndex) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received ApplyLogEntries for recovery, applying to state: {} to {}",
- persistenceId(), context.getLastApplied() + 1, toIndex);
- }
-
- for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
- batchRecoveredLogEntry(replicatedLog().get(i));
- }
-
- context.setLastApplied(toIndex);
- context.setCommitIndex(toIndex);
- }
-
- private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
- initRecoveryTimer();
+ onRecoveryComplete();
- int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
- if(currentRecoveryBatchCount == 0) {
- startLogRecoveryBatch(batchSize);
- }
-
- appendRecoveredLogEntry(logEntry.getData());
+ initializeBehavior();
- if(++currentRecoveryBatchCount >= batchSize) {
- endCurrentLogRecoveryBatch();
+ raftRecovery = null;
}
}
- private void endCurrentLogRecoveryBatch() {
- applyCurrentLogRecoveryBatch();
- currentRecoveryBatchCount = 0;
- }
-
- private void onRecoveryCompletedMessage() {
- if(currentRecoveryBatchCount > 0) {
- endCurrentLogRecoveryBatch();
- }
-
- onRecoveryComplete();
-
- String recoveryTime = "";
- if(recoveryTimer != null) {
- recoveryTimer.stop();
- recoveryTime = " in " + recoveryTimer.toString();
- recoveryTimer = null;
- }
-
- LOG.info(
- "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
- replicatedLog().getSnapshotTerm(), replicatedLog().size());
-
- initializeBehavior();
+ protected RaftActorRecoverySupport newRaftActorRecoverySupport() {
+ return new RaftActorRecoverySupport(delegatingPersistenceProvider, context, currentBehavior,
+ getRaftActorRecoveryCohort());
}
protected void initializeBehavior(){
handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
}
- @Override public void handleCommand(Object message) {
+ @Override
+ public void handleCommand(Object message) {
+ if(snapshotSupport == null) {
+ snapshotSupport = newRaftActorSnapshotMessageSupport();
+ }
+
+ boolean handled = snapshotSupport.handleSnapshotMessage(message);
+ if(handled) {
+ return;
+ }
+
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
persistence().persist(applyEntries, NoopProcedure.instance());
- } else if(message instanceof ApplySnapshot ) {
- Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: ApplySnapshot called on Follower Actor " +
- "snapshotIndex:{}, snapshotTerm:{}", persistenceId(), snapshot.getLastAppliedIndex(),
- snapshot.getLastAppliedTerm()
- );
- }
-
- applySnapshot(snapshot.getState());
-
- //clears the followers log, sets the snapshot index to ensure adjusted-index works
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
- currentBehavior));
- context.setLastApplied(snapshot.getLastAppliedIndex());
-
} else if (message instanceof FindLeader) {
getSender().tell(
new FindLeaderReply(getLeaderAddress()),
getSelf()
);
-
- } else if (message instanceof SaveSnapshotSuccess) {
- SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
- LOG.info("{}: SaveSnapshotSuccess received for snapshot", persistenceId());
-
- long sequenceNumber = success.metadata().sequenceNr();
-
- commitSnapshot(sequenceNumber);
-
- } else if (message instanceof SaveSnapshotFailure) {
- SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
-
- LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:",
- persistenceId(), saveSnapshotFailure.cause());
-
- context.getSnapshotManager().rollback();
-
- } else if (message instanceof CaptureSnapshot) {
- LOG.debug("{}: CaptureSnapshot received by actor: {}", persistenceId(), message);
-
- context.getSnapshotManager().create(createSnapshotProcedure);
-
- } else if (message instanceof CaptureSnapshotReply) {
- handleCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
} else if(message instanceof GetOnDemandRaftState) {
onGetOnDemandRaftStats();
- } else if (message.equals(COMMIT_SNAPSHOT)) {
- commitSnapshot(-1);
} else {
reusableBehaviorStateHolder.init(getCurrentBehavior());
}
}
+ protected RaftActorSnapshotMessageSupport newRaftActorSnapshotMessageSupport() {
+ return new RaftActorSnapshotMessageSupport(delegatingPersistenceProvider, context,
+ currentBehavior, getRaftActorSnapshotCohort());
+ }
+
private void onGetOnDemandRaftStats() {
// Debugging message to retrieve raft stats.
// Make saving Snapshot successful
// Committing the snapshot here would end up calling commit in the creating state which would
// be a state violation. That's why now we send a message to commit the snapshot.
- self().tell(COMMIT_SNAPSHOT, self());
+ self().tell(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT, self());
}
});
}
context.setPeerAddress(peerId, peerAddress);
}
- protected void commitSnapshot(long sequenceNumber) {
- context.getSnapshotManager().commit(persistence(), sequenceNumber);
- }
-
/**
* The applyState method will be called by the RaftActor when some data
* needs to be applied to the actor's state
Object data);
/**
- * This method is called during recovery at the start of a batch of state entries. Derived
- * classes should perform any initialization needed to start a batch.
+ * Returns the RaftActorRecoveryCohort to participate in persistence recovery.
*/
- protected abstract void startLogRecoveryBatch(int maxBatchSize);
-
- /**
- * This method is called during recovery to append state data to the current batch. This method
- * is called 1 or more times after {@link #startLogRecoveryBatch}.
- *
- * @param data the state data
- */
- protected abstract void appendRecoveredLogEntry(Payload data);
-
- /**
- * This method is called during recovery to reconstruct the state of the actor.
- *
- * @param snapshotBytes A snapshot of the state of the actor
- */
- protected abstract void applyRecoverySnapshot(byte[] snapshotBytes);
-
- /**
- * This method is called during recovery at the end of a batch to apply the current batched
- * log entries. This method is called after {@link #appendRecoveredLogEntry}.
- */
- protected abstract void applyCurrentLogRecoveryBatch();
+ @Nonnull
+ protected abstract RaftActorRecoveryCohort getRaftActorRecoveryCohort();
/**
* This method is called when recovery is complete.
protected abstract void onRecoveryComplete();
/**
- * This method will be called by the RaftActor when a snapshot needs to be
- * created. The derived actor should respond with its current state.
- * <p/>
- * During recovery the state that is returned by the derived actor will
- * be passed back to it by calling the applySnapshot method
- *
- * @return The current state of the actor
+ * Returns the RaftActorSnapshotCohort to participate in snapshot creation and application.
*/
- protected abstract void createSnapshot();
-
- /**
- * This method can be called at any other point during normal
- * operations when the derived actor is out of sync with it's peers
- * and the only way to bring it in sync is by applying a snapshot
- *
- * @param snapshotBytes A snapshot of the state of the actor
- */
- protected abstract void applySnapshot(byte[] snapshotBytes);
+ @Nonnull
+ protected abstract RaftActorSnapshotCohort getRaftActorSnapshotCohort();
/**
* This method will be called by the RaftActor when the state of the
return peerAddress;
}
- private void handleCaptureSnapshotReply(byte[] snapshotBytes) {
- LOG.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", persistenceId(), snapshotBytes.length);
-
- context.getSnapshotManager().persist(persistence(), snapshotBytes, currentBehavior, context.getTotalMemory());
- }
-
protected boolean hasFollowers(){
return getRaftActorContext().hasFollowers();
}
+ /**
+ * @deprecated Deprecated in favor of {@link org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries}
+ * whose type for fromIndex is long instead of int. This class was kept for backwards
+ * compatibility with Helium.
+ */
+ @Deprecated
static class DeleteEntries implements Serializable {
private static final long serialVersionUID = 1L;
private final int fromIndex;
}
}
- private class CreateSnapshotProcedure implements Procedure<Void> {
-
- @Override
- public void apply(Void aVoid) throws Exception {
- createSnapshot();
- }
- }
-
private static class BehaviorStateHolder {
private RaftActorBehavior behavior;
private String leaderId;
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+
+/**
+ * Interface for a class that participates in raft actor persistence recovery.
+ *
+ * @author Thomas Pantelis
+ */
+public interface RaftActorRecoveryCohort {
+
+ /**
+ * This method is called during recovery at the start of a batch of state entries. Derived
+ * classes should perform any initialization needed to start a batch.
+ */
+ void startLogRecoveryBatch(int maxBatchSize);
+
+ /**
+ * This method is called during recovery to append state data to the current batch. This method
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
+ *
+ * @param data the state data
+ */
+ void appendRecoveredLogEntry(Payload data);
+
+ /**
+ * This method is called during recovery to reconstruct the state of the actor.
+ *
+ * @param snapshotBytes A snapshot of the state of the actor
+ */
+ void applyRecoverySnapshot(byte[] snapshotBytes);
+
+ /**
+ * This method is called during recovery at the end of a batch to apply the current batched
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
+ */
+ void applyCurrentLogRecoveryBatch();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotOffer;
+import com.google.common.base.Stopwatch;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+/**
+ * Support class that handles persistence recovery for a RaftActor.
+ *
+ * @author Thomas Pantelis
+ */
+class RaftActorRecoverySupport {
+ private final DataPersistenceProvider persistence;
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+ private final RaftActorRecoveryCohort cohort;
+
+ private int currentRecoveryBatchCount;
+
+ private Stopwatch recoveryTimer;
+ private final Logger log;
+
+ RaftActorRecoverySupport(DataPersistenceProvider persistence, RaftActorContext context,
+ RaftActorBehavior currentBehavior, RaftActorRecoveryCohort cohort) {
+ this.persistence = persistence;
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ this.cohort = cohort;
+ this.log = context.getLogger();
+ }
+
+ boolean handleRecoveryMessage(Object message) {
+ boolean recoveryComplete = false;
+ if(persistence.isRecoveryApplicable()) {
+ if (message instanceof SnapshotOffer) {
+ onRecoveredSnapshot((SnapshotOffer) message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
+ } else if (message instanceof ApplyLogEntries) {
+ // Handle this message for backwards compatibility with pre-Lithium versions.
+ onRecoveredApplyLogEntries(((ApplyLogEntries) message).getToIndex());
+ } else if (message instanceof ApplyJournalEntries) {
+ onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
+ } else if (message instanceof DeleteEntries) {
+ replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries) {
+ // Handle this message for backwards compatibility with pre-Lithium versions.
+ replicatedLog().removeFrom(((org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ recoveryComplete = true;
+ }
+ } else if (message instanceof RecoveryCompleted) {
+ recoveryComplete = true;
+ }
+
+ return recoveryComplete;
+ }
+
+ private ReplicatedLog replicatedLog() {
+ return context.getReplicatedLog();
+ }
+
+ private void initRecoveryTimer() {
+ if(recoveryTimer == null) {
+ recoveryTimer = Stopwatch.createStarted();
+ }
+ }
+
+ private void onRecoveredSnapshot(SnapshotOffer offer) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: SnapshotOffer called..", context.getId());
+ }
+
+ initRecoveryTimer();
+
+ Snapshot snapshot = (Snapshot) offer.snapshot();
+
+ // Create a replicated log with the snapshot information
+ // The replicated log can be used later on to retrieve this snapshot
+ // when we need to install it on a peer
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, persistence, currentBehavior));
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
+
+ Stopwatch timer = Stopwatch.createStarted();
+
+ // Apply the snapshot to the actors state
+ cohort.applyRecoverySnapshot(snapshot.getState());
+
+ timer.stop();
+ log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
+ context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
+ }
+
+ private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Received ReplicatedLogEntry for recovery: index: {}, size: {}", context.getId(),
+ logEntry.getIndex(), logEntry.size());
+ }
+
+ replicatedLog().append(logEntry);
+ }
+
+ private void onRecoveredApplyLogEntries(long toIndex) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Received ApplyLogEntries for recovery, applying to state: {} to {}",
+ context.getId(), context.getLastApplied() + 1, toIndex);
+ }
+
+ for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
+ batchRecoveredLogEntry(replicatedLog().get(i));
+ }
+
+ context.setLastApplied(toIndex);
+ context.setCommitIndex(toIndex);
+ }
+
+ private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ initRecoveryTimer();
+
+ int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+ if(currentRecoveryBatchCount == 0) {
+ cohort.startLogRecoveryBatch(batchSize);
+ }
+
+ cohort.appendRecoveredLogEntry(logEntry.getData());
+
+ if(++currentRecoveryBatchCount >= batchSize) {
+ endCurrentLogRecoveryBatch();
+ }
+ }
+
+ private void endCurrentLogRecoveryBatch() {
+ cohort.applyCurrentLogRecoveryBatch();
+ currentRecoveryBatchCount = 0;
+ }
+
+ private void onRecoveryCompletedMessage() {
+ if(currentRecoveryBatchCount > 0) {
+ endCurrentLogRecoveryBatch();
+ }
+
+ String recoveryTime = "";
+ if(recoveryTimer != null) {
+ recoveryTimer.stop();
+ recoveryTime = " in " + recoveryTimer.toString();
+ recoveryTimer = null;
+ }
+
+ log.info("Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+ "Persistence Id = " + context.getId() +
+ " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, " +
+ "journal-size = {}", replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+
+/**
+ * Interface for a class that participates in raft actor snapshotting.
+ *
+ * @author Thomas Pantelis
+ */
+public interface RaftActorSnapshotCohort {
+
+ /**
+ * This method is called by the RaftActor when a snapshot needs to be
+ * created. The implementation should send a CaptureSnapshotReply to the given actor.
+ *
+ * @param actorRef the actor to which to respond
+ */
+ void createSnapshot(ActorRef actorRef);
+
+ /**
+ * This method is called to apply a snapshot installed by the leader.
+ *
+ * @param snapshotBytes a snapshot of the state of the actor
+ */
+ void applySnapshot(byte[] snapshotBytes);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+/**
+ * Handles snapshot related messages for a RaftActor.
+ *
+ * @author Thomas Pantelis
+ */
+class RaftActorSnapshotMessageSupport {
+ static final String COMMIT_SNAPSHOT = "commit_snapshot";
+
+ private final DataPersistenceProvider persistence;
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+ private final RaftActorSnapshotCohort cohort;
+ private final Logger log;
+
+ private final Procedure<Void> createSnapshotProcedure = new Procedure<Void>() {
+ @Override
+ public void apply(Void notUsed) throws Exception {
+ cohort.createSnapshot(context.getActor());
+ }
+ };
+
+ RaftActorSnapshotMessageSupport(DataPersistenceProvider persistence, RaftActorContext context,
+ RaftActorBehavior currentBehavior, RaftActorSnapshotCohort cohort) {
+ this.persistence = persistence;
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ this.cohort = cohort;
+ this.log = context.getLogger();
+ }
+
+ boolean handleSnapshotMessage(Object message) {
+ if(message instanceof ApplySnapshot ) {
+ onApplySnapshot(((ApplySnapshot) message).getSnapshot());
+ return true;
+ } else if (message instanceof SaveSnapshotSuccess) {
+ onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
+ return true;
+ } else if (message instanceof SaveSnapshotFailure) {
+ onSaveSnapshotFailure((SaveSnapshotFailure) message);
+ return true;
+ } else if (message instanceof CaptureSnapshot) {
+ onCaptureSnapshot(message);
+ return true;
+ } else if (message instanceof CaptureSnapshotReply) {
+ onCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
+ return true;
+ } else if (message.equals(COMMIT_SNAPSHOT)) {
+ context.getSnapshotManager().commit(persistence, -1);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private void onCaptureSnapshotReply(byte[] snapshotBytes) {
+ log.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", context.getId(), snapshotBytes.length);
+
+ context.getSnapshotManager().persist(persistence, snapshotBytes, currentBehavior, context.getTotalMemory());
+ }
+
+ private void onCaptureSnapshot(Object message) {
+ log.debug("{}: CaptureSnapshot received by actor: {}", context.getId(), message);
+
+ context.getSnapshotManager().create(createSnapshotProcedure);
+ }
+
+ private void onSaveSnapshotFailure(SaveSnapshotFailure saveSnapshotFailure) {
+ log.error("{}: SaveSnapshotFailure received for snapshot Cause:",
+ context.getId(), saveSnapshotFailure.cause());
+
+ context.getSnapshotManager().rollback();
+ }
+
+ private void onSaveSnapshotSuccess(SaveSnapshotSuccess success) {
+ log.info("{}: SaveSnapshotSuccess received for snapshot", context.getId());
+
+ long sequenceNumber = success.metadata().sequenceNr();
+
+ context.getSnapshotManager().commit(persistence, sequenceNumber);
+ }
+
+ private void onApplySnapshot(Snapshot snapshot) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: ApplySnapshot called on Follower Actor " +
+ "snapshotIndex:{}, snapshotTerm:{}", context.getId(), snapshot.getLastAppliedIndex(),
+ snapshot.getLastAppliedTerm());
+ }
+
+ cohort.applySnapshot(snapshot.getState());
+
+ //clears the followers log, sets the snapshot index to ensure adjusted-index works
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, persistence,
+ currentBehavior));
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ }
+}
* information
*
* @param index the index of the log entry
+ * @return the adjusted index of the first log entry removed or -1 if log entry not found.
*/
- void removeFrom(long index);
+ long removeFrom(long index);
/**
import java.util.Collections;
import java.util.List;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
-import org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
/**
private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
@Override
- public void apply(DeleteEntries param) {
- dataSize = 0;
- for (ReplicatedLogEntry entry : journal) {
- dataSize += entry.size();
- }
+ public void apply(DeleteEntries notUsed) {
}
};
@Override
public void removeFromAndPersist(long logEntryIndex) {
- int adjustedIndex = adjustedIndex(logEntryIndex);
-
- if (adjustedIndex < 0) {
- return;
- }
-
// FIXME: Maybe this should be done after the command is saved
- journal.subList(adjustedIndex , journal.size()).clear();
-
- persistence.persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+ long adjustedIndex = removeFrom(logEntryIndex);
+ if(adjustedIndex >= 0) {
+ persistence.persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+ }
}
@Override
}
// FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
- journal.add(replicatedLogEntry);
+ append(replicatedLogEntry);
// When persisting events with persist it is guaranteed that the
// persistent actor will not receive further commands between the
public void apply(ReplicatedLogEntry evt) throws Exception {
int logEntrySize = replicatedLogEntry.size();
- dataSize += logEntrySize;
- long dataSizeForCheck = dataSize;
+ long dataSizeForCheck = dataSize();
dataSizeSinceLastSnapshot += logEntrySize;
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import java.io.Serializable;
+
+/**
+ * Internal message stored in the Akka persistent journal to record the deletion of
+ * log entries from the given index onwards.
+ *
+ * @author Thomas Pantelis
+ */
+public class DeleteEntries implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    // First (adjusted) journal index from which entries were deleted.
+    private final long fromIndex;
+
+    public DeleteEntries(long fromIndex) {
+        this.fromIndex = fromIndex;
+    }
+
+    public long getFromIndex() {
+        return fromIndex;
+    }
+
+    @Override
+    public String toString() {
+        // Simple concatenation - output is identical to the StringBuilder form.
+        return "DeleteEntries [fromIndex=" + fromIndex + "]";
+    }
+}
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
-import org.opendaylight.controller.cluster.raft.RaftActorTest.MockRaftActor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
}
@Override
- protected void createSnapshot() {
+ public void createSnapshot(ActorRef actorRef) {
if(snapshot != null) {
getSelf().tell(new CaptureSnapshotReply(snapshot), ActorRef.noSender());
}
}
@Override
- protected void applyRecoverySnapshot(byte[] bytes) {
+ public void applyRecoverySnapshot(byte[] bytes) {
}
void setSnapshot(byte[] snapshot) {
}
protected void waitUntilLeader(ActorRef actorRef) {
- RaftActorTest.RaftActorTestKit.waitUntilLeader(actorRef);
+ RaftActorTestKit.waitUntilLeader(actorRef);
}
protected TestActorRef<TestRaftActor> newTestRaftActor(String id, Map<String, String> peerAddresses,
/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
}
- @After
- public void tearDown() {
- replicatedLogImpl.journal.clear();
- replicatedLogImpl.setSnapshotIndex(-1);
- replicatedLogImpl.setSnapshotTerm(-1);
- replicatedLogImpl = null;
+    // Verifies the pristine state of a freshly constructed empty log (all sizes,
+    // indices and lookups report "nothing"), and that after setting a snapshot
+    // index/term the log reports them as the effective last index/term.
+    @Test
+    public void testEmptyLog() {
+        replicatedLogImpl = new MockAbstractReplicatedLogImpl();
+
+        assertEquals("size", 0, replicatedLogImpl.size());
+        assertEquals("dataSize", 0, replicatedLogImpl.dataSize());
+        assertEquals("getSnapshotIndex", -1, replicatedLogImpl.getSnapshotIndex());
+        assertEquals("getSnapshotTerm", -1, replicatedLogImpl.getSnapshotTerm());
+        assertEquals("lastIndex", -1, replicatedLogImpl.lastIndex());
+        assertEquals("lastTerm", -1, replicatedLogImpl.lastTerm());
+        assertEquals("isPresent", false, replicatedLogImpl.isPresent(0));
+        assertEquals("isInSnapshot", false, replicatedLogImpl.isInSnapshot(0));
+        Assert.assertNull("get(0)", replicatedLogImpl.get(0));
+        Assert.assertNull("last", replicatedLogImpl.last());
+
+        List<ReplicatedLogEntry> list = replicatedLogImpl.getFrom(0, 1);
+        assertEquals("getFrom size", 0, list.size());
+
+        // removeFrom on an empty log returns -1, meaning nothing was removed.
+        assertEquals("removeFrom", -1, replicatedLogImpl.removeFrom(1));
+
+        replicatedLogImpl.setSnapshotIndex(2);
+        replicatedLogImpl.setSnapshotTerm(1);
+
+        assertEquals("getSnapshotIndex", 2, replicatedLogImpl.getSnapshotIndex());
+        assertEquals("getSnapshotTerm", 1, replicatedLogImpl.getSnapshotTerm());
+        assertEquals("lastIndex", 2, replicatedLogImpl.lastIndex());
+        assertEquals("lastTerm", 1, replicatedLogImpl.lastTerm());
    }
@Test
// now create a snapshot of 3 entries, with 1 unapplied entry left in the log
// It removes the entries which have made it to snapshot
// and updates the snapshot index and term
- Map<Long, String> state = takeSnapshot(3);
+ takeSnapshot(3);
// check the values after the snapshot.
// each index value passed in the test is the logical index (log entry index)
assertEquals(2, replicatedLogImpl.getFrom(6).size());
// take a second snapshot with 5 entries with 0 unapplied entries left in the log
- state = takeSnapshot(5);
+ takeSnapshot(5);
assertEquals(0, replicatedLogImpl.size());
assertNull(replicatedLogImpl.last());
replicatedLogImpl.snapshotPreCommit(-1, -1);
assertEquals(8, replicatedLogImpl.size());
assertEquals(-1, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(-1, replicatedLogImpl.getSnapshotTerm());
- replicatedLogImpl.snapshotPreCommit(4, 3);
+ replicatedLogImpl.snapshotPreCommit(4, 2);
assertEquals(3, replicatedLogImpl.size());
assertEquals(4, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(2, replicatedLogImpl.getSnapshotTerm());
replicatedLogImpl.snapshotPreCommit(6, 3);
assertEquals(1, replicatedLogImpl.size());
assertEquals(6, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(3, replicatedLogImpl.getSnapshotTerm());
replicatedLogImpl.snapshotPreCommit(7, 3);
assertEquals(0, replicatedLogImpl.size());
assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(3, replicatedLogImpl.getSnapshotTerm());
//running it again on an empty list should not throw exception
replicatedLogImpl.snapshotPreCommit(7, 3);
assertEquals(0, replicatedLogImpl.size());
assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(3, replicatedLogImpl.getSnapshotTerm());
+ }
+
+    // Verifies snapshotPreCommit followed by snapshotCommit: entries up to and
+    // including the snapshot index are trimmed and the given index/term become
+    // the log's snapshot index/term.
+    // NOTE(review): assumes the fixture pre-populates the log with entries at
+    // indices 0..3 - confirm against the test setup method (not visible here).
+    @Test
+    public void testSnapshotCommit() {
+
+        replicatedLogImpl.snapshotPreCommit(1, 1);
+
+        replicatedLogImpl.snapshotCommit();
+
+        assertEquals("size", 2, replicatedLogImpl.size());
+        assertEquals("dataSize", 2, replicatedLogImpl.dataSize());
+        assertEquals("getSnapshotIndex", 1, replicatedLogImpl.getSnapshotIndex());
+        assertEquals("getSnapshotTerm", 1, replicatedLogImpl.getSnapshotTerm());
+        assertEquals("lastIndex", 3, replicatedLogImpl.lastIndex());
+        assertEquals("lastTerm", 2, replicatedLogImpl.lastTerm());
+
+        // Snapshotted entries are no longer retrievable; the rest still are.
+        Assert.assertNull("get(0)", replicatedLogImpl.get(0));
+        Assert.assertNull("get(1)", replicatedLogImpl.get(1));
+        Assert.assertNotNull("get(2)", replicatedLogImpl.get(2));
+        Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+    }
+
+    // Verifies that snapshotRollback after snapshotPreCommit restores the log to
+    // its pre-snapshot state (entries, dataSize, and snapshot index/term).
+    // NOTE(review): assumes the fixture pre-populates the log with 4 entries at
+    // indices 0..3 - confirm against the test setup method (not visible here).
+    @Test
+    public void testSnapshotRollback() {
+
+        replicatedLogImpl.snapshotPreCommit(1, 1);
+
+        assertEquals("size", 2, replicatedLogImpl.size());
+        assertEquals("getSnapshotIndex", 1, replicatedLogImpl.getSnapshotIndex());
+        assertEquals("getSnapshotTerm", 1, replicatedLogImpl.getSnapshotTerm());
+
+        replicatedLogImpl.snapshotRollback();
+        assertEquals("size", 4, replicatedLogImpl.size());
+        assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
+        assertEquals("getSnapshotIndex", -1, replicatedLogImpl.getSnapshotIndex());
+        assertEquals("getSnapshotTerm", -1, replicatedLogImpl.getSnapshotTerm());
+        Assert.assertNotNull("get(0)", replicatedLogImpl.get(0));
+        Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+    }
@Test
assertTrue(replicatedLogImpl.isPresent(5));
}
+    // Verifies removeFrom: it returns the adjusted (physical) index of the first
+    // removed entry, updates size/dataSize accordingly, works after a snapshot has
+    // shifted the index base, and returns -1 for out-of-range indices.
+    @Test
+    public void testRemoveFrom() {
+
+        replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("E", 2)));
+        replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("F", 3)));
+
+        assertEquals("dataSize", 9, replicatedLogImpl.dataSize());
+
+        long adjusted = replicatedLogImpl.removeFrom(4);
+        assertEquals("removeFrom - adjusted", 4, adjusted);
+        assertEquals("size", 4, replicatedLogImpl.size());
+        assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
+
+        takeSnapshot(1);
+
+        // After snapshotting one entry the physical index base shifts by 1.
+        adjusted = replicatedLogImpl.removeFrom(2);
+        assertEquals("removeFrom - adjusted", 1, adjusted);
+        assertEquals("size", 1, replicatedLogImpl.size());
+        assertEquals("dataSize", 1, replicatedLogImpl.dataSize());
+
+        // Indices already in the snapshot, or beyond the log, report -1.
+        assertEquals("removeFrom - adjusted", -1, replicatedLogImpl.removeFrom(0));
+        assertEquals("removeFrom - adjusted", -1, replicatedLogImpl.removeFrom(100));
+    }
+
// create a snapshot for test
public Map<Long, String> takeSnapshot(final int numEntries) {
Map<Long, String> map = new HashMap<>(numEntries);
- List<ReplicatedLogEntry> entries = replicatedLogImpl.getEntriesTill(numEntries);
- for (ReplicatedLogEntry entry : entries) {
+
+ long lastIndex = 0;
+ long lastTerm = 0;
+ for(int i = 0; i < numEntries; i++) {
+ ReplicatedLogEntry entry = replicatedLogImpl.getAtPhysicalIndex(i);
map.put(entry.getIndex(), entry.getData().toString());
+ lastIndex = entry.getIndex();
+ lastTerm = entry.getTerm();
}
- int term = (int) replicatedLogImpl.lastTerm();
- int lastIndex = (int) entries.get(entries.size() - 1).getIndex();
- entries.clear();
- replicatedLogImpl.setSnapshotTerm(term);
- replicatedLogImpl.setSnapshotIndex(lastIndex);
+ replicatedLogImpl.snapshotPreCommit(lastIndex, lastTerm);
+ replicatedLogImpl.snapshotCommit();
return map;
public void removeFromAndPersist(final long index) {
}
- @Override
- public int dataSize() {
- return -1;
- }
-
- public List<ReplicatedLogEntry> getEntriesTill(final int index) {
- return journal.subList(0, index);
- }
-
@Override
public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.verify;
+import akka.japi.Procedure;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for ElectionTermImpl.
+ *
+ * @author Thomas Pantelis
+ */
+public class ElectionTermImplTest {
+    // Fix: use this class's own logger. Previously this was a copy-paste of
+    // RaftActorRecoverySupportTest's logger, which mislabeled all log output
+    // emitted by ElectionTermImpl during this test.
+    private static final Logger LOG = LoggerFactory.getLogger(ElectionTermImplTest.class);
+
+    @Mock
+    private DataPersistenceProvider mockPersistence;
+
+    @Before
+    public void setup() {
+        MockitoAnnotations.initMocks(this);
+    }
+
+    // Verifies that updateAndPersist updates the in-memory term/votedFor and
+    // persists a matching UpdateElectionTerm message via the persistence provider.
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    @Test
+    public void testUpdateAndPersist() throws Exception {
+        ElectionTermImpl impl = new ElectionTermImpl(mockPersistence, "test", LOG);
+
+        impl.updateAndPersist(10, "member-1");
+
+        assertEquals("getCurrentTerm", 10, impl.getCurrentTerm());
+        assertEquals("getVotedFor", "member-1", impl.getVotedFor());
+
+        ArgumentCaptor<Object> message = ArgumentCaptor.forClass(Object.class);
+        ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
+        verify(mockPersistence).persist(message.capture(), procedure.capture());
+
+        assertEquals("Message type", UpdateElectionTerm.class, message.getValue().getClass());
+        UpdateElectionTerm update = (UpdateElectionTerm)message.getValue();
+        assertEquals("getCurrentTerm", 10, update.getCurrentTerm());
+        assertEquals("getVotedFor", "member-1", update.getVotedFor());
+
+        // Invoke the captured persist callback to exercise its completion path.
+        procedure.getValue().apply(null);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+
+public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
+
+ final RaftActor actorDelegate;
+ final RaftActorRecoveryCohort recoveryCohortDelegate;
+ final RaftActorSnapshotCohort snapshotCohortDelegate;
+ private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+ private final List<Object> state;
+ private ActorRef roleChangeNotifier;
+ private final CountDownLatch initializeBehaviorComplete = new CountDownLatch(1);
+ private RaftActorRecoverySupport raftActorRecoverySupport;
+ private RaftActorSnapshotMessageSupport snapshotMessageSupport;
+
+ public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
+ private static final long serialVersionUID = 1L;
+ private final Map<String, String> peerAddresses;
+ private final String id;
+ private final Optional<ConfigParams> config;
+ private final DataPersistenceProvider dataPersistenceProvider;
+ private final ActorRef roleChangeNotifier;
+ private RaftActorSnapshotMessageSupport snapshotMessageSupport;
+
+ private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider,
+ ActorRef roleChangeNotifier) {
+ this.peerAddresses = peerAddresses;
+ this.id = id;
+ this.config = config;
+ this.dataPersistenceProvider = dataPersistenceProvider;
+ this.roleChangeNotifier = roleChangeNotifier;
+ }
+
+ @Override
+ public MockRaftActor create() throws Exception {
+ MockRaftActor mockRaftActor = new MockRaftActor(id, peerAddresses, config,
+ dataPersistenceProvider);
+ mockRaftActor.roleChangeNotifier = this.roleChangeNotifier;
+ mockRaftActor.snapshotMessageSupport = snapshotMessageSupport;
+ return mockRaftActor;
+ }
+ }
+
+ public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
+ DataPersistenceProvider dataPersistenceProvider) {
+ super(id, peerAddresses, config);
+ state = new ArrayList<>();
+ this.actorDelegate = mock(RaftActor.class);
+ this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
+ this.snapshotCohortDelegate = mock(RaftActorSnapshotCohort.class);
+ if(dataPersistenceProvider == null){
+ setPersistence(true);
+ } else {
+ setPersistence(dataPersistenceProvider);
+ }
+ }
+
+ public void setRaftActorRecoverySupport(RaftActorRecoverySupport support) {
+ raftActorRecoverySupport = support;
+ }
+
+ @Override
+ public RaftActorRecoverySupport newRaftActorRecoverySupport() {
+ return raftActorRecoverySupport != null ? raftActorRecoverySupport : super.newRaftActorRecoverySupport();
+ }
+
+ @Override
+ protected RaftActorSnapshotMessageSupport newRaftActorSnapshotMessageSupport() {
+ return snapshotMessageSupport != null ? snapshotMessageSupport : super.newRaftActorSnapshotMessageSupport();
+ }
+
+ public void waitForRecoveryComplete() {
+ try {
+ assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void waitForInitializeBehaviorComplete() {
+ try {
+ assertEquals("Behavior initialized", true, initializeBehaviorComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+
+ public void waitUntilLeader(){
+ for(int i = 0;i < 10; i++){
+ if(isLeader()){
+ break;
+ }
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ }
+ }
+
+ public List<Object> getState() {
+ return state;
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, null));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, RaftActorSnapshotMessageSupport snapshotMessageSupport){
+ MockRaftActorCreator creator = new MockRaftActorCreator(peerAddresses, id, config, null, null);
+ creator.snapshotMessageSupport = snapshotMessageSupport;
+ return Props.create(creator);
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, null));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, ActorRef roleChangeNotifier){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, roleChangeNotifier));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, ActorRef roleChangeNotifier,
+ DataPersistenceProvider dataPersistenceProvider){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, roleChangeNotifier));
+ }
+
+
+ @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ actorDelegate.applyState(clientActor, identifier, data);
+ LOG.info("{}: applyState called", persistenceId());
+ }
+
+ @Override
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return this;
+ }
+
+ @Override
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return this;
+ }
+
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ public void appendRecoveredLogEntry(Payload data) {
+ state.add(data);
+ }
+
+ @Override
+ public void applyCurrentLogRecoveryBatch() {
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ actorDelegate.onRecoveryComplete();
+ recoveryComplete.countDown();
+ }
+
+ @Override
+ protected void initializeBehavior() {
+ super.initializeBehavior();
+ initializeBehaviorComplete.countDown();
+ }
+
+ @Override
+ public void applyRecoverySnapshot(byte[] bytes) {
+ recoveryCohortDelegate.applyRecoverySnapshot(bytes);
+ try {
+ Object data = toObject(bytes);
+ if (data instanceof List) {
+ state.addAll((List<?>) data);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
+ LOG.info("{}: createSnapshot called", persistenceId());
+ snapshotCohortDelegate.createSnapshot(actorRef);
+ }
+
+ @Override
+ public void applySnapshot(byte [] snapshot) {
+ LOG.info("{}: applySnapshot called", persistenceId());
+ snapshotCohortDelegate.applySnapshot(snapshot);
+ }
+
+ @Override
+ protected void onStateChanged() {
+ actorDelegate.onStateChanged();
+ }
+
+ @Override
+ protected Optional<ActorRef> getRoleChangeNotifier() {
+ return Optional.fromNullable(roleChangeNotifier);
+ }
+
+ @Override public String persistenceId() {
+ return this.getId();
+ }
+
+ private Object toObject(byte[] bs) throws ClassNotFoundException, IOException {
+ Object obj = null;
+ ByteArrayInputStream bis = null;
+ ObjectInputStream ois = null;
+ try {
+ bis = new ByteArrayInputStream(bs);
+ ois = new ObjectInputStream(bis);
+ obj = ois.readObject();
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (ois != null) {
+ ois.close();
+ }
+ }
+ return obj;
+ }
+
+ public ReplicatedLog getReplicatedLog(){
+ return this.getRaftActorContext().getReplicatedLog();
+ }
+}
\ No newline at end of file
this.size = size;
}
- @Override public Map<GeneratedMessage.GeneratedExtension, String> encode() {
- Map<GeneratedMessage.GeneratedExtension, String> map = new HashMap<GeneratedMessage.GeneratedExtension, String>();
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, String> encode() {
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, String> map = new HashMap<>();
map.put(MockPayloadMessages.value, value);
return map;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotOffer;
+import java.util.Arrays;
+import java.util.Collections;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for RaftActorRecoverySupport.
+ *
+ * @author Thomas Pantelis
+ */
+public class RaftActorRecoverySupportTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
+
+ @Mock
+ private DataPersistenceProvider mockPersistence;
+
+ @Mock
+ private RaftActorBehavior mockBehavior;
+
+ @Mock
+ private RaftActorRecoveryCohort mockCohort;
+
+ private RaftActorRecoverySupport support;
+
+ private RaftActorContext context;
+ private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+
+ context = new RaftActorContextImpl(null, null, "test", new ElectionTermImpl(mockPersistence, "test", LOG),
+ -1, -1, Collections.<String,String>emptyMap(), configParams, LOG);
+
+ support = new RaftActorRecoverySupport(mockPersistence, context , mockBehavior, mockCohort);
+
+ doReturn(true).when(mockPersistence).isRecoveryApplicable();
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior));
+ }
+
+ private void sendMessageToSupport(Object message) {
+ sendMessageToSupport(message, false);
+ }
+
+ private void sendMessageToSupport(Object message, boolean expComplete) {
+ boolean complete = support.handleRecoveryMessage(message);
+ assertEquals("complete", expComplete, complete);
+ }
+
+ @Test
+ public void testOnReplicatedLogEntry() {
+ MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1", 5));
+
+ sendMessageToSupport(logEntry);
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Journal data size", 5, context.getReplicatedLog().dataSize());
+ assertEquals("Last index", 1, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", -1, context.getLastApplied());
+ assertEquals("Commit index", -1, context.getCommitIndex());
+ assertEquals("Snapshot term", -1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", -1, context.getReplicatedLog().getSnapshotIndex());
+ }
+
+ @Test
+ public void testOnApplyJournalEntries() {
+ configParams.setJournalRecoveryLogBatchSize(5);
+
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 3, new MockRaftActorContext.MockPayload("3")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 4, new MockRaftActorContext.MockPayload("4")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 5, new MockRaftActorContext.MockPayload("5")));
+
+ sendMessageToSupport(new ApplyJournalEntries(2));
+
+ assertEquals("Last applied", 2, context.getLastApplied());
+ assertEquals("Commit index", 2, context.getCommitIndex());
+
+ sendMessageToSupport(new ApplyJournalEntries(4));
+
+ assertEquals("Last applied", 4, context.getLastApplied());
+ assertEquals("Last applied", 4, context.getLastApplied());
+
+ sendMessageToSupport(new ApplyJournalEntries(5));
+
+ assertEquals("Last index", 5, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", 5, context.getLastApplied());
+ assertEquals("Commit index", 5, context.getCommitIndex());
+ assertEquals("Snapshot term", -1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", -1, context.getReplicatedLog().getSnapshotIndex());
+
+ InOrder inOrder = Mockito.inOrder(mockCohort);
+ inOrder.verify(mockCohort).startLogRecoveryBatch(5);
+
+ for(int i = 0; i < replicatedLog.size() - 1; i++) {
+ inOrder.verify(mockCohort).appendRecoveredLogEntry(replicatedLog.get(i).getData());
+ }
+
+ inOrder.verify(mockCohort).applyCurrentLogRecoveryBatch();
+ inOrder.verify(mockCohort).startLogRecoveryBatch(5);
+ inOrder.verify(mockCohort).appendRecoveredLogEntry(replicatedLog.get(replicatedLog.size() - 1).getData());
+
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOnApplyLogEntries() {
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+
+ sendMessageToSupport(new ApplyLogEntries(0));
+
+ assertEquals("Last applied", 0, context.getLastApplied());
+ assertEquals("Commit index", 0, context.getCommitIndex());
+ }
+
+ @Test
+ public void testOnSnapshotOffer() {
+
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 3, new MockRaftActorContext.MockPayload("3")));
+
+ byte[] snapshotBytes = {1,2,3,4,5};
+
+ ReplicatedLogEntry unAppliedEntry1 = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 4, new MockRaftActorContext.MockPayload("4", 4));
+
+ ReplicatedLogEntry unAppliedEntry2 = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 5, new MockRaftActorContext.MockPayload("5", 5));
+
+ int lastAppliedDuringSnapshotCapture = 3;
+ int lastIndexDuringSnapshotCapture = 5;
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes, Arrays.asList(unAppliedEntry1, unAppliedEntry2),
+ lastIndexDuringSnapshotCapture, 1, lastAppliedDuringSnapshotCapture, 1);
+
+ SnapshotMetadata metadata = new SnapshotMetadata("test", 6, 12345);
+ SnapshotOffer snapshotOffer = new SnapshotOffer(metadata , snapshot);
+
+ sendMessageToSupport(snapshotOffer);
+
+ assertEquals("Journal log size", 2, context.getReplicatedLog().size());
+ assertEquals("Journal data size", 9, context.getReplicatedLog().dataSize());
+ assertEquals("Last index", lastIndexDuringSnapshotCapture, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", lastAppliedDuringSnapshotCapture, context.getLastApplied());
+ assertEquals("Commit index", lastAppliedDuringSnapshotCapture, context.getCommitIndex());
+ assertEquals("Snapshot term", 1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", lastAppliedDuringSnapshotCapture, context.getReplicatedLog().getSnapshotIndex());
+
+ verify(mockCohort).applyRecoverySnapshot(snapshotBytes);
+ }
+
+ @Test
+ public void testOnRecoveryCompletedWithRemainingBatch() {
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+
+ sendMessageToSupport(new ApplyJournalEntries(1));
+
+ sendMessageToSupport(RecoveryCompleted.getInstance(), true);
+
+ assertEquals("Last applied", 1, context.getLastApplied());
+ assertEquals("Commit index", 1, context.getCommitIndex());
+
+ InOrder inOrder = Mockito.inOrder(mockCohort);
+ inOrder.verify(mockCohort).startLogRecoveryBatch(anyInt());
+
+ for(int i = 0; i < replicatedLog.size(); i++) {
+ inOrder.verify(mockCohort).appendRecoveredLogEntry(replicatedLog.get(i).getData());
+ }
+
+ inOrder.verify(mockCohort).applyCurrentLogRecoveryBatch();
+
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOnRecoveryCompletedWithNoRemainingBatch() {
+ sendMessageToSupport(RecoveryCompleted.getInstance(), true);
+
+ verifyNoMoreInteractions(mockCohort);
+ }
+
+ @Test
+ public void testOnDeprecatedDeleteEntries() {
+ // Exercises recovery of the deprecated (pre-Lithium) RaftActor.DeleteEntries
+ // message, referenced fully-qualified on purpose, to verify backwards
+ // compatibility with journals persisted by older versions.
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+
+ // DeleteEntries(1) removes entries from index 1 onward, leaving only index 0.
+ sendMessageToSupport(new org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries(1));
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Last index", 0, context.getReplicatedLog().lastIndex());
+ }
+
+ @Test
+ public void testOnDeleteEntries() {
+ // Same scenario as testOnDeprecatedDeleteEntries but using the current
+ // base.messages.DeleteEntries message class.
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+
+ // DeleteEntries(1) removes entries from index 1 onward, leaving only index 0.
+ sendMessageToSupport(new DeleteEntries(1));
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Last index", 0, context.getReplicatedLog().lastIndex());
+ }
+
+ @Test
+ public void testUpdateElectionTerm() {
+
+ // A recovered UpdateElectionTerm message should restore both the current
+ // term and the votedFor candidate in the term information.
+ sendMessageToSupport(new UpdateElectionTerm(5, "member2"));
+
+ assertEquals("Current term", 5, context.getTermInformation().getCurrentTerm());
+ assertEquals("Voted For", "member2", context.getTermInformation().getVotedFor());
+ }
+
+ @Test
+ public void testRecoveryWithPersistenceDisabled() {
+ // When isRecoveryApplicable() is false (persistence disabled), every recovery
+ // message must be a no-op: no snapshot applied, no log entries appended,
+ // no indices moved and no election term restored.
+ doReturn(false).when(mockPersistence).isRecoveryApplicable();
+
+ Snapshot snapshot = Snapshot.create(new byte[]{1}, Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1);
+ SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
+
+ sendMessageToSupport(snapshotOffer);
+
+ sendMessageToSupport(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 4, new MockRaftActorContext.MockPayload("4")));
+ sendMessageToSupport(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 5, new MockRaftActorContext.MockPayload("5")));
+
+ sendMessageToSupport(new ApplyJournalEntries(4));
+
+ sendMessageToSupport(new DeleteEntries(5));
+
+ // Also cover the deprecated pre-Lithium DeleteEntries message.
+ sendMessageToSupport(new org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries(5));
+
+ // Everything should still be at its pristine initial state.
+ assertEquals("Journal log size", 0, context.getReplicatedLog().size());
+ assertEquals("Last index", -1, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", -1, context.getLastApplied());
+ assertEquals("Commit index", -1, context.getCommitIndex());
+ assertEquals("Snapshot term", -1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", -1, context.getReplicatedLog().getSnapshotIndex());
+
+ sendMessageToSupport(new UpdateElectionTerm(5, "member2"));
+
+ assertEquals("Current term", 0, context.getTermInformation().getCurrentTerm());
+ assertEquals("Voted For", null, context.getTermInformation().getVotedFor());
+
+ sendMessageToSupport(RecoveryCompleted.getInstance(), true);
+
+ verifyNoMoreInteractions(mockCohort);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+import akka.actor.ActorRef;
+import akka.japi.Procedure;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import akka.persistence.SnapshotMetadata;
+import java.util.Arrays;
+import java.util.Collections;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for RaftActorSnapshotMessageSupport.
+ *
+ * @author Thomas Pantelis
+ */
+public class RaftActorSnapshotMessageSupportTest {
+
+ // Fixed copy-paste error: the logger was previously created with
+ // RaftActorRecoverySupportTest.class, mis-attributing this class's log output.
+ private static final Logger LOG = LoggerFactory.getLogger(RaftActorSnapshotMessageSupportTest.class);
+
+ @Mock
+ private DataPersistenceProvider mockPersistence;
+
+ @Mock
+ private RaftActorBehavior mockBehavior;
+
+ @Mock
+ private RaftActorSnapshotCohort mockCohort;
+
+ @Mock
+ private SnapshotManager mockSnapshotManager;
+
+ @Mock
+ ActorRef mockRaftActorRef;
+
+ private RaftActorSnapshotMessageSupport support;
+
+ private RaftActorContext context;
+ private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+
+ // Use a real context but override getSnapshotManager() so interactions with
+ // the SnapshotManager can be verified via the mock.
+ context = new RaftActorContextImpl(mockRaftActorRef, null, "test",
+ new ElectionTermImpl(mockPersistence, "test", LOG),
+ -1, -1, Collections.<String,String>emptyMap(), configParams, LOG) {
+ @Override
+ public SnapshotManager getSnapshotManager() {
+ return mockSnapshotManager;
+ }
+ };
+
+ support = new RaftActorSnapshotMessageSupport(mockPersistence, context, mockBehavior, mockCohort);
+
+ doReturn(true).when(mockPersistence).isRecoveryApplicable();
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior));
+ }
+
+ // Convenience overload: most tests expect the message to be handled.
+ private void sendMessageToSupport(Object message) {
+ sendMessageToSupport(message, true);
+ }
+
+ // Dispatches the message to the support class and asserts whether it was handled.
+ private void sendMessageToSupport(Object message, boolean expHandled) {
+ boolean handled = support.handleSnapshotMessage(message);
+ assertEquals("complete", expHandled, handled);
+ }
+
+ @Test
+ public void testOnApplySnapshot() {
+
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("1")));
+
+ byte[] snapshotBytes = {1,2,3,4,5};
+
+ ReplicatedLogEntry unAppliedEntry = new MockReplicatedLogEntry(1, 2, new MockPayload("2"));
+
+ long lastAppliedDuringSnapshotCapture = 1;
+ long lastIndexDuringSnapshotCapture = 2;
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes, Arrays.asList(unAppliedEntry),
+ lastIndexDuringSnapshotCapture, 1, lastAppliedDuringSnapshotCapture, 1);
+
+ sendMessageToSupport(new ApplySnapshot(snapshot));
+
+ // The prior log is replaced by the snapshot's unapplied entries and the
+ // context indices reflect the snapshot's capture point.
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Last index", lastIndexDuringSnapshotCapture, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", lastAppliedDuringSnapshotCapture, context.getLastApplied());
+ assertEquals("Commit index", -1, context.getCommitIndex());
+ assertEquals("Snapshot term", 1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", lastAppliedDuringSnapshotCapture, context.getReplicatedLog().getSnapshotIndex());
+
+ verify(mockCohort).applySnapshot(snapshotBytes);
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testOnCaptureSnapshot() throws Exception {
+
+ sendMessageToSupport(new CaptureSnapshot(3, 1, 2, 1, 2, 1));
+
+ // Capture the Procedure passed to SnapshotManager.create and invoke it to
+ // verify it delegates snapshot creation to the cohort.
+ ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
+ verify(mockSnapshotManager).create(procedure.capture());
+
+ procedure.getValue().apply(null);
+
+ verify(mockCohort).createSnapshot(same(mockRaftActorRef));
+ }
+
+ @Test
+ public void testOnCaptureSnapshotReply() {
+
+ byte[] snapshot = {1,2,3,4,5};
+ sendMessageToSupport(new CaptureSnapshotReply(snapshot));
+
+ verify(mockSnapshotManager).persist(same(mockPersistence), same(snapshot), same(mockBehavior), anyLong());
+ }
+
+ @Test
+ public void testOnSaveSnapshotSuccess() {
+
+ long sequenceNumber = 100;
+ sendMessageToSupport(new SaveSnapshotSuccess(new SnapshotMetadata("foo", sequenceNumber, 1234L)));
+
+ verify(mockSnapshotManager).commit(mockPersistence, sequenceNumber);
+ }
+
+ @Test
+ public void testOnSaveSnapshotFailure() {
+
+ // A failed snapshot save must roll back the in-progress snapshot.
+ sendMessageToSupport(new SaveSnapshotFailure(new SnapshotMetadata("foo", 100, 1234L),
+ new Throwable("mock")));
+
+ verify(mockSnapshotManager).rollback();
+ }
+
+ @Test
+ public void testOnCommitSnapshot() {
+
+ // The internal COMMIT_SNAPSHOT message commits with sequence number -1.
+ sendMessageToSupport(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
+
+ verify(mockSnapshotManager).commit(mockPersistence, -1);
+ }
+
+ @Test
+ public void testUnhandledMessage() {
+
+ // Unknown message types must be reported as not handled.
+ sendMessageToSupport("unhandled", false);
+ }
+}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.same;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Terminated;
-import akka.japi.Creator;
import akka.japi.Procedure;
-import akka.pattern.Patterns;
-import akka.persistence.RecoveryCompleted;
import akka.persistence.SaveSnapshotFailure;
import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotMetadata;
import akka.persistence.SnapshotOffer;
-import akka.persistence.SnapshotSelectionCriteria;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import akka.util.Timeout;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
-import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.NonPersistentDataProvider;
-import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
public class RaftActorTest extends AbstractActorTest {
InMemorySnapshotStore.clear();
}
- public static class MockRaftActor extends RaftActor {
-
- private final RaftActor delegate;
- private final CountDownLatch recoveryComplete = new CountDownLatch(1);
- private final List<Object> state;
- private ActorRef roleChangeNotifier;
- private final CountDownLatch initializeBehaviorComplete = new CountDownLatch(1);
-
- public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
- private static final long serialVersionUID = 1L;
- private final Map<String, String> peerAddresses;
- private final String id;
- private final Optional<ConfigParams> config;
- private final DataPersistenceProvider dataPersistenceProvider;
- private final ActorRef roleChangeNotifier;
-
- private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
- Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider,
- ActorRef roleChangeNotifier) {
- this.peerAddresses = peerAddresses;
- this.id = id;
- this.config = config;
- this.dataPersistenceProvider = dataPersistenceProvider;
- this.roleChangeNotifier = roleChangeNotifier;
- }
-
- @Override
- public MockRaftActor create() throws Exception {
- MockRaftActor mockRaftActor = new MockRaftActor(id, peerAddresses, config,
- dataPersistenceProvider);
- mockRaftActor.roleChangeNotifier = this.roleChangeNotifier;
- return mockRaftActor;
- }
- }
-
- public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
- DataPersistenceProvider dataPersistenceProvider) {
- super(id, peerAddresses, config);
- state = new ArrayList<>();
- this.delegate = mock(RaftActor.class);
- if(dataPersistenceProvider == null){
- setPersistence(true);
- } else {
- setPersistence(dataPersistenceProvider);
- }
- }
-
- public void waitForRecoveryComplete() {
- try {
- assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
- public void waitForInitializeBehaviorComplete() {
- try {
- assertEquals("Behavior initialized", true, initializeBehaviorComplete.await(5, TimeUnit.SECONDS));
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
-
- public void waitUntilLeader(){
- for(int i = 0;i < 10; i++){
- if(isLeader()){
- break;
- }
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
- }
- }
-
- public List<Object> getState() {
- return state;
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, null));
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, null));
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config, ActorRef roleChangeNotifier){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, roleChangeNotifier));
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config, ActorRef roleChangeNotifier,
- DataPersistenceProvider dataPersistenceProvider){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, roleChangeNotifier));
- }
-
-
- @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
- delegate.applyState(clientActor, identifier, data);
- LOG.info("{}: applyState called", persistenceId());
- }
-
- @Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
- }
-
- @Override
- protected void appendRecoveredLogEntry(Payload data) {
- state.add(data);
- }
-
- @Override
- protected void applyCurrentLogRecoveryBatch() {
- }
-
- @Override
- protected void onRecoveryComplete() {
- delegate.onRecoveryComplete();
- recoveryComplete.countDown();
- }
-
- @Override
- protected void initializeBehavior() {
- super.initializeBehavior();
- initializeBehaviorComplete.countDown();
- }
-
- @Override
- protected void applyRecoverySnapshot(byte[] bytes) {
- delegate.applyRecoverySnapshot(bytes);
- try {
- Object data = toObject(bytes);
- if (data instanceof List) {
- state.addAll((List<?>) data);
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- @Override protected void createSnapshot() {
- LOG.info("{}: createSnapshot called", persistenceId());
- delegate.createSnapshot();
- }
-
- @Override protected void applySnapshot(byte [] snapshot) {
- LOG.info("{}: applySnapshot called", persistenceId());
- delegate.applySnapshot(snapshot);
- }
-
- @Override protected void onStateChanged() {
- delegate.onStateChanged();
- }
-
- @Override
- protected Optional<ActorRef> getRoleChangeNotifier() {
- return Optional.fromNullable(roleChangeNotifier);
- }
-
- @Override public String persistenceId() {
- return this.getId();
- }
-
- private Object toObject(byte[] bs) throws ClassNotFoundException, IOException {
- Object obj = null;
- ByteArrayInputStream bis = null;
- ObjectInputStream ois = null;
- try {
- bis = new ByteArrayInputStream(bs);
- ois = new ObjectInputStream(bis);
- obj = ois.readObject();
- } finally {
- if (bis != null) {
- bis.close();
- }
- if (ois != null) {
- ois.close();
- }
- }
- return obj;
- }
-
- public ReplicatedLog getReplicatedLog(){
- return this.getRaftActorContext().getReplicatedLog();
- }
-
- }
-
-
- public static class RaftActorTestKit extends JavaTestKit {
- private final ActorRef raftActor;
-
- public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
- super(actorSystem);
-
- raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>absent()), actorName);
-
- }
-
-
- public ActorRef getRaftActor() {
- return raftActor;
- }
-
- public boolean waitForLogMessage(final Class<?> logEventClass, String message){
- // Wait for a specific log message to show up
- return
- new JavaTestKit.EventFilter<Boolean>(logEventClass
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(raftActor.path().toString())
- .message(message)
- .occurrences(1).exec();
-
-
- }
-
- protected void waitUntilLeader(){
- waitUntilLeader(raftActor);
- }
-
- public static void waitUntilLeader(ActorRef actorRef) {
- FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
- for(int i = 0; i < 20 * 5; i++) {
- Future<Object> future = Patterns.ask(actorRef, new FindLeader(), new Timeout(duration));
- try {
- FindLeaderReply resp = (FindLeaderReply) Await.result(future, duration);
- if(resp.getLeaderActor() != null) {
- return;
- }
- } catch(TimeoutException e) {
- } catch(Exception e) {
- System.err.println("FindLeader threw ex");
- e.printStackTrace();
- }
-
-
- Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
- }
-
- Assert.fail("Leader not found for actorRef " + actorRef.path());
- }
-
- }
-
-
@Test
public void testConstruction() {
new RaftActorTestKit(getSystem(), "testConstruction").waitUntilLeader();
}
@Test
- public void testRaftActorRecovery() throws Exception {
+ public void testRaftActorRecoveryWithPersistenceEnabled() throws Exception {
new JavaTestKit(getSystem()) {{
String persistenceId = factory.generateActorId("follower-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
// Set the heartbeat interval high to essentially disable election otherwise the test
// may fail if the actor is switched to Leader and the commitIndex is set to the last
// log entry.
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
+ ImmutableMap.<String, String>builder().put("member1", "address").build(),
+ Optional.<ConfigParams>of(config)), persistenceId);
watch(followerActor);
// add more entries after snapshot is taken
List<ReplicatedLogEntry> entries = new ArrayList<>();
ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
- new MockRaftActorContext.MockPayload("F"));
+ new MockRaftActorContext.MockPayload("F", 2));
ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
- new MockRaftActorContext.MockPayload("G"));
+ new MockRaftActorContext.MockPayload("G", 3));
ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
- new MockRaftActorContext.MockPayload("H"));
+ new MockRaftActorContext.MockPayload("H", 4));
entries.add(entry2);
entries.add(entry3);
entries.add(entry4);
MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
Optional.<ConfigParams>of(config)));
- ref.underlyingActor().waitForRecoveryComplete();
+ MockRaftActor mockRaftActor = ref.underlyingActor();
+
+ mockRaftActor.waitForRecoveryComplete();
- RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+ RaftActorContext context = mockRaftActor.getRaftActorContext();
assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
context.getReplicatedLog().size());
+ assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize());
assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
- assertEquals("Recovered state size", 6, ref.underlyingActor().getState().size());
+ assertEquals("Recovered state size", 6, mockRaftActor.getState().size());
+
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
+ assertEquals("getRaftState", RaftState.Follower, mockRaftActor.getRaftState());
}};
}
@Test
- public void testRaftActorRecoveryWithPreLithuimApplyLogEntries() throws Exception {
+ public void testRaftActorRecoveryWithPersistenceDisabled() throws Exception {
new JavaTestKit(getSystem()) {{
- String persistenceId = factory.generateActorId("leader-");
+ String persistenceId = factory.generateActorId("follower-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- // Setup the persisted journal with some entries
- ReplicatedLogEntry entry0 = new MockRaftActorContext.MockReplicatedLogEntry(1, 0,
- new MockRaftActorContext.MockPayload("zero"));
- ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 1,
- new MockRaftActorContext.MockPayload("oen"));
- ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
- new MockRaftActorContext.MockPayload("two"));
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- long seqNr = 1;
- InMemoryJournal.addEntry(persistenceId, seqNr++, entry0);
- InMemoryJournal.addEntry(persistenceId, seqNr++, entry1);
- InMemoryJournal.addEntry(persistenceId, seqNr++, new ApplyLogEntries(1));
- InMemoryJournal.addEntry(persistenceId, seqNr++, entry2);
+ TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
+ ImmutableMap.<String, String>builder().put("member1", "address").build(),
+ Optional.<ConfigParams>of(config), new NonPersistentDataProvider()), persistenceId);
- int lastAppliedToState = 1;
- int lastIndex = 2;
+ MockRaftActor mockRaftActor = ref.underlyingActor();
- //reinstate the actor
- TestActorRef<MockRaftActor> leaderActor = factory.createTestActor(
- MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
- Optional.<ConfigParams>of(config)));
+ mockRaftActor.waitForRecoveryComplete();
- leaderActor.underlyingActor().waitForRecoveryComplete();
+ mockRaftActor.waitForInitializeBehaviorComplete();
- RaftActorContext context = leaderActor.underlyingActor().getRaftActorContext();
- assertEquals("Journal log size", 3, context.getReplicatedLog().size());
- assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
- assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
- assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
+ assertEquals("getRaftState", RaftState.Follower, mockRaftActor.getRaftState());
}};
}
- /**
- * This test verifies that when recovery is applicable (typically when persistence is true) the RaftActor does
- * process recovery messages
- *
- * @throws Exception
- */
-
@Test
- public void testHandleRecoveryWhenDataPersistenceRecoveryApplicable() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- // Wait for akka's recovery to complete so it doesn't interfere.
- mockRaftActor.waitForRecoveryComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- Lists.<ReplicatedLogEntry>newArrayList(), 3, 1, 3, 1);
-
- mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
-
- verify(mockRaftActor.delegate).applyRecoverySnapshot(eq(snapshotBytes.toByteArray()));
-
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
-
- ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
-
- assertEquals("add replicated log entry", 1, replicatedLog.size());
+ public void testRaftActorForwardsToRaftActorRecoverySupport() {
+ String persistenceId = factory.generateActorId("leader-");
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- assertEquals("add replicated log entry", 2, replicatedLog.size());
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- mockRaftActor.onReceiveRecover(new ApplyJournalEntries(1));
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
- assertEquals("commit index 1", 1, mockRaftActor.getRaftActorContext().getCommitIndex());
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- // The snapshot had 4 items + we added 2 more items during the test
- // We start removing from 5 and we should get 1 item in the replicated log
- mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(5));
+ // Wait for akka's recovery to complete so it doesn't interfere.
+ mockRaftActor.waitForRecoveryComplete();
- assertEquals("remove log entries", 1, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new UpdateElectionTerm(10, "foobar"));
-
- assertEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
- assertEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
-
- mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
-
- }};
- }
-
- /**
- * This test verifies that when recovery is not applicable (typically when persistence is false) the RaftActor does
- * not process recovery messages
- *
- * @throws Exception
- */
- @Test
- public void testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), new DataPersistenceProviderMonitor()), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- // Wait for akka's recovery to complete so it doesn't interfere.
- mockRaftActor.waitForRecoveryComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- Lists.<ReplicatedLogEntry>newArrayList(), 3, 1, 3, 1);
-
- mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
-
- verify(mockRaftActor.delegate, times(0)).applyRecoverySnapshot(any(byte[].class));
-
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
-
- ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
-
- assertEquals("add replicated log entry", 0, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
-
- assertEquals("add replicated log entry", 0, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new ApplyJournalEntries(1));
-
- assertEquals("commit index -1", -1, mockRaftActor.getRaftActorContext().getCommitIndex());
-
- mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(2));
-
- assertEquals("remove log entries", 0, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new UpdateElectionTerm(10, "foobar"));
-
- assertNotEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
- assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
-
- mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
- }};
- }
-
-
- @Test
- public void testUpdatingElectionTermCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
+ RaftActorRecoverySupport mockSupport = mock(RaftActorRecoverySupport.class);
+ mockRaftActor.setRaftActorRecoverySupport(mockSupport );
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ Snapshot snapshot = Snapshot.create(new byte[]{1}, Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1);
+ SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
+ mockRaftActor.handleRecover(snapshotOffer);
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1", 5));
+ mockRaftActor.handleRecover(logEntry);
- CountDownLatch persistLatch = new CountDownLatch(1);
- DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
- dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+ ApplyJournalEntries applyJournalEntries = new ApplyJournalEntries(2);
+ mockRaftActor.handleRecover(applyJournalEntries);
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+ ApplyLogEntries applyLogEntries = new ApplyLogEntries(0);
+ mockRaftActor.handleRecover(applyLogEntries);
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ DeleteEntries deleteEntries = new DeleteEntries(1);
+ mockRaftActor.handleRecover(deleteEntries);
- mockRaftActor.waitForInitializeBehaviorComplete();
+ org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries deprecatedDeleteEntries =
+ new org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries(1);
+ mockRaftActor.handleRecover(deprecatedDeleteEntries);
- mockRaftActor.getRaftActorContext().getTermInformation().updateAndPersist(10, "foobar");
+ UpdateElectionTerm updateElectionTerm = new UpdateElectionTerm(5, "member2");
+ mockRaftActor.handleRecover(updateElectionTerm);
- assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
- }
- };
+ verify(mockSupport).handleRecoveryMessage(same(snapshotOffer));
+ verify(mockSupport).handleRecoveryMessage(same(logEntry));
+ verify(mockSupport).handleRecoveryMessage(same(applyJournalEntries));
+ verify(mockSupport).handleRecoveryMessage(same(applyLogEntries));
+ verify(mockSupport).handleRecoveryMessage(same(deleteEntries));
+ verify(mockSupport).handleRecoveryMessage(same(deprecatedDeleteEntries));
+ verify(mockSupport).handleRecoveryMessage(same(updateElectionTerm));
}
@Test
- public void testAddingReplicatedLogEntryCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ public void testRaftActorForwardsToRaftActorSnapshotMessageSupport() {
+ String persistenceId = factory.generateActorId("leader-");
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- mockRaftActor.waitForInitializeBehaviorComplete();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(10, 10, mock(Payload.class));
+ RaftActorSnapshotMessageSupport mockSupport = mock(RaftActorSnapshotMessageSupport.class);
- mockRaftActor.getRaftActorContext().getReplicatedLog().appendAndPersist(logEntry);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), mockSupport), persistenceId);
- verify(dataPersistenceProvider).persist(eq(logEntry), any(Procedure.class));
- }
- };
- }
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- @Test
- public void testRemovingReplicatedLogEntryCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
+ // Wait for akka's recovery to complete so it doesn't interfere.
+ mockRaftActor.waitForRecoveryComplete();
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ ApplySnapshot applySnapshot = new ApplySnapshot(mock(Snapshot.class));
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(applySnapshot));
+ mockRaftActor.handleCommand(applySnapshot);
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ CaptureSnapshot captureSnapshot = new CaptureSnapshot(1, 1, 1, 1, 0, 1);
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshot));
+ mockRaftActor.handleCommand(captureSnapshot);
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(new byte[0]);
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshotReply));
+ mockRaftActor.handleCommand(captureSnapshotReply);
- mockRaftActor.waitForInitializeBehaviorComplete();
+ SaveSnapshotSuccess saveSnapshotSuccess = new SaveSnapshotSuccess(mock(SnapshotMetadata.class));
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess));
+ mockRaftActor.handleCommand(saveSnapshotSuccess);
- mockRaftActor.waitUntilLeader();
+ SaveSnapshotFailure saveSnapshotFailure = new SaveSnapshotFailure(mock(SnapshotMetadata.class), new Throwable());
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure));
+ mockRaftActor.handleCommand(saveSnapshotFailure);
- mockRaftActor.getReplicatedLog().appendAndPersist(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT));
+ mockRaftActor.handleCommand(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
- mockRaftActor.getRaftActorContext().getReplicatedLog().removeFromAndPersist(0);
-
- verify(dataPersistenceProvider, times(3)).persist(anyObject(), any(Procedure.class));
- }
- };
+ verify(mockSupport).handleSnapshotMessage(same(applySnapshot));
+ verify(mockSupport).handleSnapshotMessage(same(captureSnapshot));
+ verify(mockSupport).handleSnapshotMessage(same(captureSnapshotReply));
+ verify(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess));
+ verify(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure));
+ verify(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT));
}
@Test
};
}
- @Test
- public void testCaptureSnapshotReplyCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
- MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
- Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
-
- raftActorContext.getSnapshotManager().capture(
- new MockRaftActorContext.MockReplicatedLogEntry(1, -1,
- new MockRaftActorContext.MockPayload("D")), -1);
-
- mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
-
- mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
-
- verify(dataPersistenceProvider).saveSnapshot(anyObject());
-
- }
- };
- }
-
- @Test
- public void testSaveSnapshotSuccessCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- ImmutableMap.of("leader", "fake/path"), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
- MockRaftActorContext.MockReplicatedLogEntry lastEntry = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, mock(Payload.class));
-
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 1, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 2, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 3, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(lastEntry);
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
- mockRaftActor.setCurrentBehavior(new Follower(raftActorContext));
-
- long replicatedToAllIndex = 1;
-
- mockRaftActor.getRaftActorContext().getSnapshotManager().capture(lastEntry, replicatedToAllIndex);
-
- verify(mockRaftActor.delegate).createSnapshot();
-
- mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
-
- mockRaftActor.onReceiveCommand(new SaveSnapshotSuccess(new SnapshotMetadata("foo", 100, 100)));
-
- verify(dataPersistenceProvider).deleteSnapshots(any(SnapshotSelectionCriteria.class));
-
- verify(dataPersistenceProvider).deleteMessages(100);
-
- assertEquals(3, mockRaftActor.getReplicatedLog().size());
- assertEquals(1, mockRaftActor.getCurrentBehavior().getReplicatedToAllIndex());
-
- assertNotNull(mockRaftActor.getReplicatedLog().get(2));
- assertNotNull(mockRaftActor.getReplicatedLog().get(3));
- assertNotNull(mockRaftActor.getReplicatedLog().get(4));
-
- // Index 2 will not be in the log because it was removed due to snapshotting
- assertNull(mockRaftActor.getReplicatedLog().get(1));
- assertNull(mockRaftActor.getReplicatedLog().get(0));
-
- }
- };
- }
-
@Test
public void testApplyState() throws Exception {
mockRaftActor.onReceiveCommand(new ApplyState(mockActorRef, "apply-state", entry));
- verify(mockRaftActor.delegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
-
- }
- };
- }
-
- @Test
- public void testApplySnapshot() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- ReplicatedLog oldReplicatedLog = mockRaftActor.getReplicatedLog();
-
- oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
- oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1, 1, mock(Payload.class)));
- oldReplicatedLog.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
- mock(Payload.class)));
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- Snapshot snapshot = mock(Snapshot.class);
-
- doReturn(snapshotBytes.toByteArray()).when(snapshot).getState();
-
- doReturn(3L).when(snapshot).getLastAppliedIndex();
-
- mockRaftActor.onReceiveCommand(new ApplySnapshot(snapshot));
-
- verify(mockRaftActor.delegate).applySnapshot(eq(snapshot.getState()));
-
- assertTrue("The replicatedLog should have changed",
- oldReplicatedLog != mockRaftActor.getReplicatedLog());
-
- assertEquals("lastApplied should be same as in the snapshot",
- (Long) 3L, mockRaftActor.getLastApplied());
-
- assertEquals(0, mockRaftActor.getReplicatedLog().size());
-
- }
- };
- }
-
- @Test
- public void testSaveSnapshotFailure() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
-
- mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
-
- raftActorContext.getSnapshotManager().capture(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 1,
- new MockRaftActorContext.MockPayload("D")), 1);
-
- mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
-
- mockRaftActor.onReceiveCommand(new SaveSnapshotFailure(new SnapshotMetadata("foobar", 10L, 1234L),
- new Exception()));
-
- assertEquals("Snapshot index should not have advanced because save snapshot failed", -1,
- mockRaftActor.getReplicatedLog().getSnapshotIndex());
+ verify(mockRaftActor.actorDelegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
}
};
.capture(new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
new MockRaftActorContext.MockPayload("x")), 4);
- verify(leaderActor.delegate).createSnapshot();
+ verify(leaderActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
assertEquals(8, leaderActor.getReplicatedLog().size());
new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
new MockRaftActorContext.MockPayload("D")), 4);
- verify(followerActor.delegate).createSnapshot();
+ verify(followerActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
assertEquals(6, followerActor.getReplicatedLog().size());
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.pattern.Patterns;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * JavaTestKit extension that instantiates a MockRaftActor and provides
+ * helpers for raft tests: waiting for a specific log message and polling
+ * an actor until it reports an elected leader.
+ */
+public class RaftActorTestKit extends JavaTestKit {
+ // The MockRaftActor under test, created with no peers and default config.
+ private final ActorRef raftActor;
+
+ public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
+ super(actorSystem);
+
+ raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>absent()), actorName);
+
+ }
+
+
+ public ActorRef getRaftActor() {
+ return raftActor;
+ }
+
+ /**
+ * Blocks until the given log message is emitted (exactly once) by the
+ * actor under test, using akka's EventFilter facility.
+ */
+ public boolean waitForLogMessage(final Class<?> logEventClass, String message){
+ // Wait for a specific log message to show up
+ return
+ new JavaTestKit.EventFilter<Boolean>(logEventClass
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from(raftActor.path().toString())
+ .message(message)
+ .occurrences(1).exec();
+
+
+ }
+
+ protected void waitUntilLeader(){
+ waitUntilLeader(raftActor);
+ }
+
+ /**
+ * Polls the given actor with FindLeader (up to 100 attempts, 50ms apart)
+ * until it reports a non-null leader; fails the test otherwise.
+ */
+ public static void waitUntilLeader(ActorRef actorRef) {
+ FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
+ for(int i = 0; i < 20 * 5; i++) {
+ Future<Object> future = Patterns.ask(actorRef, new FindLeader(), new Timeout(duration));
+ try {
+ FindLeaderReply resp = (FindLeaderReply) Await.result(future, duration);
+ if(resp.getLeaderActor() != null) {
+ return;
+ }
+ } catch(TimeoutException e) {
+ // Expected while no leader has been elected yet - fall through and retry.
+ } catch(Exception e) {
+ System.err.println("FindLeader threw ex");
+ e.printStackTrace();
+ }
+
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Leader not found for actorRef " + actorRef.path());
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import akka.japi.Procedure;
+import com.google.common.base.Supplier;
+import java.util.Collections;
+import org.hamcrest.BaseMatcher;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.mockito.internal.matchers.Same;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for ReplicatedLogImpl.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReplicatedLogImplTest {
+    // FIX: logger was created with RaftActorRecoverySupportTest.class (copy-paste
+    // error), misattributing this test's log output to the wrong class.
+    private static final Logger LOG = LoggerFactory.getLogger(ReplicatedLogImplTest.class);
+
+    @Mock
+    private DataPersistenceProvider mockPersistence;
+
+    @Mock
+    private RaftActorBehavior mockBehavior;
+
+    @Mock
+    private SnapshotManager mockSnapshotManager;
+
+    private RaftActorContext context;
+    private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+
+    @Before
+    public void setup() {
+        MockitoAnnotations.initMocks(this);
+
+        // Context whose SnapshotManager is replaced with a mock so capture()
+        // calls can be verified without real snapshotting.
+        context = new RaftActorContextImpl(null, null, "test",
+                new ElectionTermImpl(mockPersistence, "test", LOG),
+                -1, -1, Collections.<String,String>emptyMap(), configParams, LOG)  {
+            @Override
+            public SnapshotManager getSnapshotManager() {
+                return mockSnapshotManager;
+            }
+        };
+    }
+
+    // Verifies persist() was invoked with the given message and then completes
+    // the captured persistence callback as akka's persist() would.
+    private void verifyPersist(Object message) throws Exception {
+        verifyPersist(message, new Same(message));
+    }
+
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    private void verifyPersist(Object message, Matcher<?> matcher) throws Exception {
+        ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
+        verify(mockPersistence).persist(Matchers.argThat(matcher), procedure.capture());
+
+        procedure.getValue().apply(message);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void testAppendAndPersistExpectingNoCapture() throws Exception {
+        ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+
+        MockReplicatedLogEntry logEntry = new MockReplicatedLogEntry(1, 1, new MockPayload("1"));
+
+        log.appendAndPersist(logEntry);
+
+        verifyPersist(logEntry);
+
+        assertEquals("size", 1, log.size());
+
+        reset(mockPersistence);
+
+        // The user-supplied callback must also be invoked after persistence.
+        Procedure<ReplicatedLogEntry> mockCallback = Mockito.mock(Procedure.class);
+        log.appendAndPersist(logEntry, mockCallback);
+
+        verifyPersist(logEntry);
+
+        verify(mockCallback).apply(same(logEntry));
+        verifyNoMoreInteractions(mockSnapshotManager);
+
+        assertEquals("size", 2, log.size());
+    }
+
+    @Test
+    public void testAppendAndPersistExpectingCaptureDueToJournalCount() throws Exception {
+        // With a batch count of 2, the second append should trigger a snapshot capture.
+        configParams.setSnapshotBatchCount(2);
+
+        doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
+
+        ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+
+        MockReplicatedLogEntry logEntry1 = new MockReplicatedLogEntry(1, 2, new MockPayload("2"));
+        MockReplicatedLogEntry logEntry2 = new MockReplicatedLogEntry(1, 3, new MockPayload("3"));
+
+        log.appendAndPersist(logEntry1);
+        verifyPersist(logEntry1);
+
+        verifyNoMoreInteractions(mockSnapshotManager);
+        reset(mockPersistence);
+
+        log.appendAndPersist(logEntry2);
+        verifyPersist(logEntry2);
+
+        verify(mockSnapshotManager).capture(same(logEntry2), eq(1L));
+
+        assertEquals("size", 2, log.size());
+    }
+
+    @Test
+    public void testAppendAndPersistExpectingCaptureDueToDataSize() throws Exception {
+        doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
+
+        // Report a small total memory so a large entry exceeds the data-size threshold.
+        context.setTotalMemoryRetriever(new Supplier<Long>() {
+            @Override
+            public Long get() {
+                return 100L;
+            }
+        });
+
+        ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+
+        int dataSize = 600;
+        MockReplicatedLogEntry logEntry = new MockReplicatedLogEntry(1, 2, new MockPayload("2", dataSize));
+
+        doReturn(true).when(mockSnapshotManager).capture(same(logEntry), eq(1L));
+
+        log.appendAndPersist(logEntry);
+        verifyPersist(logEntry);
+
+        verify(mockSnapshotManager).capture(same(logEntry), eq(1L));
+
+        reset(mockPersistence, mockSnapshotManager);
+
+        // A small follow-up entry must not trigger another capture.
+        logEntry = new MockReplicatedLogEntry(1, 3, new MockPayload("3", 5));
+
+        log.appendAndPersist(logEntry);
+        verifyPersist(logEntry);
+
+        verifyNoMoreInteractions(mockSnapshotManager);
+
+        assertEquals("size", 2, log.size());
+    }
+
+    @Test
+    public void testRemoveFromAndPersist() throws Exception {
+
+        ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+
+        log.append(new MockReplicatedLogEntry(1, 0, new MockPayload("0")));
+        log.append(new MockReplicatedLogEntry(1, 1, new MockPayload("1")));
+        log.append(new MockReplicatedLogEntry(1, 2, new MockPayload("2")));
+
+        log.removeFromAndPersist(1);
+
+        DeleteEntries deleteEntries = new DeleteEntries(1);
+        verifyPersist(deleteEntries, match(deleteEntries));
+
+        assertEquals("size", 1, log.size());
+
+        reset(mockPersistence);
+
+        // Removing from a now-nonexistent index must be a no-op (nothing persisted).
+        log.removeFromAndPersist(1);
+
+        verifyNoMoreInteractions(mockPersistence);
+    }
+
+    // Matcher comparing DeleteEntries by fromIndex since it lacks an equals() override.
+    public Matcher<DeleteEntries> match(final DeleteEntries actual){
+        return new BaseMatcher<DeleteEntries>() {
+            @Override
+            public boolean matches(Object o) {
+                DeleteEntries other = (DeleteEntries) o;
+                return actual.getFromIndex() == other.getFromIndex();
+            }
+
+            @Override
+            public void describeTo(Description description) {
+                description.appendText("DeleteEntries: fromIndex: " + actual.getFromIndex());
+            }
+        };
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit tests for DeleteEntries.
+ *
+ * @author Thomas Pantelis
+ */
+public class DeleteEntriesTest {
+
+    @Test
+    public void testSerialization() {
+        // Round-trip through Java serialization and check the index survives.
+        final DeleteEntries original = new DeleteEntries(11);
+        final DeleteEntries restored = (DeleteEntries) SerializationUtils.clone(original);
+        Assert.assertEquals("getFromIndex", 11, restored.getFromIndex());
+    }
+}
@Nonnull ModificationType getModificationType();
/**
- * Returns after state of top level container.
+ * Returns before-state of top level container. Implementations are encouraged,
+ * but not required to provide this state.
+ *
+ * @return State of object before modification. Null if subtree was not present,
+ * or the implementation cannot provide the state.
+ */
+ @Nullable T getDataBefore();
+
+ /**
+ * Returns after-state of top level container.
*
* @param root Class representing data container
* @return State of object after modification. Null if subtree is not present.
*
* @return Instance identifier corresponding to the root node.
*/
- public @Nonnull InstanceIdentifier<?> getRootIdentifier() {
+ public @Nonnull InstanceIdentifier<T> getRootIdentifier() {
return rootIdentifier;
}
*
* @param <T> Type of Binding Data Object
*/
-class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
+final class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
private final static Logger LOG = LoggerFactory.getLogger(LazyDataObjectModification.class);
return new LazyDataObjectModification<>(codec,domData);
}
- static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
+ private static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
final Collection<DataTreeCandidateNode> domChildNodes) {
final ArrayList<DataObjectModification<? extends DataObject>> result = new ArrayList<>(domChildNodes.size());
populateList(result, parentCodec, domChildNodes);
parentCodec.yangPathArgumentChild(domChildNode.getIdentifier());
populateList(result,type, childCodec, domChildNode);
} catch (final IllegalArgumentException e) {
- if(type == BindingStructuralType.UNKNOWN) {
+ if (type == BindingStructuralType.UNKNOWN) {
LOG.debug("Unable to deserialize unknown DOM node {}",domChildNode,e);
} else {
LOG.debug("Binding representation for DOM node {} was not found",domChildNode,e);
}
}
-
private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
final BindingStructuralType type, final BindingCodecTreeNode<?> childCodec,
final DataTreeCandidateNode domChildNode) {
}
}
+ @Override
+ public T getDataBefore() {
+ return deserialize(domData.getDataBefore());
+ }
+
@Override
public T getDataAfter() {
return deserialize(domData.getDataAfter());
@Override
public Collection<DataObjectModification<? extends DataObject>> getModifiedChildren() {
- if(childNodesCache == null) {
- childNodesCache = from(codec,domData.getChildNodes());
+ if (childNodesCache == null) {
+ childNodesCache = from(codec, domData.getChildNodes());
}
return childNodesCache;
}
}
private T deserialize(final Optional<NormalizedNode<?, ?>> dataAfter) {
- if(dataAfter.isPresent()) {
+ if (dataAfter.isPresent()) {
return codec.deserialize(dataAfter.get());
}
return null;
import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
-@SuppressWarnings("deprecation")
public abstract class AbstractDataServiceTest {
protected DataProviderService baDataService;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
* FIXME: THis test should be moved to sal-binding-broker and rewriten
* to use new DataBroker API
*/
-@SuppressWarnings("deprecation")
public class ConcurrentImplicitCreateTest extends AbstractDataServiceTest {
private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
private static final TopLevelListKey BAR_KEY = new TopLevelListKey("bar");
- private static InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
- private static InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, FOO_KEY);
- private static InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, BAR_KEY);
+ private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
+ private static final InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, FOO_KEY);
+ private static final InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, BAR_KEY);
@Test
public void testConcurrentCreate() throws InterruptedException, ExecutionException {
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-
+import com.google.common.util.concurrent.SettableFuture;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import com.google.common.util.concurrent.SettableFuture;
-
/**
* FIXME: THis test should be moved to compat test-suite
*/
-@SuppressWarnings("deprecation")
public class WildcardedDataChangeListenerTest extends AbstractDataServiceTest {
private static final TopLevelListKey TOP_LEVEL_LIST_0_KEY = new TopLevelListKey("test:0");
package org.opendaylight.controller.sal.binding.test.bugfix;
import static org.junit.Assert.assertFalse;
-
+import com.google.common.util.concurrent.SettableFuture;
import java.util.concurrent.ExecutionException;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import com.google.common.util.concurrent.SettableFuture;
-
-@SuppressWarnings("deprecation")
public class DeleteNestedAugmentationListenParentTest extends AbstractDataServiceTest {
private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-
+import com.google.common.util.concurrent.SettableFuture;
import java.util.concurrent.TimeUnit;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import com.google.common.util.concurrent.SettableFuture;
-
-@SuppressWarnings("deprecation")
public class WriteParentListenAugmentTest extends AbstractDataServiceTest {
private static final String TLL_NAME = "foo";
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-
+import com.google.common.collect.ImmutableList;
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import com.google.common.collect.ImmutableList;
-
-@SuppressWarnings("deprecation")
public class WriteParentReadChildTest extends AbstractDataServiceTest {
private static final int LIST11_ID = 1234;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-
import java.util.concurrent.Future;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-/**
- * FIXME: Migrate to use new Data Broker APIs
- */
-@SuppressWarnings("deprecation")
public class BrokerIntegrationTest extends AbstractDataServiceTest {
private static final TopLevelListKey TLL_FOO_KEY = new TopLevelListKey("foo");
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-@SuppressWarnings("deprecation")
public class CrossBrokerMountPointTest {
private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
assertNotNull(moduleStream);
final List<InputStream> rpcModels = Collections.singletonList(moduleStream);
- @SuppressWarnings("deprecation")
- final
- Set<Module> modules = parser.parseYangModelsFromStreams(rpcModels);
- @SuppressWarnings("deprecation")
- final
- SchemaContext mountSchemaContext = parser.resolveSchemaContext(modules);
+ final Set<Module> modules = parser.parseYangModelsFromStreams(rpcModels);
+ final SchemaContext mountSchemaContext = parser.resolveSchemaContext(modules);
schemaContext = mountSchemaContext;
}
.child(TopLevelList.class, new TopLevelListKey(mount)).toInstance();
}
- @SuppressWarnings("deprecation")
@Test
public void test() throws ExecutionException, InterruptedException {
// FIXME: This is made to only make sure instance identifier codec
*/
void deleteMessages(long sequenceNumber);
+    /**
+     * Returns the last sequence number contained in the journal. Implementations
+     * that have no backing journal return -1.
+     */
+    long getLastSequenceNumber();
}
public void deleteMessages(long sequenceNumber) {
delegate.deleteMessages(sequenceNumber);
}
+
+    // Delegates to the wrapped DataPersistenceProvider.
+    @Override
+    public long getLastSequenceNumber() {
+        return delegate.getLastSequenceNumber();
+    }
}
@Override
public void deleteMessages(long sequenceNumber) {
}
+
+    // This provider does not persist anything (deleteMessages above is a no-op),
+    // so there is no journal to report a sequence number from — return -1.
+    @Override
+    public long getLastSequenceNumber() {
+        return -1;
+    }
}
\ No newline at end of file
public void deleteMessages(long sequenceNumber) {
persistentActor.deleteMessages(sequenceNumber);
}
+
+    // Reports the persistent actor's own journal high-water mark via Akka's
+    // lastSequenceNr().
+    @Override
+    public long getLastSequenceNumber() {
+        return persistentActor.lastSequenceNr();
+    }
}
\ No newline at end of file
return config;
}
- public static abstract class Builder<T extends Builder>{
+ public static abstract class Builder<T extends Builder<T>> {
protected Map<String, Object> configHolder;
protected Config fallback;
return cachedMailBoxPushTimeout;
}
- public static class Builder<T extends Builder> extends AbstractConfig.Builder<T>{
+ public static class Builder<T extends Builder<T>> extends AbstractConfig.Builder<T>{
public Builder(String actorSystemName) {
super(actorSystemName);
import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
-
import java.util.concurrent.CountDownLatch;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
/**
* This class is intended for testing purposes. It just triggers CountDownLatch's in each method.
public void setDeleteMessagesLatch(CountDownLatch deleteMessagesLatch) {
this.deleteMessagesLatch = deleteMessagesLatch;
}
+
+    // Test stub (this class only trips CountDownLatches): no journal, return -1.
+    @Override
+    public long getLastSequenceNumber() {
+        return -1;
+    }
}
return builder.build();
}
- private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder builder, NormalizedNodeMessages.Node node){
+ private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder<?, ?> builder, NormalizedNodeMessages.Node node){
for(NormalizedNodeMessages.Node child : node.getChildList()){
builder.withChild((DataContainerChild<?, ?>) deSerialize(child));
@Override
- public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
+ public Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> encode() {
Preconditions.checkState(byteString!=null);
- Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> map = new HashMap<>();
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> map = new HashMap<>();
map.put(org.opendaylight.controller.protobuff.messages.shard.CompositeModificationPayload.modification,
getModificationInternal());
return map;
this.modification = (PersistentMessages.CompositeModification) Preconditions.checkNotNull(modification, "modification should not be null");
}
- @Override public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> encode() {
Preconditions.checkState(modification!=null);
- Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> map = new HashMap<>();
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> map = new HashMap<>();
map.put(
org.opendaylight.controller.protobuff.messages.shard.CompositeModificationPayload.modification, this.modification);
return map;
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.ImmutableList;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import scala.concurrent.Future;
abstract class AbstractTransactionContext implements TransactionContext {
- private final List<Future<Object>> recordedOperationFutures = new ArrayList<>();
private final TransactionIdentifier identifier;
protected AbstractTransactionContext(TransactionIdentifier identifier) {
this.identifier = identifier;
}
- @Override
- public final void copyRecordedOperationFutures(Collection<Future<Object>> target) {
- target.addAll(recordedOperationFutures);
- }
-
protected final TransactionIdentifier getIdentifier() {
return identifier;
}
-
- protected final Collection<Future<Object>> copyRecordedOperationFutures() {
- return ImmutableList.copyOf(recordedOperationFutures);
- }
-
- protected final int recordedOperationCount() {
- return recordedOperationFutures.size();
- }
-
- protected final void recordOperationFuture(Future<Object> future) {
- recordedOperationFutures.add(future);
- }
-}
+}
\ No newline at end of file
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
-import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
*/
public class Shard extends RaftActor {
- private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
-
private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
@VisibleForTesting
private DatastoreContext datastoreContext;
- private SchemaContext schemaContext;
-
- private int createSnapshotTransactionCounter;
-
private final ShardCommitCoordinator commitCoordinator;
private long transactionCommitTimeout;
private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
Serialization.serializedActorPath(getSelf()));
+ private final DOMTransactionFactory domTransactionFactory;
- /**
- * Coordinates persistence recovery on startup.
- */
- private ShardRecoveryCoordinator recoveryCoordinator;
+ private final ShardTransactionActorFactory transactionActorFactory;
- private final DOMTransactionFactory transactionFactory;
-
- private final String txnDispatcherPath;
+ private final ShardSnapshotCohort snapshotCohort;
private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
this.name = name.toString();
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
- this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
- .getDispatcherPath(Dispatchers.DispatcherType.Transaction);
setPersistence(datastoreContext.isPersistent());
getContext().become(new MeteringBehavior(this));
}
- transactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);
+ domTransactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);
- commitCoordinator = new ShardCommitCoordinator(transactionFactory,
+ commitCoordinator = new ShardCommitCoordinator(domTransactionFactory,
TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);
appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
- recoveryCoordinator = new ShardRecoveryCoordinator(store, persistenceId(), LOG);
+ transactionActorFactory = new ShardTransactionActorFactory(domTransactionFactory, datastoreContext,
+ new Dispatchers(context().system().dispatchers()).getDispatcherPath(
+ Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
+
+ snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
}
private void setTransactionCommitTimeout() {
//
if(isLeader()) {
try {
- BatchedModificationsReply reply = commitCoordinator.handleTransactionModifications(batched);
- sender().tell(reply, self());
+ boolean ready = commitCoordinator.handleTransactionModifications(batched);
+ if(ready) {
+ sender().tell(READY_TRANSACTION_REPLY, self());
+ } else {
+ sender().tell(new BatchedModificationsReply(batched.getModifications().size()), self());
+ }
} catch (Exception e) {
LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
batched.getTransactionID(), e);
// node. In that case, the subsequent 3-phase commit messages won't contain the
// transactionId so to maintain backwards compatibility, we create a separate cohort actor
// to provide the compatible behavior.
- if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
- LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
- ActorRef replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
- ready.getTransactionID()));
+ if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
+ ActorRef replyActorPath = getSelf();
+ if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
+ LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
+ replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
+ ready.getTransactionID()));
+ }
ReadyTransactionReply readyTransactionReply =
- new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath));
+ new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
+ ready.getTxnClientVersion());
getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
- readyTransactionReply, getSelf());
-
+ readyTransactionReply, getSelf());
} else {
-
- getSender().tell(ready.isReturnSerialized() ? READY_TRANSACTION_REPLY.toSerializable() :
- READY_TRANSACTION_REPLY, getSelf());
+ getSender().tell(READY_TRANSACTION_REPLY, getSelf());
}
}
}
private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
- transactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
+ domTransactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
}
private ActorRef createTypedTransactionActor(int transactionType,
ShardTransactionIdentifier transactionId, String transactionChainId,
short clientVersion ) {
- DOMStoreTransaction transaction = transactionFactory.newTransaction(
- TransactionProxy.TransactionType.fromInt(transactionType), transactionId.toString(),
- transactionChainId);
-
- return createShardTransaction(transaction, transactionId, clientVersion);
- }
-
- private ActorRef createShardTransaction(DOMStoreTransaction transaction, ShardTransactionIdentifier transactionId,
- short clientVersion){
- return getContext().actorOf(
- ShardTransaction.props(transaction, getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion)
- .withDispatcher(txnDispatcherPath),
- transactionId.toString());
-
+ return transactionActorFactory.newShardTransaction(TransactionProxy.TransactionType.fromInt(transactionType),
+ transactionId, transactionChainId, clientVersion);
}
private void createTransaction(CreateTransaction createTransaction) {
return transactionActor;
}
- private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
- throws ExecutionException, InterruptedException {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- commitCohort.preCommit().get();
- commitCohort.commit().get();
- }
-
private void commitWithNewTransaction(final Modification modification) {
DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
modification.apply(tx);
try {
- syncCommitTransaction(tx);
+ snapshotCohort.syncCommitTransaction(tx);
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
} catch (InterruptedException | ExecutionException e) {
}
private void updateSchemaContext(final UpdateSchemaContext message) {
- this.schemaContext = message.getSchemaContext();
updateSchemaContext(message.getSchemaContext());
- store.onGlobalContextUpdated(message.getSchemaContext());
}
@VisibleForTesting
}
@Override
- protected
- void startLogRecoveryBatch(final int maxBatchSize) {
- recoveryCoordinator.startLogRecoveryBatch(maxBatchSize);
- }
-
- @Override
- protected void appendRecoveredLogEntry(final Payload data) {
- recoveryCoordinator.appendRecoveredLogPayload(data);
- }
-
- @Override
- protected void applyRecoverySnapshot(final byte[] snapshotBytes) {
- recoveryCoordinator.applyRecoveredSnapshot(snapshotBytes);
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return snapshotCohort;
}
@Override
- protected void applyCurrentLogRecoveryBatch() {
- recoveryCoordinator.applyCurrentLogRecoveryBatch();
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
}
@Override
protected void onRecoveryComplete() {
- recoveryCoordinator = null;
-
//notify shard manager
getContext().parent().tell(new ActorInitialized(), getSelf());
}
}
- @Override
- protected void createSnapshot() {
- // Create a transaction actor. We are really going to treat the transaction as a worker
- // so that this actor does not get block building the snapshot. THe transaction actor will
- // after processing the CreateSnapshot message.
-
- ActorRef createSnapshotTransaction = createTransaction(
- TransactionProxy.TransactionType.READ_ONLY.ordinal(),
- "createSnapshot" + ++createSnapshotTransactionCounter, "",
- DataStoreVersions.CURRENT_VERSION);
-
- createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, self());
- }
-
- @VisibleForTesting
- @Override
- protected void applySnapshot(final byte[] snapshotBytes) {
- // Since this will be done only on Recovery or when this actor is a Follower
- // we can safely commit everything in here. We not need to worry about event notifications
- // as they would have already been disabled on the follower
-
- LOG.info("{}: Applying snapshot", persistenceId());
- try {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
-
- NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-
- // delete everything first
- transaction.delete(DATASTORE_ROOT);
-
- // Add everything from the remote node back
- transaction.write(DATASTORE_ROOT, node);
- syncCommitTransaction(transaction);
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
- } finally {
- LOG.info("{}: Done applying snapshot", persistenceId());
- }
- }
-
@Override
protected void onStateChanged() {
boolean isLeader = isLeader();
persistenceId(), getId());
}
- transactionFactory.closeAllTransactionChains();
+ domTransactionFactory.closeAllTransactionChains();
}
}
+    // Count raft leadership changes in the shard stats MBean; the old/new leader
+    // ids are not needed for the counter and are ignored here.
+    @Override
+    protected void onLeaderChanged(String oldLeader, String newLeader) {
+        shardMBean.incrementLeadershipChangeCount();
+    }
+
@Override
public String persistenceId() {
return this.name;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
*
* @throws ExecutionException if an error occurs loading the cache
*/
- public BatchedModificationsReply handleTransactionModifications(BatchedModifications batched)
+ public boolean handleTransactionModifications(BatchedModifications batched)
throws ExecutionException {
CohortEntry cohortEntry = cohortCache.getIfPresent(batched.getTransactionID());
if(cohortEntry == null) {
cohortEntry.applyModifications(batched.getModifications());
- String cohortPath = null;
if(batched.isReady()) {
if(log.isDebugEnabled()) {
log.debug("{}: Readying Tx {}, client version {}", name,
}
cohortEntry.ready(cohortDecorator);
- cohortPath = shardActorPath;
}
- return new BatchedModificationsReply(batched.getModifications().size(), cohortPath);
+ return batched.isReady();
}
/**
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
private final DOMStoreReadTransaction transaction;
public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
private final DOMStoreReadWriteTransaction transaction;
public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(transaction, shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(transaction, shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
*
* @author Thomas Panetelis
*/
-class ShardRecoveryCoordinator {
+class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
private final InMemoryDOMDataStore store;
private List<ModificationPayload> currentLogRecoveryBatch;
this.log = log;
}
- void startLogRecoveryBatch(int maxBatchSize) {
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
}
- void appendRecoveredLogPayload(Payload payload) {
+ @Override
+ public void appendRecoveredLogEntry(Payload payload) {
try {
if(payload instanceof ModificationPayload) {
currentLogRecoveryBatch.add((ModificationPayload) payload);
/**
* Applies the current batched log entries to the data store.
*/
- void applyCurrentLogRecoveryBatch() {
+ @Override
+ public void applyCurrentLogRecoveryBatch() {
log.debug("{}: Applying current log recovery batch with size {}", shardName, currentLogRecoveryBatch.size());
DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
*
* @param snapshotBytes the serialized snapshot
*/
- void applyRecoveredSnapshot(final byte[] snapshotBytes) {
+ @Override
+ public void applyRecoverySnapshot(final byte[] snapshotBytes) {
log.debug("{}: Applyng recovered sbapshot", shardName);
DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import java.util.concurrent.ExecutionException;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
+import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+
+/**
+ * Participates in raft snapshotting on behalf of a Shard actor.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardSnapshotCohort implements RaftActorSnapshotCohort {
+
+    // Root path of the data tree; used below to delete and then rewrite the
+    // entire store contents when applying a snapshot.
+    private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
+
+    // Monotonically increasing suffix so each snapshot transaction actor gets a
+    // unique name.
+    private int createSnapshotTransactionCounter;
+    private final ShardTransactionActorFactory transactionActorFactory;
+    private final InMemoryDOMDataStore store;
+    private final Logger log;
+    private final String logId;
+
+    ShardSnapshotCohort(ShardTransactionActorFactory transactionActorFactory, InMemoryDOMDataStore store,
+            Logger log, String logId) {
+        this.transactionActorFactory = transactionActorFactory;
+        this.store = store;
+        this.log = log;
+        this.logId = logId;
+    }
+
+    @Override
+    public void createSnapshot(ActorRef actorRef) {
+        // Create a transaction actor. We are really going to treat the transaction as a worker
+        // so that this actor does not get blocked building the snapshot. The transaction actor
+        // performs the remaining snapshot work after processing the CreateSnapshot message.
+
+        ShardTransactionIdentifier transactionID = new ShardTransactionIdentifier(
+                "createSnapshot" + ++createSnapshotTransactionCounter);
+
+        // Read-only transaction: building a snapshot only reads the data tree.
+        ActorRef createSnapshotTransaction = transactionActorFactory.newShardTransaction(
+                TransactionProxy.TransactionType.READ_ONLY, transactionID, "", DataStoreVersions.CURRENT_VERSION);
+
+        createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, actorRef);
+    }
+
+    @Override
+    public void applySnapshot(byte[] snapshotBytes) {
+        // Since this will be done only on Recovery or when this actor is a Follower
+        // we can safely commit everything in here. We need not worry about event notifications
+        // as they would have already been disabled on the follower
+
+        log.info("{}: Applying snapshot", logId);
+
+        try {
+            DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+
+            NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+
+            // delete everything first
+            transaction.delete(DATASTORE_ROOT);
+
+            // Add everything from the remote node back
+            transaction.write(DATASTORE_ROOT, node);
+            syncCommitTransaction(transaction);
+        } catch (InterruptedException | ExecutionException e) {
+            // Best-effort: log and continue; the shard is left with whatever state
+            // the partial apply produced. NOTE(review): consider rethrowing — confirm
+            // with RaftActorSnapshotCohort's contract.
+            log.error("{}: An exception occurred when applying snapshot", logId, e);
+        } finally {
+            log.info("{}: Done applying snapshot", logId);
+        }
+
+    }
+
+    // Readies the given write transaction and synchronously drives its pre-commit
+    // and commit phases to completion.
+    void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
+            throws ExecutionException, InterruptedException {
+        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
+        commitCohort.preCommit().get();
+        commitCohort.commit().get();
+    }
+}
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* The ShardTransaction Actor represents a remote transaction
protected static final boolean SERIALIZED_REPLY = true;
private final ActorRef shardActor;
- private final SchemaContext schemaContext;
private final ShardStats shardStats;
private final String transactionID;
private final short clientTxVersion;
- protected ShardTransaction(ActorRef shardActor, SchemaContext schemaContext,
- ShardStats shardStats, String transactionID, short clientTxVersion) {
+ protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, String transactionID,
+ short clientTxVersion) {
super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
this.shardActor = shardActor;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
this.transactionID = transactionID;
this.clientTxVersion = clientTxVersion;
}
public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats,
- String transactionID, short txnClientVersion) {
- return Props.create(new ShardTransactionCreator(transaction, shardActor, schemaContext,
+ DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
+ return Props.create(new ShardTransactionCreator(transaction, shardActor,
datastoreContext, shardStats, transactionID, txnClientVersion));
}
return transactionID;
}
- protected SchemaContext getSchemaContext() {
- return schemaContext;
- }
-
protected short getClientTxVersion() {
return clientTxVersion;
}
final DOMStoreTransaction transaction;
final ActorRef shardActor;
- final SchemaContext schemaContext;
final DatastoreContext datastoreContext;
final ShardStats shardStats;
final String transactionID;
final short txnClientVersion;
ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, DatastoreContext datastoreContext,
- ShardStats shardStats, String transactionID, short txnClientVersion) {
+ DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
this.transaction = transaction;
this.shardActor = shardActor;
this.shardStats = shardStats;
- this.schemaContext = schemaContext;
this.datastoreContext = datastoreContext;
this.transactionID = transactionID;
this.txnClientVersion = txnClientVersion;
ShardTransaction tx;
if(transaction instanceof DOMStoreReadWriteTransaction) {
tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
+ shardActor, shardStats, transactionID, txnClientVersion);
} else if(transaction instanceof DOMStoreReadTransaction) {
tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- schemaContext, shardStats, transactionID, txnClientVersion);
+ shardStats, transactionID, txnClientVersion);
} else {
tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
+ shardActor, shardStats, transactionID, txnClientVersion);
}
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
private final DOMStoreTransactionChain chain;
private final DatastoreContext datastoreContext;
- private final SchemaContext schemaContext;
private final ShardStats shardStats;
- public ShardTransactionChain(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, ShardStats shardStats) {
+ public ShardTransactionChain(DOMStoreTransactionChain chain, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
}
TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
createTransaction.getVersion()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
createTransaction.getVersion()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
createTransaction.getVersion()), transactionName);
} else {
throw new IllegalArgumentException (
public static Props props(DOMStoreTransactionChain chain, SchemaContext schemaContext,
DatastoreContext datastoreContext, ShardStats shardStats) {
- return Props.create(new ShardTransactionChainCreator(chain, schemaContext,
- datastoreContext, shardStats));
+ return Props.create(new ShardTransactionChainCreator(chain, datastoreContext, shardStats));
}
private static class ShardTransactionChainCreator implements Creator<ShardTransactionChain> {
final DOMStoreTransactionChain chain;
final DatastoreContext datastoreContext;
- final SchemaContext schemaContext;
final ShardStats shardStats;
- ShardTransactionChainCreator(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, ShardStats shardStats) {
+ ShardTransactionChainCreator(DOMStoreTransactionChain chain, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
}
@Override
public ShardTransactionChain create() throws Exception {
- return new ShardTransactionChain(chain, schemaContext, datastoreContext, shardStats);
+ return new ShardTransactionChain(chain, datastoreContext, shardStats);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.UntypedActorContext;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
+
+/**
+ * A factory for creating ShardTransaction actors.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardTransactionActorFactory {
+
+ // Creates the backing DOMStoreTransaction for each new transaction actor.
+ private final DOMTransactionFactory domTransactionFactory;
+ private final DatastoreContext datastoreContext;
+ // Akka dispatcher path assigned to the created ShardTransaction actors.
+ private final String txnDispatcherPath;
+ private final ShardStats shardMBean;
+ // Context of the owning shard actor, used to spawn the transaction actors as children.
+ private final UntypedActorContext actorContext;
+ private final ActorRef shardActor;
+
+ ShardTransactionActorFactory(DOMTransactionFactory domTransactionFactory, DatastoreContext datastoreContext,
+ String txnDispatcherPath, ActorRef shardActor, UntypedActorContext actorContext, ShardStats shardMBean) {
+ this.domTransactionFactory = domTransactionFactory;
+ this.datastoreContext = datastoreContext;
+ this.txnDispatcherPath = txnDispatcherPath;
+ this.shardMBean = shardMBean;
+ this.actorContext = actorContext;
+ this.shardActor = shardActor;
+ }
+
+ /**
+ * Creates a new ShardTransaction actor wrapping a freshly-created DOMStoreTransaction.
+ *
+ * @param type the transaction type (read-only, read-write or write-only)
+ * @param transactionID the shard transaction identifier; its string form is used as the actor name
+ * @param transactionChainID the transaction chain ID, if this transaction belongs to a chain
+ * @param clientVersion the version of the remote client
+ * @return the ActorRef of the new ShardTransaction actor
+ */
+ ActorRef newShardTransaction(TransactionProxy.TransactionType type, ShardTransactionIdentifier transactionID,
+ String transactionChainID, short clientVersion) {
+
+ DOMStoreTransaction transaction = domTransactionFactory.newTransaction(type, transactionID.toString(),
+ transactionChainID);
+
+ return actorContext.actorOf(ShardTransaction.props(transaction, shardActor, datastoreContext, shardMBean,
+ transactionID.getRemoteTransactionId(), clientVersion).withDispatcher(txnDispatcherPath),
+ transactionID.toString());
+ }
+}
/*
- *
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
public class ShardWriteTransaction extends ShardTransaction {
private final MutableCompositeModification compositeModification = new MutableCompositeModification();
+ private int totalBatchedModificationsReceived;
+ private Exception lastBatchedModificationsException;
private final DOMStoreWriteTransaction transaction;
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
modification.apply(transaction);
}
- getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ totalBatchedModificationsReceived++;
+ if(batched.isReady()) {
+ if(lastBatchedModificationsException != null) {
+ throw lastBatchedModificationsException;
+ }
+
+ if(totalBatchedModificationsReceived != batched.getTotalMessagesSent()) {
+ throw new IllegalStateException(String.format(
+ "The total number of batched messages received %d does not match the number sent %d",
+ totalBatchedModificationsReceived, batched.getTotalMessagesSent()));
+ }
+
+ readyTransaction(transaction, false);
+ } else {
+ getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ }
} catch (Exception e) {
+ lastBatchedModificationsException = e;
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+
+ if(batched.isReady()) {
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
}
}
import akka.actor.ActorSelection;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import scala.concurrent.Future;
void readData(final YangInstanceIdentifier path, SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture);
void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture);
-
- void copyRecordedOperationFutures(Collection<Future<Object>> target);
}
/*
* Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
private final OperationCompleter operationCompleter;
private BatchedModifications batchedModifications;
+ private int totalBatchedModificationsSent;
protected TransactionContextImpl(ActorSelection actor, TransactionIdentifier identifier,
String transactionChainId, ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- getIdentifier(), recordedOperationCount());
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
- // Send the remaining batched modifications if any.
+ // Send the remaining batched modifications, if any, with the ready flag set.
- sendAndRecordBatchedModifications();
+ Future<Object> lastModificationsFuture = sendBatchedModifications(true);
- // Send the ReadyTransaction message to the Tx actor.
-
- Future<Object> readyReplyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
-
- return combineRecordedOperationsFutures(readyReplyFuture);
+ return transformReadyReply(lastModificationsFuture);
}
- protected Future<ActorSelection> combineRecordedOperationsFutures(final Future<Object> withLastReplyFuture) {
- // Combine all the previously recorded put/merge/delete operation reply Futures and the
- // ReadyTransactionReply Future into one Future. If any one fails then the combined
- // Future will fail. We need all prior operations and the ready operation to succeed
- // in order to attempt commit.
-
- List<Future<Object>> futureList = Lists.newArrayListWithCapacity(recordedOperationCount() + 1);
- copyRecordedOperationFutures(futureList);
- futureList.add(withLastReplyFuture);
-
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
- actorContext.getClientDispatcher());
-
- // Transform the combined Future into a Future that returns the cohort actor path from
- // the ReadyTransactionReply. That's the end result of the ready operation.
+ protected Future<ActorSelection> transformReadyReply(final Future<Object> readyReplyFuture) {
+ // Transform the last reply Future into a Future that returns the cohort actor path from
+ // the last reply message. That's the end result of the ready operation.
- return combinedFutures.transform(new Mapper<Iterable<Object>, ActorSelection>() {
+ return readyReplyFuture.transform(new Mapper<Object, ActorSelection>() {
@Override
- public ActorSelection checkedApply(Iterable<Object> notUsed) {
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
- getIdentifier());
-
- // At this point all the Futures succeeded and we need to extract the cohort
- // actor path from the ReadyTransactionReply. For the recorded operations, they
- // don't return any data so we're only interested that they completed
- // successfully. We could be paranoid and verify the correct reply types but
- // that really should never happen so it's not worth the overhead of
- // de-serializing each reply.
-
- // Note the Future get call here won't block as it's complete.
- Object serializedReadyReply = withLastReplyFuture.value().get().get();
- if (serializedReadyReply instanceof ReadyTransactionReply) {
- return actorContext.actorSelection(((ReadyTransactionReply)serializedReadyReply).getCohortPath());
- } else if(serializedReadyReply instanceof BatchedModificationsReply) {
- return actorContext.actorSelection(((BatchedModificationsReply)serializedReadyReply).getCohortPath());
- } else if(serializedReadyReply.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
- String cohortPath = deserializeCohortPath(reply.getCohortPath());
- return actorContext.actorSelection(cohortPath);
- } else {
- // Throwing an exception here will fail the Future.
- throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
- getIdentifier(), serializedReadyReply.getClass()));
+ public ActorSelection checkedApply(Object serializedReadyReply) {
+ LOG.debug("Tx {} readyTransaction", getIdentifier());
+
+ // At this point the ready operation succeeded and we need to extract the cohort
+ // actor path from the reply.
+ if(ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
+ ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
+ return actorContext.actorSelection(extractCohortPathFrom(readyTxReply));
}
+
+ // Throwing an exception here will fail the Future.
+ throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
+ getIdentifier(), serializedReadyReply.getClass()));
}
}, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
}
- protected String deserializeCohortPath(String cohortPath) {
- return cohortPath;
+ protected String extractCohortPathFrom(ReadyTransactionReply readyTxReply) {
+ return readyTxReply.getCohortPath();
+ }
+
+ private BatchedModifications newBatchedModifications() {
+ return new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion, transactionChainId);
}
private void batchModification(Modification modification) {
if(batchedModifications == null) {
- batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
- transactionChainId);
+ batchedModifications = newBatchedModifications();
}
batchedModifications.addModification(modification);
if(batchedModifications.getModifications().size() >=
actorContext.getDatastoreContext().getShardBatchedModificationCount()) {
- sendAndRecordBatchedModifications();
- }
- }
-
- private void sendAndRecordBatchedModifications() {
- Future<Object> sentFuture = sendBatchedModifications();
- if(sentFuture != null) {
- recordOperationFuture(sentFuture);
+ sendBatchedModifications();
}
}
protected Future<Object> sendBatchedModifications(boolean ready) {
Future<Object> sent = null;
- if(batchedModifications != null) {
+ if(ready || (batchedModifications != null && !batchedModifications.getModifications().isEmpty())) {
+ if(batchedModifications == null) {
+ batchedModifications = newBatchedModifications();
+ }
+
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
batchedModifications.getModifications().size(), ready);
}
batchedModifications.setReady(ready);
+ batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
sent = executeOperationAsync(batchedModifications);
- batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
- transactionChainId);
+ if(ready) {
+ batchedModifications = null;
+ } else {
+ batchedModifications = newBatchedModifications();
+ }
}
return sent;
// Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
// public API contract.
- sendAndRecordBatchedModifications();
+ sendBatchedModifications();
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
// Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
// public API contract.
- sendAndRecordBatchedModifications();
+ sendBatchedModifications();
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
+ * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
+ * retry task after a short delay.
+ * <p>
+ * The end result from a completed CreateTransaction message is a TransactionContext that is
+ * used to perform transaction operations. Transaction operations that occur before the
+ * CreateTransaction completes are cached and executed once the CreateTransaction completes,
+ * successfully or not.
+ */
+final class TransactionFutureCallback extends OnComplete<Object> {
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionFutureCallback.class);
+
+ /**
+ * Time interval in between transaction create retries.
+ */
+ private static final FiniteDuration CREATE_TX_TRY_INTERVAL = FiniteDuration.create(1, TimeUnit.SECONDS);
+
+ /**
+ * The list of transaction operations to execute once the CreateTransaction completes.
+ */
+ @GuardedBy("txOperationsOnComplete")
+ private final List<TransactionOperation> txOperationsOnComplete = Lists.newArrayList();
+ private final TransactionProxy proxy;
+ private final String shardName;
+
+ /**
+ * The TransactionContext resulting from the CreateTransaction reply.
+ */
+ private volatile TransactionContext transactionContext;
+
+ /**
+ * The target primary shard.
+ */
+ private volatile ActorSelection primaryShard;
+ private volatile int createTxTries;
+
+ TransactionFutureCallback(final TransactionProxy proxy, final String shardName) {
+ this.proxy = Preconditions.checkNotNull(proxy);
+ this.shardName = shardName;
+ // Derive the retry budget from the shard leader election timeout so retries continue
+ // for roughly as long as an election could take.
+ createTxTries = (int) (proxy.getActorContext().getDatastoreContext().
+ getShardLeaderElectionTimeout().duration().toMillis() /
+ CREATE_TX_TRY_INTERVAL.toMillis());
+ }
+
+ String getShardName() {
+ return shardName;
+ }
+
+ TransactionContext getTransactionContext() {
+ return transactionContext;
+ }
+
+ private TransactionType getTransactionType() {
+ return proxy.getTransactionType();
+ }
+
+ private TransactionIdentifier getIdentifier() {
+ return proxy.getIdentifier();
+ }
+
+ private ActorContext getActorContext() {
+ return proxy.getActorContext();
+ }
+
+ private Semaphore getOperationLimiter() {
+ return proxy.getOperationLimiter();
+ }
+
+ /**
+ * Sets the target primary shard and initiates a CreateTransaction try.
+ */
+ void setPrimaryShard(ActorSelection primaryShard) {
+ this.primaryShard = primaryShard;
+
+ if (getTransactionType() == TransactionType.WRITE_ONLY &&
+ getActorContext().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
+ LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
+ getIdentifier(), primaryShard);
+
+ // For write-only Tx's we prepare the transaction modifications directly on the shard actor
+ // to avoid the overhead of creating a separate transaction actor.
+ // FIXME: can't assume the shard version is LITHIUM_VERSION - need to obtain it somehow.
+ executeTxOperatonsOnComplete(proxy.createValidTransactionContext(this.primaryShard,
+ this.primaryShard.path().toString(), DataStoreVersions.LITHIUM_VERSION));
+ } else {
+ tryCreateTransaction();
+ }
+ }
+
+ /**
+ * Adds a TransactionOperation to be executed after the CreateTransaction completes.
+ */
+ private void addTxOperationOnComplete(TransactionOperation operation) {
+ boolean invokeOperation = true;
+ synchronized(txOperationsOnComplete) {
+ if(transactionContext == null) {
+ LOG.debug("Tx {} Adding operation on complete", getIdentifier());
+
+ invokeOperation = false;
+ txOperationsOnComplete.add(operation);
+ }
+ }
+
+ if(invokeOperation) {
+ // The context was already published, so run the operation immediately (outside the lock).
+ operation.invoke(transactionContext);
+ }
+ }
+
+ void enqueueTransactionOperation(final TransactionOperation op) {
+
+ if (transactionContext != null) {
+ op.invoke(transactionContext);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ addTxOperationOnComplete(op);
+ }
+ }
+
+ /**
+ * Performs a CreateTransaction try async.
+ */
+ private void tryCreateTransaction() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(), primaryShard);
+ }
+
+ Object serializedCreateMessage = new CreateTransaction(getIdentifier().toString(),
+ getTransactionType().ordinal(), proxy.getTransactionChainId()).toSerializable();
+
+ Future<Object> createTxFuture = getActorContext().executeOperationAsync(primaryShard, serializedCreateMessage);
+
+ createTxFuture.onComplete(this, getActorContext().getClientDispatcher());
+ }
+
+ @Override
+ public void onComplete(Throwable failure, Object response) {
+ if(failure instanceof NoShardLeaderException) {
+ // There's no leader for the shard yet - schedule and try again, unless we're out
+ // of retries. Note: createTxTries is volatile as it may be written by different
+ // threads however not concurrently, therefore decrementing it non-atomically here
+ // is ok.
+ if(--createTxTries > 0) {
+ LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
+ getIdentifier(), shardName);
+
+ getActorContext().getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
+ new Runnable() {
+ @Override
+ public void run() {
+ tryCreateTransaction();
+ }
+ }, getActorContext().getClientDispatcher());
+ return;
+ }
+ }
+
+ createTransactionContext(failure, response);
+ }
+
+ void createTransactionContext(Throwable failure, Object response) {
+ // Mainly checking for state violation here to perform a volatile read of "initialized" to
+ // ensure updates to operationLimiter et al are visible to this thread (ie we're doing
+ // "piggy-back" synchronization here).
+ proxy.ensureInitializied();
+
+ // Create the TransactionContext from the response or failure. Store the new
+ // TransactionContext locally until we've completed invoking the
+ // TransactionOperations. This avoids thread timing issues which could cause
+ // out-of-order TransactionOperations. Eg, on a modification operation, if the
+ // TransactionContext is non-null, then we directly call the TransactionContext.
+ // However, at the same time, the code may be executing the cached
+ // TransactionOperations. So to avoid this timing, we don't publish the
+ // TransactionContext until after we've executed all cached TransactionOperations.
+ TransactionContext localTransactionContext;
+ if(failure != null) {
+ LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
+
+ localTransactionContext = new NoOpTransactionContext(failure, getIdentifier(), getOperationLimiter());
+ } else if (CreateTransactionReply.SERIALIZABLE_CLASS.equals(response.getClass())) {
+ localTransactionContext = createValidTransactionContext(
+ CreateTransactionReply.fromSerializable(response));
+ } else {
+ IllegalArgumentException exception = new IllegalArgumentException(String.format(
+ "Invalid reply type %s for CreateTransaction", response.getClass()));
+
+ localTransactionContext = new NoOpTransactionContext(exception, getIdentifier(), getOperationLimiter());
+ }
+
+ executeTxOperatonsOnComplete(localTransactionContext);
+ }
+
+ // NOTE(review): method name has a typo ("Operatons" -> "Operations") - consider renaming in a follow-up.
+ private void executeTxOperatonsOnComplete(TransactionContext localTransactionContext) {
+ while(true) {
+ // Access to txOperationsOnComplete and transactionContext must be protected and atomic
+ // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
+ // issues and ensure no TransactionOperation is missed and that they are processed
+ // in the order they occurred.
+
+ // We'll make a local copy of the txOperationsOnComplete list to handle re-entrancy
+ // in case a TransactionOperation results in another transaction operation being
+ // queued (eg a put operation from a client read Future callback that is notified
+ // synchronously).
+ Collection<TransactionOperation> operationsBatch = null;
+ synchronized(txOperationsOnComplete) {
+ if(txOperationsOnComplete.isEmpty()) {
+ // We're done invoking the TransactionOperations so we can now publish the
+ // TransactionContext.
+ transactionContext = localTransactionContext;
+ break;
+ }
+
+ operationsBatch = new ArrayList<>(txOperationsOnComplete);
+ txOperationsOnComplete.clear();
+ }
+
+ // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking.
+ // A slight down-side is that we need to re-acquire the lock below but this should
+ // be negligible.
+ for(TransactionOperation oper: operationsBatch) {
+ oper.invoke(localTransactionContext);
+ }
+ }
+ }
+
+ private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
+ LOG.debug("Tx {} Received {}", getIdentifier(), reply);
+
+ return proxy.createValidTransactionContext(getActorContext().actorSelection(reply.getTransactionPath()),
+ reply.getTransactionPath(), reply.getVersion());
+ }
+
+}
\ No newline at end of file
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.controller.cluster.datastore.compat.PreLithiumTransactionContextImpl;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
import scala.concurrent.Promise;
-import scala.concurrent.duration.FiniteDuration;
/**
* TransactionProxy acts as a proxy for one or more transactions that were created on a remote shard
private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
- /**
- * Time interval in between transaction create retries.
- */
- private static final FiniteDuration CREATE_TX_TRY_INTERVAL =
- FiniteDuration.create(1, TimeUnit.SECONDS);
-
/**
* Stores the remote Tx actors for each requested data store path to be used by the
* PhantomReference to close the remote Tx's. This is only used for read-only Tx's. The
return new TransactionIdentifier(memberName, counter.getAndIncrement());
}
- @VisibleForTesting
- List<Future<Object>> getRecordedOperationFutures() {
- List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if (transactionContext != null) {
- transactionContext.copyRecordedOperationFutures(recordedOperationFutures);
- }
- }
-
- return recordedOperationFutures;
- }
-
@VisibleForTesting
boolean hasTransactionContext() {
for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
return false;
}
+ private boolean isRootPath(YangInstanceIdentifier path){
+ return !path.getPathArguments().iterator().hasNext();
+ }
+
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
LOG.debug("Tx {} read {}", getIdentifier(), path);
- throttleOperation();
-
final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture = SettableFuture.create();
- TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.readData(path, proxyFuture);
- }
- });
+ if(isRootPath(path)){
+ readAllData(path, proxyFuture);
+ } else {
+ throttleOperation();
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.readData(path, proxyFuture);
+ }
+ });
+
+ }
return MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
+ private void readAllData(final YangInstanceIdentifier path,
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture) {
+ Set<String> allShardNames = actorContext.getConfiguration().getAllShardNames();
+ List<SettableFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
+
+ for(String shardName : allShardNames){
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> subProxyFuture = SettableFuture.create();
+
+ throttleOperation();
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(shardName);
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.readData(path, subProxyFuture);
+ }
+ });
+
+ futures.add(subProxyFuture);
+ }
+
+ final ListenableFuture<List<Optional<NormalizedNode<?, ?>>>> future = Futures.allAsList(futures);
+
+ future.addListener(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ proxyFuture.set(NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.builder().build(),
+ future.get(), actorContext.getSchemaContext()));
+ } catch (InterruptedException | ExecutionException e) {
+ proxyFuture.setException(e);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ }
+
@Override
public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
}
}
+ final void ensureInitializied() {
+ Preconditions.checkState(initialized, "Transaction %s was not propertly initialized.", getIdentifier());
+ }
+
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
return actorContext.findPrimaryShardAsync(shardName);
}
+ final TransactionType getTransactionType() {
+ return transactionType;
+ }
+
+ final Semaphore getOperationLimiter() {
+ return operationLimiter;
+ }
+
private TransactionFutureCallback getOrCreateTxFutureCallback(YangInstanceIdentifier path) {
String shardName = shardNameFromIdentifier(path);
+ return getOrCreateTxFutureCallback(shardName);
+ }
+
+ private TransactionFutureCallback getOrCreateTxFutureCallback(String shardName) {
TransactionFutureCallback txFutureCallback = txFutureCallbackMap.get(shardName);
if(txFutureCallback == null) {
Future<ActorSelection> findPrimaryFuture = sendFindPrimaryShardAsync(shardName);
- final TransactionFutureCallback newTxFutureCallback = new TransactionFutureCallback(shardName);
+ final TransactionFutureCallback newTxFutureCallback = new TransactionFutureCallback(this, shardName);
txFutureCallback = newTxFutureCallback;
txFutureCallbackMap.put(shardName, txFutureCallback);
return txFutureCallback;
}
- public String getTransactionChainId() {
+ String getTransactionChainId() {
return transactionChainId;
}
return actorContext;
}
- /**
- * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
- * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
- * retry task after a short delay.
- * <p>
- * The end result from a completed CreateTransaction message is a TransactionContext that is
- * used to perform transaction operations. Transaction operations that occur before the
- * CreateTransaction completes are cache and executed once the CreateTransaction completes,
- * successfully or not.
- */
- private class TransactionFutureCallback extends OnComplete<Object> {
-
- /**
- * The list of transaction operations to execute once the CreateTransaction completes.
- */
- @GuardedBy("txOperationsOnComplete")
- private final List<TransactionOperation> txOperationsOnComplete = Lists.newArrayList();
-
- /**
- * The TransactionContext resulting from the CreateTransaction reply.
- */
- private volatile TransactionContext transactionContext;
-
- /**
- * The target primary shard.
- */
- private volatile ActorSelection primaryShard;
+ TransactionContext createValidTransactionContext(ActorSelection transactionActor,
+ String transactionPath, short remoteTransactionVersion) {
- private volatile int createTxTries = (int) (actorContext.getDatastoreContext().
- getShardLeaderElectionTimeout().duration().toMillis() /
- CREATE_TX_TRY_INTERVAL.toMillis());
+ if (transactionType == TransactionType.READ_ONLY) {
+ // Read-only Tx's aren't explicitly closed by the client so we create a PhantomReference
+ // to close the remote Tx's when this instance is no longer in use and is garbage
+ // collected.
- private final String shardName;
+ if(remoteTransactionActorsMB == null) {
+ remoteTransactionActors = Lists.newArrayList();
+ remoteTransactionActorsMB = new AtomicBoolean();
- TransactionFutureCallback(String shardName) {
- this.shardName = shardName;
- }
-
- String getShardName() {
- return shardName;
- }
-
- TransactionContext getTransactionContext() {
- return transactionContext;
- }
-
-
- /**
- * Sets the target primary shard and initiates a CreateTransaction try.
- */
- void setPrimaryShard(ActorSelection primaryShard) {
- this.primaryShard = primaryShard;
-
- if(transactionType == TransactionType.WRITE_ONLY &&
- actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
- LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
- getIdentifier(), primaryShard);
-
- // For write-only Tx's we prepare the transaction modifications directly on the shard actor
- // to avoid the overhead of creating a separate transaction actor.
- // FIXME: can't assume the shard version is LITHIUM_VERSION - need to obtain it somehow.
- executeTxOperatonsOnComplete(createValidTransactionContext(this.primaryShard,
- this.primaryShard.path().toString(), DataStoreVersions.LITHIUM_VERSION));
- } else {
- tryCreateTransaction();
- }
- }
-
- /**
- * Adds a TransactionOperation to be executed after the CreateTransaction completes.
- */
- void addTxOperationOnComplete(TransactionOperation operation) {
- boolean invokeOperation = true;
- synchronized(txOperationsOnComplete) {
- if(transactionContext == null) {
- LOG.debug("Tx {} Adding operation on complete", getIdentifier());
-
- invokeOperation = false;
- txOperationsOnComplete.add(operation);
- }
+ TransactionProxyCleanupPhantomReference.track(TransactionProxy.this);
}
- if(invokeOperation) {
- operation.invoke(transactionContext);
- }
- }
+ // Add the actor to the remoteTransactionActors list for access by the
+            // cleanup PhantomReference.
+ remoteTransactionActors.add(transactionActor);
- void enqueueTransactionOperation(final TransactionOperation op) {
-
- if (transactionContext != null) {
- op.invoke(transactionContext);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- addTxOperationOnComplete(op);
- }
- }
-
- /**
- * Performs a CreateTransaction try async.
- */
- private void tryCreateTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(), primaryShard);
- }
-
- Object serializedCreateMessage = new CreateTransaction(getIdentifier().toString(),
- TransactionProxy.this.transactionType.ordinal(),
- getTransactionChainId()).toSerializable();
-
- Future<Object> createTxFuture = actorContext.executeOperationAsync(primaryShard, serializedCreateMessage);
-
- createTxFuture.onComplete(this, actorContext.getClientDispatcher());
- }
-
- @Override
- public void onComplete(Throwable failure, Object response) {
- if(failure instanceof NoShardLeaderException) {
- // There's no leader for the shard yet - schedule and try again, unless we're out
- // of retries. Note: createTxTries is volatile as it may be written by different
- // threads however not concurrently, therefore decrementing it non-atomically here
- // is ok.
- if(--createTxTries > 0) {
- LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
- getIdentifier(), shardName);
-
- actorContext.getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
- new Runnable() {
- @Override
- public void run() {
- tryCreateTransaction();
- }
- }, actorContext.getClientDispatcher());
- return;
- }
- }
-
- createTransactionContext(failure, response);
- }
-
- private void createTransactionContext(Throwable failure, Object response) {
- // Mainly checking for state violation here to perform a volatile read of "initialized" to
- // ensure updates to operationLimter et al are visible to this thread (ie we're doing
- // "piggy-back" synchronization here).
- Preconditions.checkState(initialized, "Tx was not propertly initialized.");
-
- // Create the TransactionContext from the response or failure. Store the new
- // TransactionContext locally until we've completed invoking the
- // TransactionOperations. This avoids thread timing issues which could cause
- // out-of-order TransactionOperations. Eg, on a modification operation, if the
- // TransactionContext is non-null, then we directly call the TransactionContext.
- // However, at the same time, the code may be executing the cached
- // TransactionOperations. So to avoid thus timing, we don't publish the
- // TransactionContext until after we've executed all cached TransactionOperations.
- TransactionContext localTransactionContext;
- if(failure != null) {
- LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
-
- localTransactionContext = new NoOpTransactionContext(failure, getIdentifier(), operationLimiter);
- } else if (CreateTransactionReply.SERIALIZABLE_CLASS.equals(response.getClass())) {
- localTransactionContext = createValidTransactionContext(
- CreateTransactionReply.fromSerializable(response));
- } else {
- IllegalArgumentException exception = new IllegalArgumentException(String.format(
- "Invalid reply type %s for CreateTransaction", response.getClass()));
-
- localTransactionContext = new NoOpTransactionContext(exception, getIdentifier(), operationLimiter);
- }
-
- executeTxOperatonsOnComplete(localTransactionContext);
- }
-
- private void executeTxOperatonsOnComplete(TransactionContext localTransactionContext) {
- while(true) {
- // Access to txOperationsOnComplete and transactionContext must be protected and atomic
- // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
- // issues and ensure no TransactionOperation is missed and that they are processed
- // in the order they occurred.
-
- // We'll make a local copy of the txOperationsOnComplete list to handle re-entrancy
- // in case a TransactionOperation results in another transaction operation being
- // queued (eg a put operation from a client read Future callback that is notified
- // synchronously).
- Collection<TransactionOperation> operationsBatch = null;
- synchronized(txOperationsOnComplete) {
- if(txOperationsOnComplete.isEmpty()) {
- // We're done invoking the TransactionOperations so we can now publish the
- // TransactionContext.
- transactionContext = localTransactionContext;
- break;
- }
-
- operationsBatch = new ArrayList<>(txOperationsOnComplete);
- txOperationsOnComplete.clear();
- }
-
- // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking.
- // A slight down-side is that we need to re-acquire the lock below but this should
- // be negligible.
- for(TransactionOperation oper: operationsBatch) {
- oper.invoke(localTransactionContext);
- }
- }
- }
-
- private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
- LOG.debug("Tx {} Received {}", getIdentifier(), reply);
-
- return createValidTransactionContext(actorContext.actorSelection(reply.getTransactionPath()),
- reply.getTransactionPath(), reply.getVersion());
+ // Write to the memory barrier volatile to publish the above update to the
+ // remoteTransactionActors list for thread visibility.
+ remoteTransactionActorsMB.set(true);
}
- private TransactionContext createValidTransactionContext(ActorSelection transactionActor,
- String transactionPath, short remoteTransactionVersion) {
-
- if (transactionType == TransactionType.READ_ONLY) {
- // Read-only Tx's aren't explicitly closed by the client so we create a PhantomReference
- // to close the remote Tx's when this instance is no longer in use and is garbage
- // collected.
+ // TxActor is always created where the leader of the shard is.
+ // Check if TxActor is created in the same node
+ boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
- if(remoteTransactionActorsMB == null) {
- remoteTransactionActors = Lists.newArrayList();
- remoteTransactionActorsMB = new AtomicBoolean();
-
- TransactionProxyCleanupPhantomReference.track(TransactionProxy.this);
- }
-
- // Add the actor to the remoteTransactionActors list for access by the
- // cleanup PhantonReference.
- remoteTransactionActors.add(transactionActor);
-
- // Write to the memory barrier volatile to publish the above update to the
- // remoteTransactionActors list for thread visibility.
- remoteTransactionActorsMB.set(true);
- }
-
- // TxActor is always created where the leader of the shard is.
- // Check if TxActor is created in the same node
- boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
-
- if(remoteTransactionVersion < DataStoreVersions.LITHIUM_VERSION) {
- return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, getIdentifier(),
- transactionChainId, actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion,
- operationCompleter);
- } else if (transactionType == TransactionType.WRITE_ONLY &&
- actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
- return new WriteOnlyTransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
+ if(remoteTransactionVersion < DataStoreVersions.LITHIUM_VERSION) {
+ return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, getIdentifier(),
+ transactionChainId, actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion,
+ operationCompleter);
+ } else {
+ return new TransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
- } else {
- return new TransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
- actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
- }
}
}
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Context for a write-only transaction.
- *
- * @author Thomas Pantelis
- */
-public class WriteOnlyTransactionContextImpl extends TransactionContextImpl {
- private static final Logger LOG = LoggerFactory.getLogger(WriteOnlyTransactionContextImpl.class);
-
- public WriteOnlyTransactionContextImpl(ActorSelection actor, TransactionIdentifier identifier,
- String transactionChainId, ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
- short remoteTransactionVersion, OperationCompleter operationCompleter) {
- super(actor, identifier, transactionChainId, actorContext, schemaContext, isTxActorLocal,
- remoteTransactionVersion, operationCompleter);
- }
-
- @Override
- public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- getIdentifier(), recordedOperationCount());
-
- // Send the remaining batched modifications if any.
-
- Future<Object> lastModificationsFuture = sendBatchedModifications(true);
-
- return combineRecordedOperationsFutures(lastModificationsFuture);
- }
-}
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@Override
public void deleteData(YangInstanceIdentifier path) {
- recordOperationFuture(executeOperationAsync(
- new DeleteData(path, getRemoteTransactionVersion())));
+ executeOperationAsync(new DeleteData(path, getRemoteTransactionVersion()));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordOperationFuture(executeOperationAsync(
- new MergeData(path, data, getRemoteTransactionVersion())));
+ executeOperationAsync(new MergeData(path, data, getRemoteTransactionVersion()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordOperationFuture(executeOperationAsync(
- new WriteData(path, data, getRemoteTransactionVersion())));
+ executeOperationAsync(new WriteData(path, data, getRemoteTransactionVersion()));
}
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- getIdentifier(), recordedOperationCount());
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
// Send the ReadyTransaction message to the Tx actor.
Future<Object> lastReplyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
- return combineRecordedOperationsFutures(lastReplyFuture);
+ return transformReadyReply(lastReplyFuture);
}
@Override
- protected String deserializeCohortPath(String cohortPath) {
+ protected String extractCohortPathFrom(ReadyTransactionReply readyTxReply) {
// In base Helium we used to return the local path of the actor which represented
// a remote ThreePhaseCommitCohort. The local path would then be converted to
// a remote path using this resolvePath method. To maintain compatibility with
// we could remove this code to resolvePath and just use the cohortPath as the
// resolved cohortPath
if(getRemoteTransactionVersion() < DataStoreVersions.HELIUM_1_VERSION) {
- return getActorContext().resolvePath(transactionPath, cohortPath);
+ return getActorContext().resolvePath(transactionPath, readyTxReply.getCohortPath());
}
- return cohortPath;
+ return readyTxReply.getCohortPath();
}
}
private String statRetrievalTime;
+ private long leadershipChangeCount;
+
+ private long lastLeadershipChangeTime;
+
public ShardStats(final String shardName, final String mxBeanType) {
super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
}
getOnDemandRaftState();
return statRetrievalError;
}
+
+ @Override
+ public long getLeadershipChangeCount() {
+ return leadershipChangeCount;
+ }
+
+ public void incrementLeadershipChangeCount() {
+ leadershipChangeCount++;
+ lastLeadershipChangeTime = System.currentTimeMillis();
+ }
+
+ @Override
+ public String getLastLeadershipChangeTime() {
+ return DATE_FORMAT.format(new Date(lastLeadershipChangeTime));
+ }
}
List<FollowerInfo> getFollowerInfo();
String getPeerAddresses();
+
+ long getLeadershipChangeCount();
+
+ String getLastLeadershipChangeTime();
}
private static final long serialVersionUID = 1L;
private boolean ready;
+ private int totalMessagesSent;
private String transactionID;
private String transactionChainID;
this.ready = ready;
}
+ public int getTotalMessagesSent() {
+ return totalMessagesSent;
+ }
+
+ public void setTotalMessagesSent(int totalMessagesSent) {
+ this.totalMessagesSent = totalMessagesSent;
+ }
+
public String getTransactionID() {
return transactionID;
}
transactionID = in.readUTF();
transactionChainID = in.readUTF();
ready = in.readBoolean();
+ totalMessagesSent = in.readInt();
}
@Override
out.writeUTF(transactionID);
out.writeUTF(transactionChainID);
out.writeBoolean(ready);
+ out.writeInt(totalMessagesSent);
}
@Override
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("BatchedModifications [transactionID=").append(transactionID).append(", ready=").append(ready)
- .append(", modifications size=").append(getModifications().size()).append("]");
+ builder.append("BatchedModifications [transactionID=").append(transactionID).append(", transactionChainID=")
+ .append(transactionChainID).append(", ready=").append(ready).append(", totalMessagesSent=")
+ .append(totalMessagesSent).append(", modifications size=").append(getModifications().size())
+ .append("]");
return builder.toString();
}
}
public class BatchedModificationsReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
- private static final byte COHORT_PATH_NOT_PRESENT = 0;
- private static final byte COHORT_PATH_PRESENT = 1;
-
private int numBatched;
- private String cohortPath;
public BatchedModificationsReply() {
}
this.numBatched = numBatched;
}
- public BatchedModificationsReply(int numBatched, String cohortPath) {
- this.numBatched = numBatched;
- this.cohortPath = cohortPath;
- }
-
public int getNumBatched() {
return numBatched;
}
- public String getCohortPath() {
- return cohortPath;
- }
-
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
numBatched = in.readInt();
-
- if(in.readByte() == COHORT_PATH_PRESENT) {
- cohortPath = in.readUTF();
- }
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeInt(numBatched);
-
- if(cohortPath != null) {
- out.writeByte(COHORT_PATH_PRESENT);
- out.writeUTF(cohortPath);
- } else {
- out.writeByte(COHORT_PATH_NOT_PRESENT);
- }
}
@Override
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("BatchedModificationsReply [numBatched=").append(numBatched).append(", cohortPath=")
- .append(cohortPath).append("]");
+ builder.append("BatchedModificationsReply [numBatched=").append(numBatched).append("]");
return builder.toString();
}
}
private final DOMStoreThreePhaseCommitCohort cohort;
private final Modification modification;
private final boolean returnSerialized;
- private final int txnClientVersion;
+ private final short txnClientVersion;
- public ForwardedReadyTransaction(String transactionID, int txnClientVersion,
+ public ForwardedReadyTransaction(String transactionID, short txnClientVersion,
DOMStoreThreePhaseCommitCohort cohort, Modification modification,
boolean returnSerialized) {
this.transactionID = transactionID;
return returnSerialized;
}
- public int getTxnClientVersion() {
+ public short getTxnClientVersion() {
return txnClientVersion;
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+@Deprecated
public class ReadyTransaction implements SerializableMessage{
public static final Class<ShardTransactionMessages.ReadyTransaction> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadyTransaction.class;
package org.opendaylight.controller.cluster.datastore.messages;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-public class ReadyTransactionReply implements SerializableMessage {
+public class ReadyTransactionReply extends VersionedExternalizableMessage {
+ private static final long serialVersionUID = 1L;
+
public static final Class<ShardTransactionMessages.ReadyTransactionReply> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadyTransactionReply.class;
- private final String cohortPath;
+ private String cohortPath;
+
+ public ReadyTransactionReply() {
+ }
public ReadyTransactionReply(String cohortPath) {
+ this(cohortPath, DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public ReadyTransactionReply(String cohortPath, short version) {
+ super(version);
this.cohortPath = cohortPath;
}
}
@Override
- public ShardTransactionMessages.ReadyTransactionReply toSerializable() {
- return ShardTransactionMessages.ReadyTransactionReply.newBuilder()
- .setActorPath(cohortPath)
- .build();
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ super.readExternal(in);
+ cohortPath = in.readUTF();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal(out);
+ out.writeUTF(cohortPath);
+ }
+
+ @Override
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
+ return this;
+ } else {
+ return ShardTransactionMessages.ReadyTransactionReply.newBuilder().setActorPath(cohortPath).build();
+ }
}
public static ReadyTransactionReply fromSerializable(Object serializable) {
- ShardTransactionMessages.ReadyTransactionReply o =
- (ShardTransactionMessages.ReadyTransactionReply) serializable;
+ if(serializable instanceof ReadyTransactionReply) {
+ return (ReadyTransactionReply)serializable;
+ } else {
+ ShardTransactionMessages.ReadyTransactionReply o =
+ (ShardTransactionMessages.ReadyTransactionReply) serializable;
+ return new ReadyTransactionReply(o.getActorPath(), DataStoreVersions.HELIUM_2_VERSION);
+ }
+ }
- return new ReadyTransactionReply(o.getActorPath());
+ public static boolean isSerializedType(Object message) {
+ return message instanceof ReadyTransactionReply ||
+ message instanceof ShardTransactionMessages.ReadyTransactionReply;
}
}
out.write(serializedPayload);
}
- @SuppressWarnings("rawtypes")
@Override
@Deprecated
public <T> Map<GeneratedExtension, T> encode() {
return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
}
+ public Configuration getConfiguration() {
+ return configuration;
+ }
+
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout){
return ask(actorRef, message, timeout);
}
private static final Context NO_OP_CONTEXT = new NoOpContext();
- private final Class expectedMessageClass;
+ private final Class<?> expectedMessageClass;
private final long expectedArrivalInterval;
* @param expectedArrivalIntervalInMillis The expected arrival interval between two instances of the expected
* message
*/
- public MessageTracker(Class expectedMessageClass, long expectedArrivalIntervalInMillis){
+ public MessageTracker(Class<?> expectedMessageClass, long expectedArrivalIntervalInMillis){
this.expectedMessageClass = expectedMessageClass;
this.expectedArrivalInterval = expectedArrivalIntervalInMillis;
}
}
public static class MessageProcessingTime {
- private final Class messageClass;
+ private final Class<?> messageClass;
private final long elapsedTimeInNanos;
- MessageProcessingTime(Class messageClass, long elapsedTimeInNanos){
+ MessageProcessingTime(Class<?> messageClass, long elapsedTimeInNanos){
this.messageClass = messageClass;
this.elapsedTimeInNanos = elapsedTimeInNanos;
}
'}';
}
- public Class getMessageClass() {
+ public Class<?> getMessageClass() {
return messageClass;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class NormalizedNodeAggregator {
+
+ private static final ExecutorService executorService = MoreExecutors.newDirectExecutorService();
+
+ private final YangInstanceIdentifier rootIdentifier;
+ private final List<Optional<NormalizedNode<?, ?>>> nodes;
+ private final InMemoryDOMDataStore dataStore;
+
+ NormalizedNodeAggregator(YangInstanceIdentifier rootIdentifier, List<Optional<NormalizedNode<?, ?>>> nodes,
+ SchemaContext schemaContext){
+
+ this.rootIdentifier = rootIdentifier;
+ this.nodes = nodes;
+ this.dataStore = new InMemoryDOMDataStore("aggregator", executorService);
+ this.dataStore.onGlobalContextUpdated(schemaContext);
+ }
+
+ /**
+     * Combines the data from all the nodes in the list into a single tree rooted at rootIdentifier.
+     * @param rootIdentifier the root path under which the node data is merged
+     * @param nodes the per-shard read results to merge
+     * @param schemaContext the schema context used by the in-memory aggregation store
+     * @return the aggregated root node, or absent if no shard returned data
+     * @throws ExecutionException if committing the merged data to the in-memory store fails
+     * @throws InterruptedException if aggregation is interrupted
+ */
+ public static Optional<NormalizedNode<?,?>> aggregate(YangInstanceIdentifier rootIdentifier,
+ List<Optional<NormalizedNode<?, ?>>> nodes,
+ SchemaContext schemaContext)
+ throws ExecutionException, InterruptedException {
+ return new NormalizedNodeAggregator(rootIdentifier, nodes, schemaContext).aggregate();
+ }
+
+ private Optional<NormalizedNode<?,?>> aggregate() throws ExecutionException, InterruptedException {
+ return combine().getRootNode();
+ }
+
+ private NormalizedNodeAggregator combine() throws InterruptedException, ExecutionException {
+ DOMStoreWriteTransaction domStoreWriteTransaction = dataStore.newWriteOnlyTransaction();
+
+ for(Optional<NormalizedNode<?,?>> node : nodes) {
+ if(node.isPresent()) {
+ domStoreWriteTransaction.merge(rootIdentifier, node.get());
+ }
+ }
+ DOMStoreThreePhaseCommitCohort ready = domStoreWriteTransaction.ready();
+ ready.canCommit().get();
+ ready.preCommit().get();
+ ready.commit().get();
+
+ return this;
+ }
+
+ private Optional<NormalizedNode<?, ?>> getRootNode() throws InterruptedException, ExecutionException {
+ DOMStoreReadTransaction readTransaction = dataStore.newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
+ readTransaction.read(rootIdentifier);
+
+ return read.get();
+ }
+
+
+}
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
return argThat(matcher);
}
- protected Future<Object> readySerializedTxReply(String path) {
- return Futures.successful((Object)new ReadyTransactionReply(path).toSerializable());
- }
-
protected Future<Object> readyTxReply(String path) {
return Futures.successful((Object)new ReadyTransactionReply(path));
}
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
- protected void expectBatchedModificationsReady(ActorRef actorRef, int count) {
- Future<BatchedModificationsReply> replyFuture = Futures.successful(
- new BatchedModificationsReply(count, actorRef.path().toString()));
- doReturn(replyFuture).when(mockActorContext).executeOperationAsync(
+ protected void expectBatchedModificationsReady(ActorRef actorRef) {
+ doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
any(ActorSelection.class), isA(BatchedModifications.class));
}
- protected void expectReadyTransaction(ActorRef actorRef) {
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
- }
-
protected void expectFailedBatchedModifications(ActorRef actorRef) {
doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.pattern.Patterns;
+import akka.persistence.SaveSnapshotSuccess;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Function;
waitUntilLeader(shard);
- final String transactionID1 = "tx1";
- final String transactionID2 = "tx2";
- final String transactionID3 = "tx3";
+ // Setup 3 simulated transactions with mock cohorts backed by real cohorts.
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort1 = new AtomicReference<>();
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort2 = new AtomicReference<>();
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort3 = new AtomicReference<>();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID, DOMStoreThreePhaseCommitCohort actual) {
- if(transactionID.equals(transactionID1)) {
- mockCohort1.set(createDelegatingMockCohort("cohort1", actual));
- return mockCohort1.get();
- } else if(transactionID.equals(transactionID2)) {
- mockCohort2.set(createDelegatingMockCohort("cohort2", actual));
- return mockCohort2.get();
- } else {
- mockCohort3.set(createDelegatingMockCohort("cohort3", actual));
- return mockCohort3.get();
- }
- }
- };
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
+
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ modification2);
+
+ String transactionID3 = "tx3";
+ MutableCompositeModification modification3 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ modification3);
long timeoutSec = 5;
final FiniteDuration duration = FiniteDuration.create(timeoutSec, TimeUnit.SECONDS);
final Timeout timeout = new Timeout(duration);
- // Send a BatchedModifications message for the first transaction.
+ // Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- BatchedModificationsReply batchedReply = expectMsgClass(duration, BatchedModificationsReply.class);
- assertEquals("getCohortPath", shard.path().toString(), batchedReply.getCohortPath());
- assertEquals("getNumBatched", 1, batchedReply.getNumBatched());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
+ expectMsgClass(duration, ReadyTransactionReply.class));
+ assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the CanCommitTransaction message for the first Tx.
expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Can commit", true, canCommitReply.getCanCommit());
- // Send BatchedModifications for the next 2 Tx's.
+ // Send the ForwardedReadyTransaction for the next 2 Tx's.
- shard.tell(newBatchedModifications(transactionID2, TestModel.OUTER_LIST_PATH,
- ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID3, YangInstanceIdentifier.builder(
- TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ cohort3, modification3, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
// processed after the first Tx completes.
assertEquals("Commits complete", true, done);
- InOrder inOrder = inOrder(mockCohort1.get(), mockCohort2.get(), mockCohort3.get());
- inOrder.verify(mockCohort1.get()).canCommit();
- inOrder.verify(mockCohort1.get()).preCommit();
- inOrder.verify(mockCohort1.get()).commit();
- inOrder.verify(mockCohort2.get()).canCommit();
- inOrder.verify(mockCohort2.get()).preCommit();
- inOrder.verify(mockCohort2.get()).commit();
- inOrder.verify(mockCohort3.get()).canCommit();
- inOrder.verify(mockCohort3.get()).preCommit();
- inOrder.verify(mockCohort3.get()).commit();
+ InOrder inOrder = inOrder(cohort1, cohort2, cohort3);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort1).commit();
+ inOrder.verify(cohort2).canCommit();
+ inOrder.verify(cohort2).preCommit();
+ inOrder.verify(cohort2).commit();
+ inOrder.verify(cohort3).canCommit();
+ inOrder.verify(cohort3).preCommit();
+ inOrder.verify(cohort3).commit();
// Verify data in the data store.
shard.tell(newBatchedModifications(transactionID, YangInstanceIdentifier.builder(
TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
YangInstanceIdentifier path = TestModel.TEST_PATH;
shard.tell(newBatchedModifications(transactionID1, transactionChainID, path,
containerNode, true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Create a read Tx on the same chain.
waitUntilLeader(shard);
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
+ // Setup a simulated transaction with a mock cohort.
+
String transactionID = "tx";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort", dataStore,
+ TestModel.TEST_PATH, containerNode, modification);
+
FiniteDuration duration = duration("5 seconds");
- // Send a BatchedModifications to start a transaction.
+ // Simulate the ForwardedReadyTransaction message that would be sent
+ // by the ShardTransaction.
- NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH, containerNode, true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
+
NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
waitUntilLeader(shard);
- // Setup 2 mock cohorts. The first one fails in the commit phase.
+ // Setup 2 simulated transactions with mock cohorts. The first one fails in the
+ // commit phase.
- final String transactionID1 = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).preCommit();
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).commit();
- final String transactionID2 = "tx2";
- final DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return transactionID1.equals(transactionID) ? cohort1 : cohort2;
- }
- };
-
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
FiniteDuration duration = duration("5 seconds");
final Timeout timeout = new Timeout(duration);
- // Send BatchedModifications to start and ready each transaction.
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
waitUntilLeader(shard);
String transactionID = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).preCommit();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return cohort;
- }
- };
-
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
FiniteDuration duration = duration("5 seconds");
- // Send BatchedModifications to start and ready a transaction.
+ // Simulate the ForwardedReadyTransaction message that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
final FiniteDuration duration = duration("5 seconds");
String transactionID = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return cohort;
- }
- };
+ // Simulate the ForwardedReadyTransaction message that would be sent
+ // by the ShardTransaction.
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
- // Send BatchedModifications to start and ready a transaction.
-
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
}
};
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ modification, preCommit);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
final FiniteDuration duration = duration("5 seconds");
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
writeToStore(shard, TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
- // Create and ready the 1st Tx - will timeout
+ // Create 1st Tx - will timeout
String transactionID1 = "tx1";
- shard.tell(newBatchedModifications(transactionID1, YangInstanceIdentifier.builder(
- TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ modification1);
- // Create and ready the 2nd Tx
+ // Create 2nd Tx
- String transactionID2 = "tx2";
+ String transactionID2 = "tx3";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
YangInstanceIdentifier listNodePath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
- .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
- shard.tell(newBatchedModifications(transactionID2, listNodePath,
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
+ listNodePath,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2),
+ modification2);
+
+ // Ready the Tx's
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx. We don't send the commit so it should timeout.
final FiniteDuration duration = duration("5 seconds");
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
+
String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ modification2);
+
String transactionID3 = "tx3";
+ MutableCompositeModification modification3 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification3);
- // Send a BatchedModifications to start transactions and ready them.
+ // Ready the Tx's
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID2,TestModel.OUTER_LIST_PATH,
- ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID3, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ cohort3, modification3, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx.
// Setup 2 simulated transactions with mock cohorts. The first one will be aborted.
- final String transactionID1 = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).abort();
- final String transactionID2 = "tx2";
- final DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
final Timeout timeout = new Timeout(duration);
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return transactionID1.equals(transactionID) ? cohort1 : cohort2;
- }
- };
-
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
- // Send BatchedModifications to start and ready each transaction.
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
@SuppressWarnings("serial")
public void testCreateSnapshot(final boolean persistent, final String shardActorName) throws Exception{
+ final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
+
final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
class TestPersistentDataProvider extends DelegatingPersistentDataProvider {
TestPersistentDataProvider(DataPersistenceProvider delegate) {
dataStoreContextBuilder.persistent(persistent);
new ShardTestKit(getSystem()) {{
- final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
-
class TestShard extends Shard {
protected TestShard(ShardIdentifier name, Map<String, String> peerAddresses,
}
@Override
- protected void commitSnapshot(final long sequenceNumber) {
- super.commitSnapshot(sequenceNumber);
- latch.get().countDown();
+ public void handleCommand(Object message) {
+ super.handleCommand(message);
+
+ if (message instanceof SaveSnapshotSuccess || message.equals("commit_snapshot")) {
+ latch.get().countDown();
+ }
}
@Override
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props, "testNegativeMergeTransactionReady");
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doThrow;
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.actor.Status.Failure;
import akka.actor.Terminated;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
private ActorRef newTransactionActor(DOMStoreTransaction transaction, ActorRef shard, String name,
short version) {
Props props = ShardTransaction.props(transaction, shard != null ? shard : createShard(),
- testSchemaContext, datastoreContext, shardStats, "txn", version);
+ datastoreContext, shardStats, "txn", version);
return getSystem().actorOf(props, name);
}
}
@Test
- public void testOnReceiveReadyTransaction() throws Exception {
+ public void testOnReceiveBatchedModificationsReady() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ "testOnReceiveBatchedModificationsReady");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(writePath, writeData));
+
+ transaction.tell(batched, getRef());
+ BatchedModificationsReply reply = expectMsgClass(duration("5 seconds"), BatchedModificationsReply.class);
+ assertEquals("getNumBatched", 1, reply.getNumBatched());
+
+ batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+ }};
+ }
+
+ @Test(expected=TestException.class)
+ public void testOnReceiveBatchedModificationsFailure() throws Throwable {
+ new JavaTestKit(getSystem()) {{
+
+ DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
+ final ActorRef transaction = newTransactionActor(mockWriteTx,
+ "testOnReceiveBatchedModificationsFailure");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doThrow(new TestException()).when(mockWriteTx).write(path, node);
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(path, node));
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+
+ batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+ Failure failure = expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ if(failure != null) {
+ throw failure.cause();
+ }
+ }};
+ }
+
+ @Test(expected=IllegalStateException.class)
+ public void testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Throwable {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+
+ Failure failure = expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ if(failure != null) {
+ throw failure.cause();
+ }
+ }};
+ }
+
+ @Test
+ public void testOnReceivePreLithiumReadyTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testReadyTransaction");
+ "testReadyTransaction", DataStoreVersions.HELIUM_2_VERSION);
- watch(transaction);
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
transaction.tell(new ReadyTransaction().toSerializable(), getRef());
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
- Terminated.class);
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
- Terminated.class);
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
}};
// test
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testReadyTransaction2");
+ "testReadyTransaction2", DataStoreVersions.HELIUM_2_VERSION);
- watch(transaction);
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
transaction.tell(new ReadyTransaction(), getRef());
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
- Terminated.class);
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
- Terminated.class);
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
}};
}
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
transaction.receive(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION).
expectMsgClass(duration("3 seconds"), Terminated.class);
}};
}
+
+ // Marker RuntimeException used by these tests to simulate operation failures.
+ public static class TestException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+ }
}
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
}
- batchedReplyPromise1.success(new BatchedModificationsReply(1, txActorRef1.path().toString()));
+ batchedReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
// Tx 2 should've proceeded to find the primary shard.
verify(mockActorContext, timeout(5000).times(2)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
Promise<Object> readyReplyPromise1 = akka.dispatch.Futures.promise();
doReturn(readyReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(txActorRef1)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ eq(actorSelection(txActorRef1)), isA(BatchedModifications.class));
DOMStoreWriteTransaction writeTx1 = txChainProxy.newReadWriteTransaction();
writeTx1.ready();
- verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), false);
+ verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
String tx2MemberName = "tx2MemberName";
doReturn(tx2MemberName).when(mockActorContext).getCurrentMemberName();
fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
}
- readyReplyPromise1.success(readySerializedTxReply(txActorRef1.path().toString()).value().get().get());
+ readyReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
verify(mockActorContext, timeout(5000)).executeOperationAsync(eq(getSystem().actorSelection(shardActorRef2.path())),
eqCreateTransaction(tx2MemberName, READ_WRITE));
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
import akka.actor.Props;
import akka.dispatch.Futures;
import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Assert;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregatorTest;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
+import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.Promise;
-import scala.concurrent.duration.Duration;
@SuppressWarnings("resource")
public class TransactionProxyTest extends AbstractTransactionProxyTest {
transactionProxy.exists(TestModel.TEST_PATH);
}
- private void verifyRecordingOperationFutures(List<Future<Object>> futures,
- Class<?>... expResultTypes) throws Exception {
- assertEquals("getRecordingOperationFutures size", expResultTypes.length, futures.size());
-
- int i = 0;
- for( Future<Object> future: futures) {
- assertNotNull("Recording operation Future is null", future);
-
- Class<?> expResultType = expResultTypes[i++];
- if(Throwable.class.isAssignableFrom(expResultType)) {
- try {
- Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- fail("Expected exception from recording operation Future");
- } catch(Exception e) {
- // Expected
- }
- } else {
- assertEquals(String.format("Recording operation %d Future result type", i +1 ), expResultType,
- Await.result(future, Duration.create(5, TimeUnit.SECONDS)).getClass());
- }
- }
- }
-
@Test
public void testWrite() throws Exception {
dataStoreContextBuilder.shardBatchedModificationCount(1);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
+ expectBatchedModificationsReady(actorRef);
final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
// This sends the batched modification.
transactionProxy.ready();
- verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), false);
-
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), true);
}
@Test(expected=IllegalStateException.class)
}
@Test
- public void testReadyWithReadWrite() throws Exception {
+ public void testReadWrite() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
eq(actorSelection(actorRef)), eqSerializedReadData());
expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), false,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
+ }
+
+ @Test
+ public void testReadyWithReadWrite() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
+
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
-
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
- verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(BatchedModifications.class));
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ verifyBatchedModifications(batchedModifications.get(0), true,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
+
+ assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
}
@Test
- public void testReadyWithWriteOnlyAndLastBatchPending() throws Exception {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+ public void testReadyWithNoModifications() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModificationsReady(actorRef, 1);
+ expectBatchedModificationsReady(actorRef);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+ transactionProxy.read(TestModel.TEST_PATH);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures());
-
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), true,
- new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
- verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ verifyBatchedModifications(batchedModifications.get(0), true);
}
@Test
- public void testReadyWithWriteOnlyAndLastBatchEmpty() throws Exception {
- dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
+ public void testReadyWithWriteOnlyAndLastBatchPending() throws Exception {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectBatchedModificationsReady(actorRef, 1);
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
-
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), false,
+ verifyBatchedModifications(batchedModifications.get(0), true,
new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- verifyBatchedModifications(batchedModifications.get(1), true);
-
verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
@Test
- public void testReadyWithRecordingOperationFailure() throws Exception {
+ public void testReadyWithWriteOnlyAndLastBatchEmpty() throws Exception {
dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
-
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectFailedBatchedModifications(actorRef);
-
- doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyCohortFutures(proxy, TestException.class);
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(), TestException.class);
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), false,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
+
+ verifyBatchedModifications(batchedModifications.get(1), true);
+
+ verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
+ isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
@Test
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
doReturn(true).when(mockActorContext).isPathLocal(anyString());
- doReturn(batchedModificationsReply(1)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), isA(BatchedModifications.class));
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- // testing ready
- doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.class));
-
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
expectBatchedModifications(actorRef, shardBatchedModificationCount);
- expectReadyTransaction(actorRef);
-
YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
- boolean optimizedWriteOnly = type == WRITE_ONLY && dataStoreContextBuilder.build().isWriteOnlyTransactionOptimizationsEnabled();
- verifyBatchedModifications(batchedModifications.get(2), optimizedWriteOnly, new MergeModification(mergePath3, mergeNode3),
+ verifyBatchedModifications(batchedModifications.get(2), true, new MergeModification(mergePath3, mergeNode3),
new DeleteModification(deletePath2));
- if(optimizedWriteOnly) {
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class);
- } else {
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
- }
+ assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
}
@Test
inOrder.verify(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
+ }
+
+ // Reads the root path (empty YangInstanceIdentifier) through the
+ // TransactionProxy and verifies the replies from the two configured shards
+ // ("test" and "cars") are aggregated into a single root node containing
+ // both shards' container children.
+ @Test
+ public void testReadRoot() throws ReadFailedException, InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
+
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ Configuration configuration = mock(Configuration.class);
+ doReturn(configuration).when(mockActorContext).getConfiguration();
+ doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ // Two shards so the proxy must fan out the root read and merge results.
+ doReturn(Sets.newHashSet("test", "cars")).when(configuration).getAllShardNames();
+
+ NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
+
+ // Each shard's root read returns its node wrapped in a root container.
+ setUpReadData("test", NormalizedNodeAggregatorTest.getRootNode(expectedNode1, schemaContext));
+ setUpReadData("cars", NormalizedNodeAggregatorTest.getRootNode(expectedNode2, schemaContext));
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ doReturn(memberName).when(mockActorContext).getCurrentMemberName();
+
+ doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
+
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
+ YangInstanceIdentifier.builder().build()).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+
+ NormalizedNode<?, ?> normalizedNode = readOptional.get();
+
+ // The aggregated root's value is the collection of child container nodes.
+ assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+
+ Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+
+ for(NormalizedNode<?,?> node : collection){
+ assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
+ }
+
+ assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
+ NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME) != null);
+
+ assertEquals(expectedNode1, NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME));
+
+ assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
+ NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
+
+ assertEquals(expectedNode2, NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME));
+ }
+
+
+ // Stubs the mock ActorContext so that resolving the named shard yields a
+ // DoNothingActor-backed primary, a READ_ONLY transaction actor is created
+ // against it, and a serialized read of the root path on that transaction
+ // returns the given expectedNode.
+ private void setUpReadData(String shardName, NormalizedNode<?, ?> expectedNode) {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(getSystem().actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ // findPrimaryShardAsync for this shard resolves to the stub shard actor.
+ doReturn(Futures.successful(getSystem().actorSelection(shardActorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(shardName));
+
+ doReturn(true).when(mockActorContext).isPathLocal(shardActorRef.path().toString());
+
+ ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(txActorRef.path())).
+ when(mockActorContext).actorSelection(txActorRef.path().toString());
+
+ // CreateTransaction on the shard replies with the stub transaction actor.
+ doReturn(Futures.successful(createTransactionReply(txActorRef, DataStoreVersions.CURRENT_VERSION))).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, TransactionType.READ_ONLY));
+
+ // A root-path read against the transaction actor returns expectedNode.
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(txActorRef)), eqSerializedReadData(YangInstanceIdentifier.builder().build()));
+ }
}
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.inOrder;
-import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
+import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.HELIUM_2_VERSION;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.dispatch.Dispatchers;
// Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ shard.tell(new ForwardedReadyTransaction(transactionID1, HELIUM_2_VERSION,
cohort1, modification1, true), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
// Send the ForwardedReadyTransaction for the next 2 Tx's.
- shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ shard.tell(new ForwardedReadyTransaction(transactionID2, HELIUM_2_VERSION,
cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ shard.tell(new ForwardedReadyTransaction(transactionID3, HELIUM_2_VERSION,
cohort3, modification3, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import scala.concurrent.Future;
/**
* Unit tests for backwards compatibility with pre-Lithium versions.
return argThat(matcher);
}
+ // Builds an already-completed Future holding a serialized, version-aware
+ // ReadyTransactionReply for the given cohort path.
+ private Future<Object> readySerializedTxReply(String path, short version) {
+ return Futures.successful(new ReadyTransactionReply(path, version).toSerializable());
+ }
+
private ActorRef testCompatibilityWithHeliumVersion(short version) throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, version);
doReturn(Futures.successful(new DeleteDataReply().toSerializable(version))).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyDeleteData(TestModel.TEST_PATH));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString(), version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
doReturn(Futures.successful(new WriteDataReply().toSerializable(version))).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyWriteData(testNode));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString(), version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
batched.addModification(new MergeModification(mergePath, mergeData));
batched.addModification(new DeleteModification(deletePath));
batched.setReady(true);
+ batched.setTotalMessagesSent(5);
BatchedModifications clone = (BatchedModifications) SerializationUtils.clone(
(Serializable) batched.toSerializable());
assertEquals("getTransactionID", "tx1", clone.getTransactionID());
assertEquals("getTransactionChainID", "txChain", clone.getTransactionChainID());
assertEquals("isReady", true, clone.isReady());
+ assertEquals("getTotalMessagesSent", 5, clone.getTotalMessagesSent());
assertEquals("getModifications size", 3, clone.getModifications().size());
BatchedModificationsReply clone = (BatchedModificationsReply) SerializationUtils.clone(
(Serializable) new BatchedModificationsReply(100).toSerializable());
assertEquals("getNumBatched", 100, clone.getNumBatched());
- assertEquals("getCohortPath", null, clone.getCohortPath());
-
- clone = (BatchedModificationsReply) SerializationUtils.clone(
- (Serializable) new BatchedModificationsReply(50, "cohort path").toSerializable());
- assertEquals("getNumBatched", 50, clone.getNumBatched());
- assertEquals("getCohortPath", "cohort path", clone.getCohortPath());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static org.junit.Assert.assertEquals;
+import java.io.Serializable;
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+
+/**
+ * Unit tests for ReadyTransactionReply.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReadyTransactionReplyTest {
+
+    // Current version: toSerializable() returns the ReadyTransactionReply
+    // instance itself, and a serialization round-trip preserves version and
+    // cohort path.
+    @Test
+    public void testSerialization() {
+        String cohortPath = "cohort path";
+        ReadyTransactionReply expected = new ReadyTransactionReply(cohortPath);
+
+        Object serialized = expected.toSerializable();
+        assertEquals("Serialized type", ReadyTransactionReply.class, serialized.getClass());
+
+        ReadyTransactionReply actual = ReadyTransactionReply.fromSerializable(SerializationUtils.clone(
+                (Serializable) serialized));
+        assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
+        assertEquals("getCohortPath", cohortPath, actual.getCohortPath());
+    }
+
+    // Pre-Lithium (Helium-2) version: toSerializable() falls back to the
+    // protobuf ShardTransactionMessages form for backwards compatibility,
+    // and fromSerializable restores the version and cohort path.
+    @Test
+    public void testSerializationWithPreLithiumVersion() throws Exception {
+        String cohortPath = "cohort path";
+        ReadyTransactionReply expected = new ReadyTransactionReply(cohortPath, DataStoreVersions.HELIUM_2_VERSION);
+
+        Object serialized = expected.toSerializable();
+        assertEquals("Serialized type", ShardTransactionMessages.ReadyTransactionReply.class, serialized.getClass());
+
+        ReadyTransactionReply actual = ReadyTransactionReply.fromSerializable(SerializationUtils.clone(
+                (Serializable) serialized));
+        assertEquals("getVersion", DataStoreVersions.HELIUM_2_VERSION, actual.getVersion());
+        assertEquals("getCohortPath", cohortPath, actual.getCohortPath());
+    }
+}
@Override public void onReceive(Object message) throws Exception {
if(message instanceof String){
if("messages".equals(message)){
- getSender().tell(new ArrayList(messages), getSelf());
+ getSender().tell(new ArrayList<>(messages), getSelf());
}
} else {
messages.add(message);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import com.google.common.base.Optional;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import org.junit.Test;
+import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
+import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class NormalizedNodeAggregatorTest {
+
+ @Test
+ public void testAggregate() throws InterruptedException, ExecutionException, ReadFailedException {
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
+
+ Optional<NormalizedNode<?, ?>> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.builder().build(),
+ Lists.newArrayList(
+ Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode1, schemaContext)),
+ Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode2, schemaContext))),
+ schemaContext);
+
+
+ NormalizedNode<?,?> normalizedNode = optional.get();
+
+ assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+
+ Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+
+ for(NormalizedNode<?,?> node : collection){
+ assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
+ }
+
+ assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
+ findChildWithQName(collection, TestModel.TEST_QNAME) != null);
+
+ assertEquals(expectedNode1, findChildWithQName(collection, TestModel.TEST_QNAME));
+
+ assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
+ findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
+
+ assertEquals(expectedNode2, findChildWithQName(collection, CarsModel.BASE_QNAME));
+
+ }
+
+ public static NormalizedNode<?,?> getRootNode(NormalizedNode<?, ?> moduleNode, SchemaContext schemaContext) throws ReadFailedException, ExecutionException, InterruptedException {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", Executors.newSingleThreadExecutor());
+ store.onGlobalContextUpdated(schemaContext);
+
+ DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
+
+ writeTransaction.merge(YangInstanceIdentifier.builder().node(moduleNode.getNodeType()).build(), moduleNode);
+
+ DOMStoreThreePhaseCommitCohort ready = writeTransaction.ready();
+
+ ready.canCommit().get();
+ ready.preCommit().get();
+ ready.commit().get();
+
+ DOMStoreReadTransaction readTransaction = store.newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read = readTransaction.read(YangInstanceIdentifier.builder().build());
+
+ Optional<NormalizedNode<?, ?>> nodeOptional = read.checkedGet();
+
+ return nodeOptional.get();
+ }
+
+ public static NormalizedNode<?,?> findChildWithQName(Collection<NormalizedNode<?, ?>> collection, QName qName) {
+ for(NormalizedNode<?,?> node : collection){
+ if(node.getNodeType().equals(qName)){
+ return node;
+ }
+ }
+
+ return null;
+ }
+
+}
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractCheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
* A {@link Future} used to report the status of an future {@link java.util.concurrent.Future}.
*/
final class PingPongFuture extends AbstractCheckedFuture<Void, TransactionCommitFailedException> {
- protected PingPongFuture(final ListenableFuture<Void> delegate) {
- super(delegate);
- }
+ protected PingPongFuture(final ListenableFuture<Void> delegate) {
+ super(delegate);
+ }
- @Override
- protected TransactionCommitFailedException mapException(final Exception e) {
- Preconditions.checkArgument(e instanceof TransactionCommitFailedException);
- return (TransactionCommitFailedException) e;
+ @Override
+ protected TransactionCommitFailedException mapException(final Exception e) {
+ if (e.getCause() instanceof TransactionCommitFailedException){
+ return (TransactionCommitFailedException) e.getCause();
+ } else {
+ return new TransactionCommitFailedException(e.getMessage(), e.getCause(), null);
}
+ }
}
+
import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.OrderedNormalizedNodeWriter;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
}
private void writeNormalizedRpc(final ContainerNode normalized, final DOMResult result, final SchemaPath schemaPath, final SchemaContext baseNetconfCtx) throws IOException, XMLStreamException {
- final NormalizedNodeWriter normalizedNodeWriter;
+ final OrderedNormalizedNodeWriter normalizedNodeWriter;
NormalizedNodeStreamWriter normalizedNodeStreamWriter = null;
XMLStreamWriter writer = null;
try {
writer = NetconfMessageTransformUtil.XML_FACTORY.createXMLStreamWriter(result);
normalizedNodeStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(writer, baseNetconfCtx, schemaPath);
- normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(normalizedNodeStreamWriter);
-
- for (final DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?> editElement : normalized.getValue()) {
- normalizedNodeWriter.write(editElement);
- }
+ normalizedNodeWriter = new OrderedNormalizedNodeWriter(normalizedNodeStreamWriter, baseNetconfCtx, schemaPath);
+ Collection<DataContainerChild<?, ?>> value = (Collection) normalized.getValue();
+ normalizedNodeWriter.write(value);
normalizedNodeWriter.flush();
} finally {
try {
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import io.netty.channel.Channel;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreService;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.NetconfServerSessionListener;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
private void closeSession() throws NetconfDocumentedException, ParserConfigurationException, SAXException,
IOException {
+ final Channel channel = mock(Channel.class);
+ doReturn("channel").when(channel).toString();
+ final NetconfServerSessionListener listener = mock(NetconfServerSessionListener.class);
+ final NetconfServerSession session =
+ new NetconfServerSession(listener, channel, 1L,
+ NetconfHelloMessageAdditionalHeader.fromString("[netconf;10.12.0.102:48528;ssh;;;;;;]"));
DefaultCloseSession closeOp = new DefaultCloseSession(NETCONF_SESSION_ID, sessionCloseable);
+ closeOp.setNetconfSession(session);
executeOp(closeOp, "netconfMessages/closeSession.xml");
}
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import javax.annotation.Nullable;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.OrderedNormalizedNodeWriter;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
final NormalizedNodeStreamWriter nnStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(xmlWriter,
schemaContext.getCurrentContext(), rpcOutputPath);
- final NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(nnStreamWriter);
+ final OrderedNormalizedNodeWriter nnWriter = new OrderedNormalizedNodeWriter(nnStreamWriter, schemaContext.getCurrentContext(), rpcOutputPath);
writeRootElement(xmlWriter, nnWriter, (ContainerNode) data);
try {
}
}
- private void writeRootElement(final XMLStreamWriter xmlWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) {
+ private void writeRootElement(final XMLStreamWriter xmlWriter, final OrderedNormalizedNodeWriter nnWriter, final ContainerNode data) {
try {
- for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
- nnWriter.write(child);
- }
+ Collection<DataContainerChild<?, ?>> value = (Collection) data.getValue();
+ nnWriter.write(value);
nnWriter.flush();
xmlWriter.flush();
} catch (XMLStreamException | IOException e) {
import java.util.List;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import javax.xml.transform.TransformerException;
import org.custommonkey.xmlunit.DetailedDiff;
import org.custommonkey.xmlunit.Diff;
import org.custommonkey.xmlunit.XMLUnit;
verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid-control.xml"));
}
+ @Test
+ public void testSuccesfullContainerInvocation() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceSuccesfullInvocation);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-container.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+ verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-container-control.xml"));
+ }
+
@Test
public void testFailedInvocation() throws Exception {
RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceFailedInvocation);
verifyResponse(response, RPC_REPLY_OK);
}
- private void verifyResponse(Document response, Document template) {
+ private void verifyResponse(Document response, Document template) throws IOException, TransformerException {
DetailedDiff dd = new DetailedDiff(new Diff(response, template));
dd.overrideElementQualifier(new RecursiveElementNameAndTextQualifier());
- assertTrue(dd.similar());
+ // we care about element ordering, so the response has to be identical, not merely similar
+ assertTrue(dd.identical());
}
private RpcDefinition getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <cont1 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ cont1 input string 1
+ </test-string>
+ <test-string2>
+ cont1 input string 2
+ </test-string2>
+ </cont1>
+ <cont2 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ cont2 input string 1
+ </test-string>
+ <test-string2>
+ cont2 input string 2
+ </test-string2>
+ </cont2>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <container-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <cont1>
+ <test-string>
+ cont1 input string 1
+ </test-string>
+ <test-string2>
+ cont1 input string 2
+ </test-string2>
+ </cont1>
+ <cont2>
+ <test-string>
+ cont2 input string 1
+ </test-string>
+ <test-string2>
+ cont2 input string 2
+ </test-string2>
+ </cont2>
+ </container-rpc>
+</rpc>
\ No newline at end of file
}
}
}
+
+ rpc container-rpc {
+ input {
+ container cont1 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ container cont2 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+
+ output {
+ container cont1 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ container cont2 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+ }
}
import com.google.common.base.Preconditions;
import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelFutureListener;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.MessageToByteEncoder;
import java.text.SimpleDateFormat;
private Date loginTime;
private long inRpcSuccess, inRpcFail, outRpcError;
+ private volatile boolean delayedClose;
public NetconfServerSession(final NetconfServerSessionListener sessionListener, final Channel channel, final long sessionId,
final NetconfHelloMessageAdditionalHeader header) {
super.sessionUp();
}
+ /**
+ * Close this session after the next message is sent.
+ * Suitable for a close rpc that needs to send an ok response before the session is closed.
+ */
+ public void delayedClose() {
+ this.delayedClose = true;
+ }
+
+ @Override
+ public ChannelFuture sendMessage(final NetconfMessage netconfMessage) {
+ final ChannelFuture channelFuture = super.sendMessage(netconfMessage);
+ // delayed close was set, close after the message was sent
+ if(delayedClose) {
+ channelFuture.addListener(new ChannelFutureListener() {
+ @Override
+ public void operationComplete(final ChannelFuture future) throws Exception {
+ close();
+ }
+ });
+ }
+ return channelFuture;
+ }
+
public void onIncommingRpcSuccess() {
inRpcSuccess++;
}
import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.controller.netconf.util.messages.SendErrorExceptionUtil;
-import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
session);
LOG.debug("Responding with message {}", message);
session.sendMessage(message);
-
- if (isCloseSession(netconfMessage)) {
- closeNetconfSession(session);
- }
-
} catch (final RuntimeException e) {
// TODO: should send generic error or close session?
LOG.error("Unexpected exception", e);
}
}
- private void closeNetconfSession(final NetconfServerSession session) {
- // destroy NetconfOperationService
- session.close();
- LOG.info("Session {} closed successfully", session.getSessionId());
- }
-
-
-
private NetconfMessage processDocument(final NetconfMessage netconfMessage, final NetconfServerSession session)
throws NetconfDocumentedException {
ImmutableMap.of(NetconfDocumentedException.ErrorTag.missing_attribute.toString(),
XmlNetconfConstants.MESSAGE_ID));
}
-
- private static boolean isCloseSession(final NetconfMessage incomingDocument) {
- final Document document = incomingDocument.getDocument();
- XmlElement rpcElement = XmlElement.fromDomDocument(document);
- if (rpcElement.getOnlyChildElementOptionally(DefaultCloseSession.CLOSE_SESSION).isPresent()) {
- return true;
- }
-
- return false;
- }
}
package org.opendaylight.controller.netconf.impl.mapping.operations;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import java.util.Collections;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class DefaultCloseSession extends AbstractSingletonNetconfOperation {
+public class DefaultCloseSession extends AbstractSingletonNetconfOperation implements DefaultNetconfOperation {
+ private static final Logger LOG = LoggerFactory.getLogger(DefaultCloseSession.class);
+
public static final String CLOSE_SESSION = "close-session";
+
private final AutoCloseable sessionResources;
+ private NetconfServerSession session;
public DefaultCloseSession(String netconfSessionIdForReporting, AutoCloseable sessionResources) {
super(netconfSessionIdForReporting);
throws NetconfDocumentedException {
try {
sessionResources.close();
+ Preconditions.checkNotNull(session, "Session was not set").delayedClose();
+ LOG.info("Session {} closing", session.getSessionId());
} catch (Exception e) {
throw new NetconfDocumentedException("Unable to properly close session "
+ getNetconfSessionIdForReporting(), NetconfDocumentedException.ErrorType.application,
}
return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
}
+
+ @Override
+ public void setNetconfSession(final NetconfServerSession s) {
+ this.session = s;
+ }
}
package org.opendaylight.controller.netconf.impl.mapping.operations;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.NetconfServerSessionListener;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
public class DefaultCloseSessionTest {
+
@Test
public void testDefaultCloseSession() throws Exception {
AutoCloseable res = mock(AutoCloseable.class);
doNothing().when(res).close();
- DefaultCloseSession session = new DefaultCloseSession("", res);
+ DefaultCloseSession close = new DefaultCloseSession("", res);
Document doc = XmlUtil.newDocument();
XmlElement elem = XmlElement.fromDomElement(XmlUtil.readXmlToElement("<elem/>"));
- session.handleWithNoSubsequentOperations(doc, elem);
+ final Channel channel = mock(Channel.class);
+ doReturn("channel").when(channel).toString();
+ doReturn(mock(ChannelFuture.class)).when(channel).close();
+
+ final ChannelFuture sendFuture = mock(ChannelFuture.class);
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(final InvocationOnMock invocation) throws Throwable {
+ ((GenericFutureListener) invocation.getArguments()[0]).operationComplete(sendFuture);
+ return null;
+ }
+ }).when(sendFuture).addListener(any(GenericFutureListener.class));
+ doReturn(sendFuture).when(channel).writeAndFlush(anyObject());
+ final NetconfServerSessionListener listener = mock(NetconfServerSessionListener.class);
+ doNothing().when(listener).onSessionTerminated(any(NetconfServerSession.class), any(NetconfTerminationReason.class));
+ final NetconfServerSession session =
+ new NetconfServerSession(listener, channel, 1L,
+ NetconfHelloMessageAdditionalHeader.fromString("[netconf;10.12.0.102:48528;ssh;;;;;;]"));
+ close.setNetconfSession(session);
+ close.handleWithNoSubsequentOperations(doc, elem);
+ // Fake close response to trigger delayed close
+ session.sendMessage(new NetconfMessage(XmlUtil.readXmlToDocument("<rpc-reply message-id=\"101\"\n" +
+ "xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<ok/>\n" +
+ "</rpc-reply>")));
+ verify(channel).close();
+ verify(listener).onSessionTerminated(any(NetconfServerSession.class), any(NetconfTerminationReason.class));
}
@Test(expected = NetconfDocumentedException.class)
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-model-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
</dependencies>
<build>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter.UNKNOWN_SIZE;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Iterables;
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamAttributeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+//TODO: this does not extend NormalizedNodeWriter from yangtools due to the API freeze; make it inherit the common methods to avoid code duplication
+//TODO: move this to yangtools; it lives in netconf-util only because of the API freeze in Lithium
+public class OrderedNormalizedNodeWriter implements Closeable, Flushable{
+
+ private final SchemaContext schemaContext;
+ private final SchemaNode root;
+ private final NormalizedNodeStreamWriter writer;
+
+ public OrderedNormalizedNodeWriter(NormalizedNodeStreamWriter writer, SchemaContext schemaContext, SchemaPath path) {
+ this.writer = writer;
+ this.schemaContext = schemaContext;
+ this.root = findParentSchemaOnPath(schemaContext, path);
+ }
+
+ public OrderedNormalizedNodeWriter write(final NormalizedNode<?, ?> node) throws IOException {
+ if (root == schemaContext) {
+ return write(node, schemaContext.getDataChildByName(node.getNodeType()));
+ }
+
+ return write(node, root);
+ }
+
+ public OrderedNormalizedNodeWriter write(final Collection<DataContainerChild<?,?>> nodes) throws IOException {
+ if (writeChildren(nodes, root, false)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize nodes " + nodes);
+
+ }
+
+ private OrderedNormalizedNodeWriter write(NormalizedNode<?, ?> node, SchemaNode dataSchemaNode) throws IOException {
+ if (node == null) {
+ return this;
+ }
+
+ if (wasProcessedAsCompositeNode(node, dataSchemaNode)) {
+ return this;
+ }
+
+ if (wasProcessAsSimpleNode(node)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize node " + node);
+ }
+
+ private void write(List<NormalizedNode<?, ?>> nodes, SchemaNode dataSchemaNode) throws IOException {
+ for (NormalizedNode<?, ?> node : nodes) {
+ write(node, dataSchemaNode);
+ }
+ }
+
+ private OrderedNormalizedNodeWriter writeLeaf(final NormalizedNode<?, ?> node) throws IOException {
+ if (wasProcessAsSimpleNode(node)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize node " + node);
+ }
+
+ private boolean writeChildren(final Iterable<? extends NormalizedNode<?, ?>> children, SchemaNode parentSchemaNode, boolean endParent) throws IOException {
+ // Augmentations cannot be retrieved via node.getChild, so build our own structure with augmentations resolved
+ ArrayListMultimap<QName, NormalizedNode<?, ?>> qNameToNodes = ArrayListMultimap.create();
+ for (NormalizedNode<?, ?> child : children) {
+ if (child instanceof AugmentationNode) {
+ qNameToNodes.putAll(resolveAugmentations(child));
+ } else {
+ qNameToNodes.put(child.getNodeType(), child);
+ }
+ }
+
+ if (parentSchemaNode instanceof DataNodeContainer) {
+ if (parentSchemaNode instanceof ListSchemaNode && qNameToNodes.containsKey(parentSchemaNode.getQName())) {
+ write(qNameToNodes.get(parentSchemaNode.getQName()), parentSchemaNode);
+ } else {
+ for (DataSchemaNode schemaNode : ((DataNodeContainer) parentSchemaNode).getChildNodes()) {
+ write(qNameToNodes.get(schemaNode.getQName()), schemaNode);
+ }
+ }
+ } else if(parentSchemaNode instanceof ChoiceSchemaNode) {
+ for (ChoiceCaseNode ccNode : ((ChoiceSchemaNode) parentSchemaNode).getCases()) {
+ for (DataSchemaNode dsn : ccNode.getChildNodes()) {
+ if (qNameToNodes.containsKey(dsn.getQName())) {
+ write(qNameToNodes.get(dsn.getQName()), dsn);
+ }
+ }
+ }
+ } else {
+ for (NormalizedNode<?, ?> child : children) {
+ writeLeaf(child);
+ }
+ }
+ if (endParent) {
+ writer.endNode();
+ }
+ return true;
+ }
+
+    /**
+     * Flatten an augmentation node into a QName-to-node multimap, recursing into nested
+     * augmentations so that only non-augmentation children remain in the result.
+     *
+     * @param child the augmentation node to flatten (must be an AugmentationNode)
+     * @return multimap of the augmentation's transitive non-augmentation children by QName
+     */
+    private ArrayListMultimap<QName, NormalizedNode<?, ?>> resolveAugmentations(NormalizedNode<?, ?> child) {
+        final ArrayListMultimap<QName, NormalizedNode<?, ?>> flattened = ArrayListMultimap.create();
+        for (final NormalizedNode<?, ?> augChild : ((AugmentationNode) child).getValue()) {
+            if (augChild instanceof AugmentationNode) {
+                flattened.putAll(resolveAugmentations(augChild));
+            } else {
+                flattened.put(augChild.getNodeType(), augChild);
+            }
+        }
+        return flattened;
+    }
+
+ private boolean writeMapEntryNode(final MapEntryNode node, final SchemaNode dataSchemaNode) throws IOException {
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer)
+ .startMapEntryNode(node.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(node.getValue()), node.getAttributes());
+ } else {
+ writer.startMapEntryNode(node.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(node.getValue()));
+ }
+ return writeChildren(node.getValue(), dataSchemaNode, true);
+ }
+
+    /**
+     * Attempt to serialize {@code node} as a simple (non-composite) node: a leaf-list entry,
+     * a leaf, or an anyxml node. Attributes are emitted when the writer supports them.
+     *
+     * @param node node to serialize
+     * @return {@code true} when the node was one of the simple types and was written,
+     *         {@code false} otherwise (nothing is written in that case)
+     * @throws IOException when the backing writer fails
+     */
+    private boolean wasProcessAsSimpleNode(final NormalizedNode<?, ?> node) throws IOException {
+        final boolean withAttributes = writer instanceof NormalizedNodeStreamAttributeWriter;
+        if (node instanceof LeafSetEntryNode) {
+            final LeafSetEntryNode<?> leafListEntry = (LeafSetEntryNode<?>) node;
+            if (withAttributes) {
+                ((NormalizedNodeStreamAttributeWriter) writer).leafSetEntryNode(leafListEntry.getValue(), leafListEntry.getAttributes());
+            } else {
+                writer.leafSetEntryNode(leafListEntry.getValue());
+            }
+            return true;
+        }
+        if (node instanceof LeafNode) {
+            final LeafNode<?> leaf = (LeafNode<?>) node;
+            if (withAttributes) {
+                ((NormalizedNodeStreamAttributeWriter) writer).leafNode(leaf.getIdentifier(), leaf.getValue(), leaf.getAttributes());
+            } else {
+                writer.leafNode(leaf.getIdentifier(), leaf.getValue());
+            }
+            return true;
+        }
+        if (node instanceof AnyXmlNode) {
+            final AnyXmlNode anyXml = (AnyXmlNode) node;
+            writer.anyxmlNode(anyXml.getIdentifier(), anyXml.getValue());
+            return true;
+        }
+        return false;
+    }
+
+ /**
+ * Attempt to serialize {@code node} as a composite node: emit the matching start event and
+ * recurse into its children via writeChildren (which also closes the node).
+ *
+ * The order of the instanceof checks is significant: OrderedMapNode is tested before
+ * MapNode (presumably its subtype — confirm) so ordered maps receive the ordered start
+ * event rather than the plain one.
+ *
+ * @return {@code true} when the node was recognized as a composite and fully written,
+ *         {@code false} otherwise (nothing is written in that case)
+ */
+ private boolean wasProcessedAsCompositeNode(final NormalizedNode<?, ?> node, SchemaNode dataSchemaNode) throws IOException {
+ if (node instanceof ContainerNode) {
+ final ContainerNode n = (ContainerNode) node;
+ // Containers carry attributes when the writer supports them.
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).startContainerNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()), n.getAttributes());
+ } else {
+ writer.startContainerNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ }
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof MapEntryNode) {
+ // Map entries are handled separately because they, too, may carry attributes.
+ return writeMapEntryNode((MapEntryNode) node, dataSchemaNode);
+ }
+ if (node instanceof UnkeyedListEntryNode) {
+ final UnkeyedListEntryNode n = (UnkeyedListEntryNode) node;
+ writer.startUnkeyedListItem(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof ChoiceNode) {
+ final ChoiceNode n = (ChoiceNode) node;
+ writer.startChoiceNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof AugmentationNode) {
+ final AugmentationNode n = (AugmentationNode) node;
+ writer.startAugmentationNode(n.getIdentifier());
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof UnkeyedListNode) {
+ final UnkeyedListNode n = (UnkeyedListNode) node;
+ writer.startUnkeyedList(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ // Must stay before the MapNode check so ordered maps are not written as plain maps.
+ if (node instanceof OrderedMapNode) {
+ final OrderedMapNode n = (OrderedMapNode) node;
+ writer.startOrderedMapNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof MapNode) {
+ final MapNode n = (MapNode) node;
+ writer.startMapNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof LeafSetNode) {
+ //covers also OrderedLeafSetNode for which doesn't exist start* method
+ final LeafSetNode<?> n = (LeafSetNode<?>) node;
+ writer.startLeafSet(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+
+ return false;
+ }
+
+    /**
+     * Best-effort estimate of the number of children, used as a size hint for the writer's
+     * start* events.
+     *
+     * @param children child iterable; any Iterable is accepted
+     * @return the size when {@code children} is a Collection, otherwise UNKNOWN_SIZE
+     */
+    // Removed the redundant 'final' modifier: it has no effect on a private static method.
+    private static int childSizeHint(final Iterable<?> children) {
+        return children instanceof Collection ? ((Collection<?>) children).size() : UNKNOWN_SIZE;
+    }
+
+ //TODO similar code is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private SchemaNode findParentSchemaOnPath(SchemaContext schemaContext, SchemaPath path) {
+ SchemaNode current = Preconditions.checkNotNull(schemaContext);
+ for (final QName qname : path.getPathFromRoot()) {
+ SchemaNode child;
+ if(current instanceof DataNodeContainer) {
+ child = ((DataNodeContainer) current).getDataChildByName(qname);
+
+ if (child == null && current instanceof SchemaContext) {
+ child = tryFindGroupings((SchemaContext) current, qname).orNull();
+ }
+
+ if(child == null && current instanceof SchemaContext) {
+ child = tryFindNotification((SchemaContext) current, qname)
+ .or(tryFindRpc(((SchemaContext) current), qname)).orNull();
+ }
+ } else if (current instanceof ChoiceSchemaNode) {
+ child = ((ChoiceSchemaNode) current).getCaseNodeByName(qname);
+ } else if (current instanceof RpcDefinition) {
+ switch (qname.getLocalName()) {
+ case "input":
+ child = ((RpcDefinition) current).getInput();
+ break;
+ case "output":
+ child = ((RpcDefinition) current).getOutput();
+ break;
+ default:
+ child = null;
+ break;
+ }
+ } else {
+ throw new IllegalArgumentException(String.format("Schema node %s does not allow children.", current));
+ }
+ current = child;
+ }
+ return current;
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindGroupings(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode> fromNullable(Iterables.find(ctx.getGroupings(), new SchemaNodePredicate(qname), null));
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindRpc(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getOperations(), new SchemaNodePredicate(qname), null));
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindNotification(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getNotifications(), new SchemaNodePredicate(qname), null));
+ }
+
+ /**
+ * Flush the backing stream writer.
+ *
+ * @throws IOException when the backing writer fails to flush
+ */
+ @Override
+ public void flush() throws IOException {
+ writer.flush();
+ }
+
+ /**
+ * Close the backing stream writer, flushing any pending output first.
+ *
+ * @throws IOException when the backing writer fails to flush or close
+ */
+ @Override
+ public void close() throws IOException {
+ // Flush before close so buffered events are not lost.
+ writer.flush();
+ writer.close();
+ }
+
+    //TODO this class is already present in schemaTracker, unify this when this writer is moved back to yangtools
+    /** Guava predicate matching schema nodes whose QName equals a fixed target QName. */
+    private static final class SchemaNodePredicate implements Predicate<SchemaNode> {
+        private final QName qname;
+
+        public SchemaNodePredicate(final QName target) {
+            this.qname = target;
+        }
+
+        @Override
+        public boolean apply(final SchemaNode candidate) {
+            return candidate.getQName().equals(qname);
+        }
+    }
+}
\ No newline at end of file