<feature name='odl-netconf-util' version='${project.version}'>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<bundle>mvn:org.opendaylight.yangtools/yang-model-api/${yangtools.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools/yang-data-api/${yangtools.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-util/${project.version}</bundle>
</feature>
<feature name='odl-netconf-impl' version='${project.version}' description="OpenDaylight :: Netconf :: Impl">
hosttracker.keyscheme=IP
# LISP Flow Mapping configuration
-# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings (default: true)
lisp.mappingOverwrite = true
-# Enable the Solicit-Map-Request (SMR) mechanism
-lisp.smr = false
+# Enable the Solicit-Map-Request (SMR) mechanism (default: true)
+lisp.smr = true
+# Choose policy for Explicit Locator Path (ELP) handling
+# There are three options:
+# default: don't add or remove locator records, return mapping as-is
+# both: keep the ELP, but add the next hop as a standalone non-LCAF locator with a lower priority
+# replace: remove the ELP, add the next hop as a standalone non-LCAF locator
+lisp.elpPolicy = default
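A quick illustration of the new knob (a hypothetical setting, not part of the shipped defaults): a deployment that prefers plain RLOC next hops over ELP-encoded paths would override the default with

# Strip the ELP and return only the next hop as a plain, non-LCAF locator
lisp.elpPolicy = replace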
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
/**
* A sample actor showing how the RaftActor is to be extended
*/
-public class ExampleActor extends RaftActor {
+public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
- private final Map<String, String> state = new HashMap();
+ private final Map<String, String> state = new HashMap<>();
private long persistIdentifier = 1;
private final Optional<ActorRef> roleChangeNotifier;
}
}
- @Override protected void createSnapshot() {
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
ByteString bs = null;
try {
bs = fromObject(state);
getSelf().tell(new CaptureSnapshotReply(bs.toByteArray()), null);
}
- @Override protected void applySnapshot(byte [] snapshot) {
+ @Override
+ public void applySnapshot(byte [] snapshot) {
state.clear();
try {
- state.putAll((HashMap) toObject(snapshot));
+ state.putAll((HashMap<String, String>) toObject(snapshot));
} catch (Exception e) {
LOG.error("Exception in applying snapshot", e);
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
+ LOG.debug("Snapshot applied to state : {}", ((HashMap<?, ?>) state).size());
}
}
}
@Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return this;
+ }
+
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ public void appendRecoveredLogEntry(Payload data) {
}
@Override
- protected void appendRecoveredLogEntry(Payload data) {
+ public void applyCurrentLogRecoveryBatch() {
}
@Override
- protected void applyCurrentLogRecoveryBatch() {
+ public void onRecoveryComplete() {
}
@Override
- protected void onRecoveryComplete() {
+ public void applyRecoverySnapshot(byte[] snapshot) {
}
@Override
- protected void applyRecoverySnapshot(byte[] snapshot) {
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return this;
}
}
}
// Override this method to return the protobuf-related extension fields and their values
- @Override public Map<GeneratedMessage.GeneratedExtension, String> encode() {
- Map<GeneratedMessage.GeneratedExtension, String> map = new HashMap<>();
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, String> encode() {
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, String> map = new HashMap<>();
map.put(KeyValueMessages.key, getKey());
map.put(KeyValueMessages.value, getValue());
return map;
*/
package org.opendaylight.controller.cluster.raft;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
// We define this as ArrayList so we can use ensureCapacity.
- protected ArrayList<ReplicatedLogEntry> journal;
+ private ArrayList<ReplicatedLogEntry> journal;
private long snapshotIndex = -1;
private long snapshotTerm = -1;
private ArrayList<ReplicatedLogEntry> snapshottedJournal;
private long previousSnapshotIndex = -1;
private long previousSnapshotTerm = -1;
- protected int dataSize = 0;
+ private int dataSize = 0;
public AbstractReplicatedLogImpl(long snapshotIndex,
long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries) {
this.snapshotIndex = snapshotIndex;
this.snapshotTerm = snapshotTerm;
this.journal = new ArrayList<>(unAppliedEntries);
+
+ for(ReplicatedLogEntry entry: journal) {
+ dataSize += entry.size();
+ }
}
public AbstractReplicatedLogImpl() {
}
@Override
- public void removeFrom(long logEntryIndex) {
+ public long removeFrom(long logEntryIndex) {
int adjustedIndex = adjustedIndex(logEntryIndex);
if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
// physical index should be less than list size and >= 0
- return;
+ return -1;
+ }
+
+ for(int i = adjustedIndex; i < journal.size(); i++) {
+ dataSize -= journal.get(i).size();
}
+
journal.subList(adjustedIndex , journal.size()).clear();
+
+ return adjustedIndex;
}
@Override
public void append(ReplicatedLogEntry replicatedLogEntry) {
journal.add(replicatedLogEntry);
+ dataSize += replicatedLogEntry.size();
}
@Override
snapshotTerm = previousSnapshotTerm;
previousSnapshotTerm = -1;
}
+
+ @VisibleForTesting
+ ReplicatedLogEntry getAtPhysicalIndex(int index) {
+ return journal.get(index);
+ }
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.japi.Procedure;
-import akka.persistence.RecoveryCompleted;
-import akka.persistence.SaveSnapshotFailure;
-import akka.persistence.SaveSnapshotSuccess;
-import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Optional;
-import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
import org.apache.commons.lang3.time.DurationFormatUtils;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
import org.opendaylight.controller.cluster.raft.behaviors.DelegatingRaftActorBehavior;
private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
- private static final String COMMIT_SNAPSHOT = "commit_snapshot";
-
protected final Logger LOG = LoggerFactory.getLogger(getClass());
/**
private final DelegatingPersistentDataProvider delegatingPersistenceProvider = new DelegatingPersistentDataProvider(null);
- private final Procedure<Void> createSnapshotProcedure = new CreateSnapshotProcedure();
-
- private Stopwatch recoveryTimer;
+ private RaftActorRecoverySupport raftRecovery;
- private int currentRecoveryBatchCount;
+ private RaftActorSnapshotMessageSupport snapshotSupport;
private final BehaviorStateHolder reusableBehaviorStateHolder = new BehaviorStateHolder();
context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, delegatingPersistenceProvider, currentBehavior));
}
- private void initRecoveryTimer() {
- if(recoveryTimer == null) {
- recoveryTimer = Stopwatch.createStarted();
- }
- }
-
@Override
public void preStart() throws Exception {
LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
@Override
public void postStop() {
- if(currentBehavior != null) {
+ if(currentBehavior.getDelegate() != null) {
try {
currentBehavior.close();
} catch (Exception e) {
@Override
public void handleRecover(Object message) {
- if(persistence().isRecoveryApplicable()) {
- if (message instanceof SnapshotOffer) {
- onRecoveredSnapshot((SnapshotOffer) message);
- } else if (message instanceof ReplicatedLogEntry) {
- onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
- } else if (message instanceof ApplyLogEntries) {
- // Handle this message for backwards compatibility with pre-Lithium versions.
- onRecoveredApplyLogEntries(((ApplyLogEntries) message).getToIndex());
- } else if (message instanceof ApplyJournalEntries) {
- onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
- } else if (message instanceof DeleteEntries) {
- replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
- } else if (message instanceof RecoveryCompleted) {
- onRecoveryCompletedMessage();
- }
- } else {
- if (message instanceof RecoveryCompleted) {
+ if(raftRecovery == null) {
+ raftRecovery = new RaftActorRecoverySupport(delegatingPersistenceProvider, context, currentBehavior,
+ getRaftActorRecoveryCohort());
+ }
+
+ boolean recoveryComplete = raftRecovery.handleRecoveryMessage(message);
+ if(recoveryComplete) {
+ if(!persistence().isRecoveryApplicable()) {
// Delete all the messages from the akka journal so that we do not end up with consistency issues
// Note I am not using the dataPersistenceProvider and directly using the akka api here
deleteMessages(lastSequenceNr());
// Delete all the akka snapshots as they will not be needed
deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), scala.Long.MaxValue()));
-
- onRecoveryComplete();
-
- initializeBehavior();
}
- }
- }
-
- private void onRecoveredSnapshot(SnapshotOffer offer) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: SnapshotOffer called..", persistenceId());
- }
-
- initRecoveryTimer();
-
- Snapshot snapshot = (Snapshot) offer.snapshot();
- // Create a replicated log with the snapshot information
- // The replicated log can be used later on to retrieve this snapshot
- // when we need to install it on a peer
+ onRecoveryComplete();
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
- currentBehavior));
- context.setLastApplied(snapshot.getLastAppliedIndex());
- context.setCommitIndex(snapshot.getLastAppliedIndex());
+ initializeBehavior();
- Stopwatch timer = Stopwatch.createStarted();
-
- // Apply the snapshot to the actors state
- applyRecoverySnapshot(snapshot.getState());
-
- timer.stop();
- LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
- replicatedLog().size(), persistenceId(), timer.toString(),
- replicatedLog().getSnapshotIndex(), replicatedLog().getSnapshotTerm());
- }
-
- private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received ReplicatedLogEntry for recovery: {}", persistenceId(), logEntry.getIndex());
+ raftRecovery = null;
}
-
- replicatedLog().append(logEntry);
- }
-
- private void onRecoveredApplyLogEntries(long toIndex) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received ApplyLogEntries for recovery, applying to state: {} to {}",
- persistenceId(), context.getLastApplied() + 1, toIndex);
- }
-
- for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
- batchRecoveredLogEntry(replicatedLog().get(i));
- }
-
- context.setLastApplied(toIndex);
- context.setCommitIndex(toIndex);
- }
-
- private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
- initRecoveryTimer();
-
- int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
- if(currentRecoveryBatchCount == 0) {
- startLogRecoveryBatch(batchSize);
- }
-
- appendRecoveredLogEntry(logEntry.getData());
-
- if(++currentRecoveryBatchCount >= batchSize) {
- endCurrentLogRecoveryBatch();
- }
- }
-
- private void endCurrentLogRecoveryBatch() {
- applyCurrentLogRecoveryBatch();
- currentRecoveryBatchCount = 0;
- }
-
- private void onRecoveryCompletedMessage() {
- if(currentRecoveryBatchCount > 0) {
- endCurrentLogRecoveryBatch();
- }
-
- onRecoveryComplete();
-
- String recoveryTime = "";
- if(recoveryTimer != null) {
- recoveryTimer.stop();
- recoveryTime = " in " + recoveryTimer.toString();
- recoveryTimer = null;
- }
-
- LOG.info(
- "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
- replicatedLog().getSnapshotTerm(), replicatedLog().size());
-
- initializeBehavior();
}
protected void initializeBehavior(){
handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
}
- @Override public void handleCommand(Object message) {
+ @Override
+ public void handleCommand(Object message) {
+ if(snapshotSupport == null) {
+ snapshotSupport = new RaftActorSnapshotMessageSupport(delegatingPersistenceProvider, context,
+ currentBehavior, getRaftActorSnapshotCohort(), self());
+ }
+
+ boolean handled = snapshotSupport.handleSnapshotMessage(message);
+ if(handled) {
+ return;
+ }
+
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
persistence().persist(applyEntries, NoopProcedure.instance());
- } else if(message instanceof ApplySnapshot ) {
- Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: ApplySnapshot called on Follower Actor " +
- "snapshotIndex:{}, snapshotTerm:{}", persistenceId(), snapshot.getLastAppliedIndex(),
- snapshot.getLastAppliedTerm()
- );
- }
-
- applySnapshot(snapshot.getState());
-
- //clears the followers log, sets the snapshot index to ensure adjusted-index works
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
- currentBehavior));
- context.setLastApplied(snapshot.getLastAppliedIndex());
-
} else if (message instanceof FindLeader) {
getSender().tell(
new FindLeaderReply(getLeaderAddress()),
getSelf()
);
-
- } else if (message instanceof SaveSnapshotSuccess) {
- SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
- LOG.info("{}: SaveSnapshotSuccess received for snapshot", persistenceId());
-
- long sequenceNumber = success.metadata().sequenceNr();
-
- commitSnapshot(sequenceNumber);
-
- } else if (message instanceof SaveSnapshotFailure) {
- SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
-
- LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:",
- persistenceId(), saveSnapshotFailure.cause());
-
- context.getSnapshotManager().rollback();
-
- } else if (message instanceof CaptureSnapshot) {
- LOG.debug("{}: CaptureSnapshot received by actor: {}", persistenceId(), message);
-
- context.getSnapshotManager().create(createSnapshotProcedure);
-
- } else if (message instanceof CaptureSnapshotReply) {
- handleCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
} else if(message instanceof GetOnDemandRaftState) {
onGetOnDemandRaftStats();
- } else if (message.equals(COMMIT_SNAPSHOT)) {
- commitSnapshot(-1);
} else {
reusableBehaviorStateHolder.init(getCurrentBehavior());
// Make saving Snapshot successful
// Committing the snapshot here would end up calling commit in the creating state which would
// be a state violation. That's why now we send a message to commit the snapshot.
- self().tell(COMMIT_SNAPSHOT, self());
+ self().tell(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT, self());
}
});
}
context.setPeerAddress(peerId, peerAddress);
}
- protected void commitSnapshot(long sequenceNumber) {
- context.getSnapshotManager().commit(persistence(), sequenceNumber);
- }
-
/**
* The applyState method will be called by the RaftActor when some data
* needs to be applied to the actor's state
Object data);
/**
- * This method is called during recovery at the start of a batch of state entries. Derived
- * classes should perform any initialization needed to start a batch.
+ * Returns the RaftActorRecoveryCohort to participate in persistence recovery.
*/
- protected abstract void startLogRecoveryBatch(int maxBatchSize);
-
- /**
- * This method is called during recovery to append state data to the current batch. This method
- * is called 1 or more times after {@link #startLogRecoveryBatch}.
- *
- * @param data the state data
- */
- protected abstract void appendRecoveredLogEntry(Payload data);
-
- /**
- * This method is called during recovery to reconstruct the state of the actor.
- *
- * @param snapshotBytes A snapshot of the state of the actor
- */
- protected abstract void applyRecoverySnapshot(byte[] snapshotBytes);
-
- /**
- * This method is called during recovery at the end of a batch to apply the current batched
- * log entries. This method is called after {@link #appendRecoveredLogEntry}.
- */
- protected abstract void applyCurrentLogRecoveryBatch();
+ @Nonnull
+ protected abstract RaftActorRecoveryCohort getRaftActorRecoveryCohort();
/**
* This method is called when recovery is complete.
protected abstract void onRecoveryComplete();
/**
- * This method will be called by the RaftActor when a snapshot needs to be
- * created. The derived actor should respond with its current state.
- * <p/>
- * During recovery the state that is returned by the derived actor will
- * be passed back to it by calling the applySnapshot method
- *
- * @return The current state of the actor
+     * Returns the RaftActorSnapshotCohort to participate in snapshotting.
*/
- protected abstract void createSnapshot();
-
- /**
- * This method can be called at any other point during normal
- * operations when the derived actor is out of sync with it's peers
- * and the only way to bring it in sync is by applying a snapshot
- *
- * @param snapshotBytes A snapshot of the state of the actor
- */
- protected abstract void applySnapshot(byte[] snapshotBytes);
+ @Nonnull
+ protected abstract RaftActorSnapshotCohort getRaftActorSnapshotCohort();
/**
* This method will be called by the RaftActor when the state of the
return peerAddress;
}
- private void handleCaptureSnapshotReply(byte[] snapshotBytes) {
- LOG.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", persistenceId(), snapshotBytes.length);
-
- context.getSnapshotManager().persist(persistence(), snapshotBytes, currentBehavior, context.getTotalMemory());
- }
-
protected boolean hasFollowers(){
return getRaftActorContext().hasFollowers();
}
}
}
- private class CreateSnapshotProcedure implements Procedure<Void> {
-
- @Override
- public void apply(Void aVoid) throws Exception {
- createSnapshot();
- }
- }
-
private static class BehaviorStateHolder {
private RaftActorBehavior behavior;
private String leaderId;
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+
+/**
+ * Interface for a class that participates in raft actor persistence recovery.
+ *
+ * @author Thomas Pantelis
+ */
+public interface RaftActorRecoveryCohort {
+
+ /**
+     * This method is called during recovery at the start of a batch of state entries. Implementations
+     * should perform any initialization needed to start a batch.
+ */
+ void startLogRecoveryBatch(int maxBatchSize);
+
+ /**
+ * This method is called during recovery to append state data to the current batch. This method
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
+ *
+ * @param data the state data
+ */
+ void appendRecoveredLogEntry(Payload data);
+
+ /**
+ * This method is called during recovery to reconstruct the state of the actor.
+ *
+ * @param snapshotBytes A snapshot of the state of the actor
+ */
+ void applyRecoverySnapshot(byte[] snapshotBytes);
+
+ /**
+ * This method is called during recovery at the end of a batch to apply the current batched
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
+ */
+ void applyCurrentLogRecoveryBatch();
+}
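As a rough sketch of how an actor might implement this cohort (hypothetical code, not part of the change above; the class and field names are invented and snapshot handling is reduced to a reset), recovered payloads can be buffered per batch and folded into the actor's state when the batch is applied:

import java.util.ArrayList;
import java.util.List;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;

// Hypothetical example: keeps recovered log payloads in a simple in-memory list,
// batching them exactly as RaftActorRecoverySupport drives the cohort callbacks.
public class PayloadListRecoveryCohort implements RaftActorRecoveryCohort {
    private final List<Payload> state = new ArrayList<>();
    private List<Payload> currentBatch;

    @Override
    public void startLogRecoveryBatch(int maxBatchSize) {
        // Pre-size the buffer to the journal recovery batch size supplied by the caller.
        currentBatch = new ArrayList<>(maxBatchSize);
    }

    @Override
    public void appendRecoveredLogEntry(Payload data) {
        currentBatch.add(data);
    }

    @Override
    public void applyCurrentLogRecoveryBatch() {
        // Fold the whole batch into the actor's state in one step.
        state.addAll(currentBatch);
        currentBatch = null;
    }

    @Override
    public void applyRecoverySnapshot(byte[] snapshotBytes) {
        // A real actor would deserialize snapshotBytes here (see ExampleActor above);
        // this sketch just resets its state.
        state.clear();
    }
}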
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotOffer;
+import com.google.common.base.Stopwatch;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+/**
+ * Support class that handles persistence recovery for a RaftActor.
+ *
+ * @author Thomas Pantelis
+ */
+class RaftActorRecoverySupport {
+ private final DataPersistenceProvider persistence;
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+ private final RaftActorRecoveryCohort cohort;
+
+ private int currentRecoveryBatchCount;
+
+ private Stopwatch recoveryTimer;
+ private final Logger log;
+
+ RaftActorRecoverySupport(DataPersistenceProvider persistence, RaftActorContext context,
+ RaftActorBehavior currentBehavior, RaftActorRecoveryCohort cohort) {
+ this.persistence = persistence;
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ this.cohort = cohort;
+ this.log = context.getLogger();
+ }
+
+ boolean handleRecoveryMessage(Object message) {
+ boolean recoveryComplete = false;
+ if(persistence.isRecoveryApplicable()) {
+ if (message instanceof SnapshotOffer) {
+ onRecoveredSnapshot((SnapshotOffer) message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
+ } else if (message instanceof ApplyLogEntries) {
+ // Handle this message for backwards compatibility with pre-Lithium versions.
+ onRecoveredApplyLogEntries(((ApplyLogEntries) message).getToIndex());
+ } else if (message instanceof ApplyJournalEntries) {
+ onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
+ } else if (message instanceof DeleteEntries) {
+ replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ recoveryComplete = true;
+ }
+ } else if (message instanceof RecoveryCompleted) {
+ recoveryComplete = true;
+ }
+
+ return recoveryComplete;
+ }
+
+ private ReplicatedLog replicatedLog() {
+ return context.getReplicatedLog();
+ }
+
+ private void initRecoveryTimer() {
+ if(recoveryTimer == null) {
+ recoveryTimer = Stopwatch.createStarted();
+ }
+ }
+
+ private void onRecoveredSnapshot(SnapshotOffer offer) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: SnapshotOffer called..", context.getId());
+ }
+
+ initRecoveryTimer();
+
+ Snapshot snapshot = (Snapshot) offer.snapshot();
+
+ // Create a replicated log with the snapshot information
+ // The replicated log can be used later on to retrieve this snapshot
+ // when we need to install it on a peer
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, persistence, currentBehavior));
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
+
+ Stopwatch timer = Stopwatch.createStarted();
+
+ // Apply the snapshot to the actors state
+ cohort.applyRecoverySnapshot(snapshot.getState());
+
+ timer.stop();
+ log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
+ context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
+ }
+
+ private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Received ReplicatedLogEntry for recovery: index: {}, size: {}", context.getId(),
+ logEntry.getIndex(), logEntry.size());
+ }
+
+ replicatedLog().append(logEntry);
+ }
+
+ private void onRecoveredApplyLogEntries(long toIndex) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Received ApplyLogEntries for recovery, applying to state: {} to {}",
+ context.getId(), context.getLastApplied() + 1, toIndex);
+ }
+
+ for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
+ batchRecoveredLogEntry(replicatedLog().get(i));
+ }
+
+ context.setLastApplied(toIndex);
+ context.setCommitIndex(toIndex);
+ }
+
+ private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ initRecoveryTimer();
+
+ int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+ if(currentRecoveryBatchCount == 0) {
+ cohort.startLogRecoveryBatch(batchSize);
+ }
+
+ cohort.appendRecoveredLogEntry(logEntry.getData());
+
+ if(++currentRecoveryBatchCount >= batchSize) {
+ endCurrentLogRecoveryBatch();
+ }
+ }
+
+ private void endCurrentLogRecoveryBatch() {
+ cohort.applyCurrentLogRecoveryBatch();
+ currentRecoveryBatchCount = 0;
+ }
+
+ private void onRecoveryCompletedMessage() {
+ if(currentRecoveryBatchCount > 0) {
+ endCurrentLogRecoveryBatch();
+ }
+
+ String recoveryTime = "";
+ if(recoveryTimer != null) {
+ recoveryTimer.stop();
+ recoveryTime = " in " + recoveryTimer.toString();
+ recoveryTimer = null;
+ }
+
+ log.info("Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+ "Persistence Id = " + context.getId() +
+ " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, " +
+ "journal-size = {}", replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+
+/**
+ * Interface for a class that participates in raft actor snapshotting.
+ *
+ * @author Thomas Pantelis
+ */
+public interface RaftActorSnapshotCohort {
+
+ /**
+ * This method is called by the RaftActor when a snapshot needs to be
+ * created. The implementation should send a CaptureSnapshotReply to the given actor.
+ *
+ * @param actorRef the actor to which to respond
+ */
+ void createSnapshot(ActorRef actorRef);
+
+ /**
+ * This method is called to apply a snapshot installed by the leader.
+ *
+ * @param snapshotBytes a snapshot of the state of the actor
+ */
+ void applySnapshot(byte[] snapshotBytes);
+}
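A matching hypothetical sketch for the snapshot side (again not part of this change; it assumes a HashMap-backed state serialized with plain Java serialization, roughly what ExampleActor's fromObject/toObject helpers do):

import akka.actor.ActorRef;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.HashMap;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;

// Hypothetical example: snapshots a HashMap-backed state using plain Java serialization.
public class MapSnapshotCohort implements RaftActorSnapshotCohort {
    private final HashMap<String, String> state = new HashMap<>();

    @Override
    public void createSnapshot(ActorRef actorRef) {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
                ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(state);
            oos.flush();
            // Per the contract above, reply with the serialized state to the requesting actor.
            actorRef.tell(new CaptureSnapshotReply(bos.toByteArray()), ActorRef.noSender());
        } catch (IOException e) {
            // A real actor would log this via its logger; swallowed here for brevity.
        }
    }

    @Override
    @SuppressWarnings("unchecked")
    public void applySnapshot(byte[] snapshotBytes) {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(snapshotBytes))) {
            state.clear();
            state.putAll((HashMap<String, String>) ois.readObject());
        } catch (IOException | ClassNotFoundException e) {
            // A real actor would log this via its logger; swallowed here for brevity.
        }
    }
}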
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.japi.Procedure;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+/**
+ * Handles snapshot related messages for a RaftActor.
+ *
+ * @author Thomas Pantelis
+ */
+class RaftActorSnapshotMessageSupport {
+ static final String COMMIT_SNAPSHOT = "commit_snapshot";
+
+ private final DataPersistenceProvider persistence;
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+ private final RaftActorSnapshotCohort cohort;
+ private final ActorRef raftActorRef;
+ private final Logger log;
+
+ private final Procedure<Void> createSnapshotProcedure = new Procedure<Void>() {
+ @Override
+ public void apply(Void notUsed) throws Exception {
+ cohort.createSnapshot(raftActorRef);
+ }
+ };
+
+ RaftActorSnapshotMessageSupport(DataPersistenceProvider persistence, RaftActorContext context,
+ RaftActorBehavior currentBehavior, RaftActorSnapshotCohort cohort, ActorRef raftActorRef) {
+ this.persistence = persistence;
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ this.cohort = cohort;
+ this.raftActorRef = raftActorRef;
+ this.log = context.getLogger();
+ }
+
+ boolean handleSnapshotMessage(Object message) {
+ if(message instanceof ApplySnapshot ) {
+ onApplySnapshot(((ApplySnapshot) message).getSnapshot());
+ return true;
+ } else if (message instanceof SaveSnapshotSuccess) {
+ onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
+ return true;
+ } else if (message instanceof SaveSnapshotFailure) {
+ onSaveSnapshotFailure((SaveSnapshotFailure) message);
+ return true;
+ } else if (message instanceof CaptureSnapshot) {
+ onCaptureSnapshot(message);
+ return true;
+ } else if (message instanceof CaptureSnapshotReply) {
+ onCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
+ return true;
+ } else if (message.equals(COMMIT_SNAPSHOT)) {
+ context.getSnapshotManager().commit(persistence, -1);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private void onCaptureSnapshotReply(byte[] snapshotBytes) {
+ log.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", context.getId(), snapshotBytes.length);
+
+ context.getSnapshotManager().persist(persistence, snapshotBytes, currentBehavior, context.getTotalMemory());
+ }
+
+ private void onCaptureSnapshot(Object message) {
+ log.debug("{}: CaptureSnapshot received by actor: {}", context.getId(), message);
+
+ context.getSnapshotManager().create(createSnapshotProcedure);
+ }
+
+ private void onSaveSnapshotFailure(SaveSnapshotFailure saveSnapshotFailure) {
+ log.error("{}: SaveSnapshotFailure received for snapshot Cause:",
+ context.getId(), saveSnapshotFailure.cause());
+
+ context.getSnapshotManager().rollback();
+ }
+
+ private void onSaveSnapshotSuccess(SaveSnapshotSuccess success) {
+ log.info("{}: SaveSnapshotSuccess received for snapshot", context.getId());
+
+ long sequenceNumber = success.metadata().sequenceNr();
+
+ context.getSnapshotManager().commit(persistence, sequenceNumber);
+ }
+
+ private void onApplySnapshot(Snapshot snapshot) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: ApplySnapshot called on Follower Actor " +
+ "snapshotIndex:{}, snapshotTerm:{}", context.getId(), snapshot.getLastAppliedIndex(),
+ snapshot.getLastAppliedTerm());
+ }
+
+ cohort.applySnapshot(snapshot.getState());
+
+        // Clears the follower's log and sets the snapshot index to ensure adjusted-index works
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, persistence,
+ currentBehavior));
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ }
+}
* information
*
* @param index the index of the log entry
+     * @return the adjusted index of the first log entry removed, or -1 if the entry is not found.
*/
- void removeFrom(long index);
+ long removeFrom(long index);
/**
private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
@Override
public void apply(DeleteEntries param) {
- dataSize = 0;
- for (ReplicatedLogEntry entry : journal) {
- dataSize += entry.size();
- }
}
};
@Override
public void removeFromAndPersist(long logEntryIndex) {
- int adjustedIndex = adjustedIndex(logEntryIndex);
-
- if (adjustedIndex < 0) {
- return;
- }
-
// FIXME: Maybe this should be done after the command is saved
- journal.subList(adjustedIndex , journal.size()).clear();
-
- persistence.persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+ long adjustedIndex = removeFrom(logEntryIndex);
+ if(adjustedIndex >= 0) {
+ persistence.persist(new DeleteEntries((int)adjustedIndex), deleteProcedure);
+ }
}
@Override
}
// FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
- journal.add(replicatedLogEntry);
+ append(replicatedLogEntry);
// When persisting events with persist it is guaranteed that the
// persistent actor will not receive further commands between the
public void apply(ReplicatedLogEntry evt) throws Exception {
int logEntrySize = replicatedLogEntry.size();
- dataSize += logEntrySize;
- long dataSizeForCheck = dataSize;
+ long dataSizeForCheck = dataSize();
dataSizeSinceLastSnapshot += logEntrySize;
}
@Override
- protected void createSnapshot() {
+ public void createSnapshot(ActorRef actorRef) {
if(snapshot != null) {
getSelf().tell(new CaptureSnapshotReply(snapshot), ActorRef.noSender());
}
}
@Override
- protected void applyRecoverySnapshot(byte[] bytes) {
+ public void applyRecoverySnapshot(byte[] bytes) {
}
void setSnapshot(byte[] snapshot) {
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
}
- @After
- public void tearDown() {
- replicatedLogImpl.journal.clear();
- replicatedLogImpl.setSnapshotIndex(-1);
- replicatedLogImpl.setSnapshotTerm(-1);
- replicatedLogImpl = null;
- }
-
@Test
public void testIndexOperations() {
// now create a snapshot of 3 entries, with 1 unapplied entry left in the log
// It removes the entries which have made it to snapshot
// and updates the snapshot index and term
- Map<Long, String> state = takeSnapshot(3);
+ takeSnapshot(3);
// check the values after the snapshot.
// each index value passed in the test is the logical index (log entry index)
assertEquals(2, replicatedLogImpl.getFrom(6).size());
// take a second snapshot with 5 entries with 0 unapplied entries left in the log
- state = takeSnapshot(5);
+ takeSnapshot(5);
assertEquals(0, replicatedLogImpl.size());
assertNull(replicatedLogImpl.last());
assertTrue(replicatedLogImpl.isPresent(5));
}
+ @Test
+ public void testRemoveFrom() {
+
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("E", 2)));
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("F", 3)));
+
+ assertEquals("dataSize", 9, replicatedLogImpl.dataSize());
+
+ long adjusted = replicatedLogImpl.removeFrom(4);
+ assertEquals("removeFrom - adjusted", 4, adjusted);
+ assertEquals("size", 4, replicatedLogImpl.size());
+ assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
+
+ takeSnapshot(1);
+
+ adjusted = replicatedLogImpl.removeFrom(2);
+ assertEquals("removeFrom - adjusted", 1, adjusted);
+ assertEquals("size", 1, replicatedLogImpl.size());
+ assertEquals("dataSize", 1, replicatedLogImpl.dataSize());
+
+ assertEquals("removeFrom - adjusted", -1, replicatedLogImpl.removeFrom(0));
+ assertEquals("removeFrom - adjusted", -1, replicatedLogImpl.removeFrom(100));
+ }
+
// create a snapshot for test
public Map<Long, String> takeSnapshot(final int numEntries) {
Map<Long, String> map = new HashMap<>(numEntries);
- List<ReplicatedLogEntry> entries = replicatedLogImpl.getEntriesTill(numEntries);
- for (ReplicatedLogEntry entry : entries) {
+
+ long lastIndex = 0;
+ long lastTerm = 0;
+ for(int i = 0; i < numEntries; i++) {
+ ReplicatedLogEntry entry = replicatedLogImpl.getAtPhysicalIndex(i);
map.put(entry.getIndex(), entry.getData().toString());
+ lastIndex = entry.getIndex();
+ lastTerm = entry.getTerm();
}
- int term = (int) replicatedLogImpl.lastTerm();
- int lastIndex = (int) entries.get(entries.size() - 1).getIndex();
- entries.clear();
- replicatedLogImpl.setSnapshotTerm(term);
- replicatedLogImpl.setSnapshotIndex(lastIndex);
+ replicatedLogImpl.snapshotPreCommit(lastIndex, lastTerm);
+ replicatedLogImpl.snapshotCommit();
return map;
public void removeFromAndPersist(final long index) {
}
- @Override
- public int dataSize() {
- return -1;
- }
-
- public List<ReplicatedLogEntry> getEntriesTill(final int index) {
- return journal.subList(0, index);
- }
-
@Override
public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
}
this.size = size;
}
- @Override public Map<GeneratedMessage.GeneratedExtension, String> encode() {
- Map<GeneratedMessage.GeneratedExtension, String> map = new HashMap<GeneratedMessage.GeneratedExtension, String>();
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, String> encode() {
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, String> map = new HashMap<>();
map.put(MockPayloadMessages.value, value);
return map;
}
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import javax.annotation.Nonnull;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
InMemorySnapshotStore.clear();
}
- public static class MockRaftActor extends RaftActor {
+ public static class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
- private final RaftActor delegate;
+ private final RaftActor actorDelegate;
+ private final RaftActorRecoveryCohort recoveryCohortDelegate;
+ private final RaftActorSnapshotCohort snapshotCohortDelegate;
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
private final List<Object> state;
private ActorRef roleChangeNotifier;
DataPersistenceProvider dataPersistenceProvider) {
super(id, peerAddresses, config);
state = new ArrayList<>();
- this.delegate = mock(RaftActor.class);
+ this.actorDelegate = mock(RaftActor.class);
+ this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
+ this.snapshotCohortDelegate = mock(RaftActorSnapshotCohort.class);
if(dataPersistenceProvider == null){
setPersistence(true);
} else {
@Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
- delegate.applyState(clientActor, identifier, data);
+ actorDelegate.applyState(clientActor, identifier, data);
LOG.info("{}: applyState called", persistenceId());
}
@Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return this;
}
@Override
- protected void appendRecoveredLogEntry(Payload data) {
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return this;
+ }
+
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ public void appendRecoveredLogEntry(Payload data) {
state.add(data);
}
@Override
- protected void applyCurrentLogRecoveryBatch() {
+ public void applyCurrentLogRecoveryBatch() {
}
@Override
protected void onRecoveryComplete() {
- delegate.onRecoveryComplete();
+ actorDelegate.onRecoveryComplete();
recoveryComplete.countDown();
}
}
@Override
- protected void applyRecoverySnapshot(byte[] bytes) {
- delegate.applyRecoverySnapshot(bytes);
+ public void applyRecoverySnapshot(byte[] bytes) {
+ recoveryCohortDelegate.applyRecoverySnapshot(bytes);
try {
Object data = toObject(bytes);
if (data instanceof List) {
}
}
- @Override protected void createSnapshot() {
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
LOG.info("{}: createSnapshot called", persistenceId());
- delegate.createSnapshot();
+ snapshotCohortDelegate.createSnapshot(actorRef);
}
- @Override protected void applySnapshot(byte [] snapshot) {
+ @Override
+ public void applySnapshot(byte [] snapshot) {
LOG.info("{}: applySnapshot called", persistenceId());
- delegate.applySnapshot(snapshot);
+ snapshotCohortDelegate.applySnapshot(snapshot);
}
- @Override protected void onStateChanged() {
- delegate.onStateChanged();
+ @Override
+ protected void onStateChanged() {
+ actorDelegate.onStateChanged();
}
@Override
public ReplicatedLog getReplicatedLog(){
return this.getRaftActorContext().getReplicatedLog();
}
-
}
// add more entries after snapshot is taken
List<ReplicatedLogEntry> entries = new ArrayList<>();
ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
- new MockRaftActorContext.MockPayload("F"));
+ new MockRaftActorContext.MockPayload("F", 2));
ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
- new MockRaftActorContext.MockPayload("G"));
+ new MockRaftActorContext.MockPayload("G", 3));
ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
- new MockRaftActorContext.MockPayload("H"));
+ new MockRaftActorContext.MockPayload("H", 4));
entries.add(entry2);
entries.add(entry3);
entries.add(entry4);
RaftActorContext context = ref.underlyingActor().getRaftActorContext();
assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
context.getReplicatedLog().size());
+ assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize());
assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
- verify(mockRaftActor.delegate).applyRecoverySnapshot(eq(snapshotBytes.toByteArray()));
+ verify(mockRaftActor.recoveryCohortDelegate).applyRecoverySnapshot(eq(snapshotBytes.toByteArray()));
mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
- verify(mockRaftActor.delegate, times(0)).applyRecoverySnapshot(any(byte[].class));
+ verify(mockRaftActor.recoveryCohortDelegate, times(0)).applyRecoverySnapshot(any(byte[].class));
mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
mockRaftActor.getRaftActorContext().getSnapshotManager().capture(lastEntry, replicatedToAllIndex);
- verify(mockRaftActor.delegate).createSnapshot();
+ verify(mockRaftActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
mockRaftActor.onReceiveCommand(new ApplyState(mockActorRef, "apply-state", entry));
- verify(mockRaftActor.delegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
+ verify(mockRaftActor.actorDelegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
}
};
mockRaftActor.onReceiveCommand(new ApplySnapshot(snapshot));
- verify(mockRaftActor.delegate).applySnapshot(eq(snapshot.getState()));
+ verify(mockRaftActor.snapshotCohortDelegate).applySnapshot(eq(snapshot.getState()));
assertTrue("The replicatedLog should have changed",
oldReplicatedLog != mockRaftActor.getReplicatedLog());
.capture(new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
new MockRaftActorContext.MockPayload("x")), 4);
- verify(leaderActor.delegate).createSnapshot();
+ verify(leaderActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
assertEquals(8, leaderActor.getReplicatedLog().size());
new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
new MockRaftActorContext.MockPayload("D")), 4);
- verify(followerActor.delegate).createSnapshot();
+ verify(followerActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
assertEquals(6, followerActor.getReplicatedLog().size());
@Nonnull ModificationType getModificationType();
/**
- * Returns after state of top level container.
+ * Returns before-state of top level container. Implementations are encouraged,
+ * but not required to provide this state.
+ *
+ * @param root Class representing data container
+ * @return State of object before modification. Null if subtree was not present,
+ * or the implementation cannot provide the state.
+ */
+ @Nullable T getDataBefore();
+
+ /**
+ * Returns after-state of top level container.
*
* @param root Class representing data container
* @return State of object after modification. Null if subtree is not present.
*
* @param <T> Type of Binding Data Object
*/
-class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
+final class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
private final static Logger LOG = LoggerFactory.getLogger(LazyDataObjectModification.class);
return new LazyDataObjectModification<>(codec,domData);
}
- static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
+ private static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
final Collection<DataTreeCandidateNode> domChildNodes) {
final ArrayList<DataObjectModification<? extends DataObject>> result = new ArrayList<>(domChildNodes.size());
populateList(result, parentCodec, domChildNodes);
parentCodec.yangPathArgumentChild(domChildNode.getIdentifier());
populateList(result,type, childCodec, domChildNode);
} catch (final IllegalArgumentException e) {
- if(type == BindingStructuralType.UNKNOWN) {
+ if (type == BindingStructuralType.UNKNOWN) {
LOG.debug("Unable to deserialize unknown DOM node {}",domChildNode,e);
} else {
LOG.debug("Binding representation for DOM node {} was not found",domChildNode,e);
}
}
-
private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
final BindingStructuralType type, final BindingCodecTreeNode<?> childCodec,
final DataTreeCandidateNode domChildNode) {
}
}
+ @Override
+ public T getDataBefore() {
+ return deserialize(domData.getDataBefore());
+ }
+
@Override
public T getDataAfter() {
return deserialize(domData.getDataAfter());
@Override
public Collection<DataObjectModification<? extends DataObject>> getModifiedChildren() {
- if(childNodesCache == null) {
- childNodesCache = from(codec,domData.getChildNodes());
+ if (childNodesCache == null) {
+ childNodesCache = from(codec, domData.getChildNodes());
}
return childNodesCache;
}
}
private T deserialize(final Optional<NormalizedNode<?, ?>> dataAfter) {
- if(dataAfter.isPresent()) {
+ if (dataAfter.isPresent()) {
return codec.deserialize(dataAfter.get());
}
return null;
return config;
}
- public static abstract class Builder<T extends Builder>{
+ public static abstract class Builder<T extends Builder<T>> {
protected Map<String, Object> configHolder;
protected Config fallback;
return cachedMailBoxPushTimeout;
}
- public static class Builder<T extends Builder> extends AbstractConfig.Builder<T>{
+ public static class Builder<T extends Builder<T>> extends AbstractConfig.Builder<T>{
public Builder(String actorSystemName) {
super(actorSystemName);
return builder.build();
}
- private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder builder, NormalizedNodeMessages.Node node){
+ private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder<?, ?> builder, NormalizedNodeMessages.Node node){
for(NormalizedNodeMessages.Node child : node.getChildList()){
builder.withChild((DataContainerChild<?, ?>) deSerialize(child));
@Override
- public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
+ public Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> encode() {
Preconditions.checkState(byteString!=null);
- Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> map = new HashMap<>();
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> map = new HashMap<>();
map.put(org.opendaylight.controller.protobuff.messages.shard.CompositeModificationPayload.modification,
getModificationInternal());
return map;
this.modification = (PersistentMessages.CompositeModification) Preconditions.checkNotNull(modification, "modification should not be null");
}
- @Override public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> encode() {
Preconditions.checkState(modification!=null);
- Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> map = new HashMap<>();
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> map = new HashMap<>();
map.put(
org.opendaylight.controller.protobuff.messages.shard.CompositeModificationPayload.modification, this.modification);
return map;
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.ImmutableList;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import scala.concurrent.Future;
abstract class AbstractTransactionContext implements TransactionContext {
- private final List<Future<Object>> recordedOperationFutures = new ArrayList<>();
private final TransactionIdentifier identifier;
protected AbstractTransactionContext(TransactionIdentifier identifier) {
this.identifier = identifier;
}
- @Override
- public final void copyRecordedOperationFutures(Collection<Future<Object>> target) {
- target.addAll(recordedOperationFutures);
- }
-
protected final TransactionIdentifier getIdentifier() {
return identifier;
}
-
- protected final Collection<Future<Object>> copyRecordedOperationFutures() {
- return ImmutableList.copyOf(recordedOperationFutures);
- }
-
- protected final int recordedOperationCount() {
- return recordedOperationFutures.size();
- }
-
- protected final void recordOperationFuture(Future<Object> future) {
- recordedOperationFutures.add(future);
- }
-}
+}
\ No newline at end of file
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
-import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
*/
public class Shard extends RaftActor {
- private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
-
private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
@VisibleForTesting
private DatastoreContext datastoreContext;
- private SchemaContext schemaContext;
-
- private int createSnapshotTransactionCounter;
-
private final ShardCommitCoordinator commitCoordinator;
private long transactionCommitTimeout;
private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
Serialization.serializedActorPath(getSelf()));
+ private final DOMTransactionFactory domTransactionFactory;
- /**
- * Coordinates persistence recovery on startup.
- */
- private ShardRecoveryCoordinator recoveryCoordinator;
+ private final ShardTransactionActorFactory transactionActorFactory;
- private final DOMTransactionFactory transactionFactory;
-
- private final String txnDispatcherPath;
+ private final ShardSnapshotCohort snapshotCohort;
private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
this.name = name.toString();
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
- this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
- .getDispatcherPath(Dispatchers.DispatcherType.Transaction);
setPersistence(datastoreContext.isPersistent());
getContext().become(new MeteringBehavior(this));
}
- transactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);
+ domTransactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);
- commitCoordinator = new ShardCommitCoordinator(transactionFactory,
+ commitCoordinator = new ShardCommitCoordinator(domTransactionFactory,
TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);
appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
- recoveryCoordinator = new ShardRecoveryCoordinator(store, persistenceId(), LOG);
+ transactionActorFactory = new ShardTransactionActorFactory(domTransactionFactory, datastoreContext,
+ new Dispatchers(context().system().dispatchers()).getDispatcherPath(
+ Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
+
+ snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
}
private void setTransactionCommitTimeout() {
//
if(isLeader()) {
try {
- BatchedModificationsReply reply = commitCoordinator.handleTransactionModifications(batched);
- sender().tell(reply, self());
+ boolean ready = commitCoordinator.handleTransactionModifications(batched);
+ if(ready) {
+ sender().tell(READY_TRANSACTION_REPLY, self());
+ } else {
+ sender().tell(new BatchedModificationsReply(batched.getModifications().size()), self());
+ }
} catch (Exception e) {
LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
batched.getTransactionID(), e);
// node. In that case, the subsequent 3-phase commit messages won't contain the
// transactionId so to maintain backwards compatibility, we create a separate cohort actor
// to provide the compatible behavior.
- if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
- LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
- ActorRef replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
- ready.getTransactionID()));
+ if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
+ ActorRef replyActorPath = getSelf();
+ if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
+ LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
+ replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
+ ready.getTransactionID()));
+ }
ReadyTransactionReply readyTransactionReply =
- new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath));
+ new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
+ ready.getTxnClientVersion());
getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
- readyTransactionReply, getSelf());
-
+ readyTransactionReply, getSelf());
} else {
-
- getSender().tell(ready.isReturnSerialized() ? READY_TRANSACTION_REPLY.toSerializable() :
- READY_TRANSACTION_REPLY, getSelf());
+ getSender().tell(READY_TRANSACTION_REPLY, getSelf());
}
}
}
private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
- transactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
+ domTransactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
}
private ActorRef createTypedTransactionActor(int transactionType,
ShardTransactionIdentifier transactionId, String transactionChainId,
short clientVersion ) {
- DOMStoreTransaction transaction = transactionFactory.newTransaction(
- TransactionProxy.TransactionType.fromInt(transactionType), transactionId.toString(),
- transactionChainId);
-
- return createShardTransaction(transaction, transactionId, clientVersion);
- }
-
- private ActorRef createShardTransaction(DOMStoreTransaction transaction, ShardTransactionIdentifier transactionId,
- short clientVersion){
- return getContext().actorOf(
- ShardTransaction.props(transaction, getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion)
- .withDispatcher(txnDispatcherPath),
- transactionId.toString());
-
+ return transactionActorFactory.newShardTransaction(TransactionProxy.TransactionType.fromInt(transactionType),
+ transactionId, transactionChainId, clientVersion);
}
private void createTransaction(CreateTransaction createTransaction) {
return transactionActor;
}
- private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
- throws ExecutionException, InterruptedException {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- commitCohort.preCommit().get();
- commitCohort.commit().get();
- }
-
private void commitWithNewTransaction(final Modification modification) {
DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
modification.apply(tx);
try {
- syncCommitTransaction(tx);
+ snapshotCohort.syncCommitTransaction(tx);
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
} catch (InterruptedException | ExecutionException e) {
}
private void updateSchemaContext(final UpdateSchemaContext message) {
- this.schemaContext = message.getSchemaContext();
updateSchemaContext(message.getSchemaContext());
- store.onGlobalContextUpdated(message.getSchemaContext());
}
@VisibleForTesting
}
@Override
- protected
- void startLogRecoveryBatch(final int maxBatchSize) {
- recoveryCoordinator.startLogRecoveryBatch(maxBatchSize);
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return snapshotCohort;
}
@Override
- protected void appendRecoveredLogEntry(final Payload data) {
- recoveryCoordinator.appendRecoveredLogPayload(data);
- }
-
- @Override
- protected void applyRecoverySnapshot(final byte[] snapshotBytes) {
- recoveryCoordinator.applyRecoveredSnapshot(snapshotBytes);
- }
-
- @Override
- protected void applyCurrentLogRecoveryBatch() {
- recoveryCoordinator.applyCurrentLogRecoveryBatch();
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
}
@Override
protected void onRecoveryComplete() {
- recoveryCoordinator = null;
-
//notify shard manager
getContext().parent().tell(new ActorInitialized(), getSelf());
}
}
- @Override
- protected void createSnapshot() {
- // Create a transaction actor. We are really going to treat the transaction as a worker
- // so that this actor does not get block building the snapshot. THe transaction actor will
- // after processing the CreateSnapshot message.
-
- ActorRef createSnapshotTransaction = createTransaction(
- TransactionProxy.TransactionType.READ_ONLY.ordinal(),
- "createSnapshot" + ++createSnapshotTransactionCounter, "",
- DataStoreVersions.CURRENT_VERSION);
-
- createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, self());
- }
-
- @VisibleForTesting
- @Override
- protected void applySnapshot(final byte[] snapshotBytes) {
- // Since this will be done only on Recovery or when this actor is a Follower
- // we can safely commit everything in here. We not need to worry about event notifications
- // as they would have already been disabled on the follower
-
- LOG.info("{}: Applying snapshot", persistenceId());
- try {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
-
- NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-
- // delete everything first
- transaction.delete(DATASTORE_ROOT);
-
- // Add everything from the remote node back
- transaction.write(DATASTORE_ROOT, node);
- syncCommitTransaction(transaction);
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
- } finally {
- LOG.info("{}: Done applying snapshot", persistenceId());
- }
- }
-
@Override
protected void onStateChanged() {
boolean isLeader = isLeader();
persistenceId(), getId());
}
- transactionFactory.closeAllTransactionChains();
+ domTransactionFactory.closeAllTransactionChains();
}
}
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
*
* @throws ExecutionException if an error occurs loading the cache
*/
- public BatchedModificationsReply handleTransactionModifications(BatchedModifications batched)
+ public boolean handleTransactionModifications(BatchedModifications batched)
throws ExecutionException {
CohortEntry cohortEntry = cohortCache.getIfPresent(batched.getTransactionID());
if(cohortEntry == null) {
cohortEntry.applyModifications(batched.getModifications());
- String cohortPath = null;
if(batched.isReady()) {
if(log.isDebugEnabled()) {
log.debug("{}: Readying Tx {}, client version {}", name,
}
cohortEntry.ready(cohortDecorator);
- cohortPath = shardActorPath;
}
- return new BatchedModificationsReply(batched.getModifications().size(), cohortPath);
+ return batched.isReady();
}
/**
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
private final DOMStoreReadTransaction transaction;
public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
private final DOMStoreReadWriteTransaction transaction;
public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(transaction, shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(transaction, shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
*
* @author Thomas Pantelis
*/
-class ShardRecoveryCoordinator {
+class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
private final InMemoryDOMDataStore store;
private List<ModificationPayload> currentLogRecoveryBatch;
this.log = log;
}
- void startLogRecoveryBatch(int maxBatchSize) {
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
}
- void appendRecoveredLogPayload(Payload payload) {
+ @Override
+ public void appendRecoveredLogEntry(Payload payload) {
try {
if(payload instanceof ModificationPayload) {
currentLogRecoveryBatch.add((ModificationPayload) payload);
/**
* Applies the current batched log entries to the data store.
*/
- void applyCurrentLogRecoveryBatch() {
+ @Override
+ public void applyCurrentLogRecoveryBatch() {
log.debug("{}: Applying current log recovery batch with size {}", shardName, currentLogRecoveryBatch.size());
DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
*
* @param snapshotBytes the serialized snapshot
*/
- void applyRecoveredSnapshot(final byte[] snapshotBytes) {
+ @Override
+ public void applyRecoverySnapshot(final byte[] snapshotBytes) {
log.debug("{}: Applyng recovered sbapshot", shardName);
DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import java.util.concurrent.ExecutionException;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
+import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+
+/**
+ * Participates in raft snapshotting on behalf of a Shard actor.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardSnapshotCohort implements RaftActorSnapshotCohort {
+
+ private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
+
+ private int createSnapshotTransactionCounter;
+ private final ShardTransactionActorFactory transactionActorFactory;
+ private final InMemoryDOMDataStore store;
+ private final Logger log;
+ private final String logId;
+
+ ShardSnapshotCohort(ShardTransactionActorFactory transactionActorFactory, InMemoryDOMDataStore store,
+ Logger log, String logId) {
+ this.transactionActorFactory = transactionActorFactory;
+ this.store = store;
+ this.log = log;
+ this.logId = logId;
+ }
+
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
+ // Create a transaction actor. We are really going to treat the transaction as a worker
+ // so that this actor does not get blocked building the snapshot. The transaction actor
+ // does the actual work once it processes the CreateSnapshot message.
+
+ ShardTransactionIdentifier transactionID = new ShardTransactionIdentifier(
+ "createSnapshot" + ++createSnapshotTransactionCounter);
+
+ ActorRef createSnapshotTransaction = transactionActorFactory.newShardTransaction(
+ TransactionProxy.TransactionType.READ_ONLY, transactionID, "", DataStoreVersions.CURRENT_VERSION);
+
+ createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, actorRef);
+ }
+
+ @Override
+ public void applySnapshot(byte[] snapshotBytes) {
+ // Since this will be done only on Recovery or when this actor is a Follower
+ // we can safely commit everything in here. We need not worry about event notifications
+ // as they would have already been disabled on the follower
+
+ log.info("{}: Applying snapshot", logId);
+
+ try {
+ DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+
+ NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+
+ // delete everything first
+ transaction.delete(DATASTORE_ROOT);
+
+ // Add everything from the remote node back
+ transaction.write(DATASTORE_ROOT, node);
+ syncCommitTransaction(transaction);
+ } catch (InterruptedException | ExecutionException e) {
+ log.error("{}: An exception occurred when applying snapshot", logId, e);
+ } finally {
+ log.info("{}: Done applying snapshot", logId);
+ }
+
+ }
+
+ void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
+ throws ExecutionException, InterruptedException {
+ DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
+ commitCohort.preCommit().get();
+ commitCohort.commit().get();
+ }
+}
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* The ShardTransaction Actor represents a remote transaction
protected static final boolean SERIALIZED_REPLY = true;
private final ActorRef shardActor;
- private final SchemaContext schemaContext;
private final ShardStats shardStats;
private final String transactionID;
private final short clientTxVersion;
- protected ShardTransaction(ActorRef shardActor, SchemaContext schemaContext,
- ShardStats shardStats, String transactionID, short clientTxVersion) {
+ protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, String transactionID,
+ short clientTxVersion) {
super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
this.shardActor = shardActor;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
this.transactionID = transactionID;
this.clientTxVersion = clientTxVersion;
}
public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats,
- String transactionID, short txnClientVersion) {
- return Props.create(new ShardTransactionCreator(transaction, shardActor, schemaContext,
+ DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
+ return Props.create(new ShardTransactionCreator(transaction, shardActor,
datastoreContext, shardStats, transactionID, txnClientVersion));
}
return transactionID;
}
- protected SchemaContext getSchemaContext() {
- return schemaContext;
- }
-
protected short getClientTxVersion() {
return clientTxVersion;
}
final DOMStoreTransaction transaction;
final ActorRef shardActor;
- final SchemaContext schemaContext;
final DatastoreContext datastoreContext;
final ShardStats shardStats;
final String transactionID;
final short txnClientVersion;
ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, DatastoreContext datastoreContext,
- ShardStats shardStats, String transactionID, short txnClientVersion) {
+ DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
this.transaction = transaction;
this.shardActor = shardActor;
this.shardStats = shardStats;
- this.schemaContext = schemaContext;
this.datastoreContext = datastoreContext;
this.transactionID = transactionID;
this.txnClientVersion = txnClientVersion;
ShardTransaction tx;
if(transaction instanceof DOMStoreReadWriteTransaction) {
tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
+ shardActor, shardStats, transactionID, txnClientVersion);
} else if(transaction instanceof DOMStoreReadTransaction) {
tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- schemaContext, shardStats, transactionID, txnClientVersion);
+ shardStats, transactionID, txnClientVersion);
} else {
tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
+ shardActor, shardStats, transactionID, txnClientVersion);
}
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
private final DOMStoreTransactionChain chain;
private final DatastoreContext datastoreContext;
- private final SchemaContext schemaContext;
private final ShardStats shardStats;
- public ShardTransactionChain(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, ShardStats shardStats) {
+ public ShardTransactionChain(DOMStoreTransactionChain chain, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
}
TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
createTransaction.getVersion()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
createTransaction.getVersion()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
createTransaction.getVersion()), transactionName);
} else {
throw new IllegalArgumentException (
public static Props props(DOMStoreTransactionChain chain, SchemaContext schemaContext,
DatastoreContext datastoreContext, ShardStats shardStats) {
- return Props.create(new ShardTransactionChainCreator(chain, schemaContext,
- datastoreContext, shardStats));
+ return Props.create(new ShardTransactionChainCreator(chain, datastoreContext, shardStats));
}
private static class ShardTransactionChainCreator implements Creator<ShardTransactionChain> {
final DOMStoreTransactionChain chain;
final DatastoreContext datastoreContext;
- final SchemaContext schemaContext;
final ShardStats shardStats;
- ShardTransactionChainCreator(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, ShardStats shardStats) {
+ ShardTransactionChainCreator(DOMStoreTransactionChain chain, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
}
@Override
public ShardTransactionChain create() throws Exception {
- return new ShardTransactionChain(chain, schemaContext, datastoreContext, shardStats);
+ return new ShardTransactionChain(chain, datastoreContext, shardStats);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.UntypedActorContext;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
+
+/**
+ * A factory for creating ShardTransaction actors.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardTransactionActorFactory {
+
+ private final DOMTransactionFactory domTransactionFactory;
+ private final DatastoreContext datastoreContext;
+ private final String txnDispatcherPath;
+ private final ShardStats shardMBean;
+ private final UntypedActorContext actorContext;
+ private final ActorRef shardActor;
+
+ ShardTransactionActorFactory(DOMTransactionFactory domTransactionFactory, DatastoreContext datastoreContext,
+ String txnDispatcherPath, ActorRef shardActor, UntypedActorContext actorContext, ShardStats shardMBean) {
+ this.domTransactionFactory = domTransactionFactory;
+ this.datastoreContext = datastoreContext;
+ this.txnDispatcherPath = txnDispatcherPath;
+ this.shardMBean = shardMBean;
+ this.actorContext = actorContext;
+ this.shardActor = shardActor;
+ }
+
+ ActorRef newShardTransaction(TransactionProxy.TransactionType type, ShardTransactionIdentifier transactionID,
+ String transactionChainID, short clientVersion) {
+
+ DOMStoreTransaction transaction = domTransactionFactory.newTransaction(type, transactionID.toString(),
+ transactionChainID);
+
+ return actorContext.actorOf(ShardTransaction.props(transaction, shardActor, datastoreContext, shardMBean,
+ transactionID.getRemoteTransactionId(), clientVersion).withDispatcher(txnDispatcherPath),
+ transactionID.toString());
+ }
+}
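
As an aside (not part of the patch), this factory is what both the Shard and the new ShardSnapshotCohort call into; a hypothetical invocation with placeholder identifiers would look like the sketch below:

    ShardTransactionIdentifier txId = new ShardTransactionIdentifier("member-1-txn-42"); // placeholder id
    ActorRef txActor = transactionActorFactory.newShardTransaction(
            TransactionProxy.TransactionType.READ_WRITE, txId, "", DataStoreVersions.CURRENT_VERSION);
    // The returned ActorRef is what the Shard ultimately hands back to the requesting client.
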
/*
- *
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
public class ShardWriteTransaction extends ShardTransaction {
private final MutableCompositeModification compositeModification = new MutableCompositeModification();
+ private int totalBatchedModificationsReceived;
+ private Exception lastBatchedModificationsException;
private final DOMStoreWriteTransaction transaction;
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
modification.apply(transaction);
}
- getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ totalBatchedModificationsReceived++;
+ if(batched.isReady()) {
+ if(lastBatchedModificationsException != null) {
+ throw lastBatchedModificationsException;
+ }
+
+ if(totalBatchedModificationsReceived != batched.getTotalMessagesSent()) {
+ throw new IllegalStateException(String.format(
+ "The total number of batched messages received %d does not match the number sent %d",
+ totalBatchedModificationsReceived, batched.getTotalMessagesSent()));
+ }
+
+ readyTransaction(transaction, false);
+ } else {
+ getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ }
} catch (Exception e) {
+ lastBatchedModificationsException = e;
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+
+ if(batched.isReady()) {
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
}
}
import akka.actor.ActorSelection;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import scala.concurrent.Future;
void readData(final YangInstanceIdentifier path, SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture);
void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture);
-
- void copyRecordedOperationFutures(Collection<Future<Object>> target);
}
/*
* Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
private final OperationCompleter operationCompleter;
private BatchedModifications batchedModifications;
+ private int totalBatchedModificationsSent;
protected TransactionContextImpl(ActorSelection actor, TransactionIdentifier identifier,
String transactionChainId, ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- getIdentifier(), recordedOperationCount());
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
- // Send the remaining batched modifications if any.
+ // Send the remaining batched modifications, if any, with the ready flag set.
- sendAndRecordBatchedModifications();
+ Future<Object> lastModificationsFuture = sendBatchedModifications(true);
- // Send the ReadyTransaction message to the Tx actor.
-
- Future<Object> readyReplyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
-
- return combineRecordedOperationsFutures(readyReplyFuture);
+ return transformReadyReply(lastModificationsFuture);
}
- protected Future<ActorSelection> combineRecordedOperationsFutures(final Future<Object> withLastReplyFuture) {
- // Combine all the previously recorded put/merge/delete operation reply Futures and the
- // ReadyTransactionReply Future into one Future. If any one fails then the combined
- // Future will fail. We need all prior operations and the ready operation to succeed
- // in order to attempt commit.
-
- List<Future<Object>> futureList = Lists.newArrayListWithCapacity(recordedOperationCount() + 1);
- copyRecordedOperationFutures(futureList);
- futureList.add(withLastReplyFuture);
-
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
- actorContext.getClientDispatcher());
-
- // Transform the combined Future into a Future that returns the cohort actor path from
- // the ReadyTransactionReply. That's the end result of the ready operation.
+ protected Future<ActorSelection> transformReadyReply(final Future<Object> readyReplyFuture) {
+ // Transform the last reply Future into a Future that returns the cohort actor path from
+ // the last reply message. That's the end result of the ready operation.
- return combinedFutures.transform(new Mapper<Iterable<Object>, ActorSelection>() {
+ return readyReplyFuture.transform(new Mapper<Object, ActorSelection>() {
@Override
- public ActorSelection checkedApply(Iterable<Object> notUsed) {
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
- getIdentifier());
-
- // At this point all the Futures succeeded and we need to extract the cohort
- // actor path from the ReadyTransactionReply. For the recorded operations, they
- // don't return any data so we're only interested that they completed
- // successfully. We could be paranoid and verify the correct reply types but
- // that really should never happen so it's not worth the overhead of
- // de-serializing each reply.
-
- // Note the Future get call here won't block as it's complete.
- Object serializedReadyReply = withLastReplyFuture.value().get().get();
- if (serializedReadyReply instanceof ReadyTransactionReply) {
- return actorContext.actorSelection(((ReadyTransactionReply)serializedReadyReply).getCohortPath());
- } else if(serializedReadyReply instanceof BatchedModificationsReply) {
- return actorContext.actorSelection(((BatchedModificationsReply)serializedReadyReply).getCohortPath());
- } else if(serializedReadyReply.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
- String cohortPath = deserializeCohortPath(reply.getCohortPath());
- return actorContext.actorSelection(cohortPath);
- } else {
- // Throwing an exception here will fail the Future.
- throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
- getIdentifier(), serializedReadyReply.getClass()));
+ public ActorSelection checkedApply(Object serializedReadyReply) {
+ LOG.debug("Tx {} readyTransaction", getIdentifier());
+
+ // At this point the ready operation succeeded and we need to extract the cohort
+ // actor path from the reply.
+ if(ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
+ ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
+ return actorContext.actorSelection(extractCohortPathFrom(readyTxReply));
}
+
+ // Throwing an exception here will fail the Future.
+ throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
+ getIdentifier(), serializedReadyReply.getClass()));
}
}, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
}
- protected String deserializeCohortPath(String cohortPath) {
- return cohortPath;
+ protected String extractCohortPathFrom(ReadyTransactionReply readyTxReply) {
+ return readyTxReply.getCohortPath();
+ }
+
+ private BatchedModifications newBatchedModifications() {
+ return new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion, transactionChainId);
}
private void batchModification(Modification modification) {
if(batchedModifications == null) {
- batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
- transactionChainId);
+ batchedModifications = newBatchedModifications();
}
batchedModifications.addModification(modification);
if(batchedModifications.getModifications().size() >=
actorContext.getDatastoreContext().getShardBatchedModificationCount()) {
- sendAndRecordBatchedModifications();
- }
- }
-
- private void sendAndRecordBatchedModifications() {
- Future<Object> sentFuture = sendBatchedModifications();
- if(sentFuture != null) {
- recordOperationFuture(sentFuture);
+ sendBatchedModifications();
}
}
protected Future<Object> sendBatchedModifications(boolean ready) {
Future<Object> sent = null;
- if(batchedModifications != null) {
+ if(ready || (batchedModifications != null && !batchedModifications.getModifications().isEmpty())) {
+ if(batchedModifications == null) {
+ batchedModifications = newBatchedModifications();
+ }
+
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
batchedModifications.getModifications().size(), ready);
}
batchedModifications.setReady(ready);
+ batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
sent = executeOperationAsync(batchedModifications);
- batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
- transactionChainId);
+ if(ready) {
+ batchedModifications = null;
+ } else {
+ batchedModifications = newBatchedModifications();
+ }
}
return sent;
// Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
// public API contract.
- sendAndRecordBatchedModifications();
+ sendBatchedModifications();
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
// Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
// public API contract.
- sendAndRecordBatchedModifications();
+ sendBatchedModifications();
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
return new TransactionIdentifier(memberName, counter.getAndIncrement());
}
- @VisibleForTesting
- List<Future<Object>> getRecordedOperationFutures() {
- List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if (transactionContext != null) {
- transactionContext.copyRecordedOperationFutures(recordedOperationFutures);
- }
- }
-
- return recordedOperationFutures;
- }
-
@VisibleForTesting
boolean hasTransactionContext() {
for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
return false;
}
+ private boolean isRootPath(YangInstanceIdentifier path){
+ return !path.getPathArguments().iterator().hasNext();
+ }
+
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
LOG.debug("Tx {} read {}", getIdentifier(), path);
- throttleOperation();
-
final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture = SettableFuture.create();
- TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.readData(path, proxyFuture);
- }
- });
+ if(isRootPath(path)){
+ readAllData(path, proxyFuture);
+ } else {
+ throttleOperation();
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.readData(path, proxyFuture);
+ }
+ });
+
+ }
return MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
+ private void readAllData(final YangInstanceIdentifier path,
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture) {
+ Set<String> allShardNames = actorContext.getConfiguration().getAllShardNames();
+ List<SettableFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
+
+ for(String shardName : allShardNames){
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> subProxyFuture = SettableFuture.create();
+
+ throttleOperation();
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(shardName);
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.readData(path, subProxyFuture);
+ }
+ });
+
+ futures.add(subProxyFuture);
+ }
+
+ final ListenableFuture<List<Optional<NormalizedNode<?, ?>>>> future = Futures.allAsList(futures);
+
+ future.addListener(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ proxyFuture.set(NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.builder().build(),
+ future.get(), actorContext.getSchemaContext()));
+ } catch (InterruptedException | ExecutionException e) {
+ proxyFuture.setException(e);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ }
+
@Override
public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
private TransactionFutureCallback getOrCreateTxFutureCallback(YangInstanceIdentifier path) {
String shardName = shardNameFromIdentifier(path);
+ return getOrCreateTxFutureCallback(shardName);
+ }
+
+ private TransactionFutureCallback getOrCreateTxFutureCallback(String shardName) {
TransactionFutureCallback txFutureCallback = txFutureCallbackMap.get(shardName);
if(txFutureCallback == null) {
Future<ActorSelection> findPrimaryFuture = sendFindPrimaryShardAsync(shardName);
return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, getIdentifier(),
transactionChainId, actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion,
operationCompleter);
- } else if (transactionType == TransactionType.WRITE_ONLY &&
- actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
- return new WriteOnlyTransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
- actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
} else {
return new TransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Context for a write-only transaction.
- *
- * @author Thomas Pantelis
- */
-public class WriteOnlyTransactionContextImpl extends TransactionContextImpl {
- private static final Logger LOG = LoggerFactory.getLogger(WriteOnlyTransactionContextImpl.class);
-
- public WriteOnlyTransactionContextImpl(ActorSelection actor, TransactionIdentifier identifier,
- String transactionChainId, ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
- short remoteTransactionVersion, OperationCompleter operationCompleter) {
- super(actor, identifier, transactionChainId, actorContext, schemaContext, isTxActorLocal,
- remoteTransactionVersion, operationCompleter);
- }
-
- @Override
- public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- getIdentifier(), recordedOperationCount());
-
- // Send the remaining batched modifications if any.
-
- Future<Object> lastModificationsFuture = sendBatchedModifications(true);
-
- return combineRecordedOperationsFutures(lastModificationsFuture);
- }
-}
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@Override
public void deleteData(YangInstanceIdentifier path) {
- recordOperationFuture(executeOperationAsync(
- new DeleteData(path, getRemoteTransactionVersion())));
+ executeOperationAsync(new DeleteData(path, getRemoteTransactionVersion()));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordOperationFuture(executeOperationAsync(
- new MergeData(path, data, getRemoteTransactionVersion())));
+ executeOperationAsync(new MergeData(path, data, getRemoteTransactionVersion()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordOperationFuture(executeOperationAsync(
- new WriteData(path, data, getRemoteTransactionVersion())));
+ executeOperationAsync(new WriteData(path, data, getRemoteTransactionVersion()));
}
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- getIdentifier(), recordedOperationCount());
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
// Send the ReadyTransaction message to the Tx actor.
Future<Object> lastReplyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
- return combineRecordedOperationsFutures(lastReplyFuture);
+ return transformReadyReply(lastReplyFuture);
}
@Override
- protected String deserializeCohortPath(String cohortPath) {
+ protected String extractCohortPathFrom(ReadyTransactionReply readyTxReply) {
// In base Helium we used to return the local path of the actor which represented
// a remote ThreePhaseCommitCohort. The local path would then be converted to
// a remote path using this resolvePath method. To maintain compatibility with
// we could remove this code to resolvePath and just use the cohortPath as the
// resolved cohortPath
if(getRemoteTransactionVersion() < DataStoreVersions.HELIUM_1_VERSION) {
- return getActorContext().resolvePath(transactionPath, cohortPath);
+ return getActorContext().resolvePath(transactionPath, readyTxReply.getCohortPath());
}
- return cohortPath;
+ return readyTxReply.getCohortPath();
}
}
private static final long serialVersionUID = 1L;
private boolean ready;
+ private int totalMessagesSent;
private String transactionID;
private String transactionChainID;
this.ready = ready;
}
+ public int getTotalMessagesSent() {
+ return totalMessagesSent;
+ }
+
+ public void setTotalMessagesSent(int totalMessagesSent) {
+ this.totalMessagesSent = totalMessagesSent;
+ }
+
public String getTransactionID() {
return transactionID;
}
transactionID = in.readUTF();
transactionChainID = in.readUTF();
ready = in.readBoolean();
+ totalMessagesSent = in.readInt();
}
@Override
out.writeUTF(transactionID);
out.writeUTF(transactionChainID);
out.writeBoolean(ready);
+ out.writeInt(totalMessagesSent);
}
@Override
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("BatchedModifications [transactionID=").append(transactionID).append(", ready=").append(ready)
- .append(", modifications size=").append(getModifications().size()).append("]");
+ builder.append("BatchedModifications [transactionID=").append(transactionID).append(", transactionChainID=")
+ .append(transactionChainID).append(", ready=").append(ready).append(", totalMessagesSent=")
+ .append(totalMessagesSent).append(", modifications size=").append(getModifications().size())
+ .append("]");
return builder.toString();
}
}
public class BatchedModificationsReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
- private static final byte COHORT_PATH_NOT_PRESENT = 0;
- private static final byte COHORT_PATH_PRESENT = 1;
-
private int numBatched;
- private String cohortPath;
public BatchedModificationsReply() {
}
this.numBatched = numBatched;
}
- public BatchedModificationsReply(int numBatched, String cohortPath) {
- this.numBatched = numBatched;
- this.cohortPath = cohortPath;
- }
-
public int getNumBatched() {
return numBatched;
}
- public String getCohortPath() {
- return cohortPath;
- }
-
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
numBatched = in.readInt();
-
- if(in.readByte() == COHORT_PATH_PRESENT) {
- cohortPath = in.readUTF();
- }
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeInt(numBatched);
-
- if(cohortPath != null) {
- out.writeByte(COHORT_PATH_PRESENT);
- out.writeUTF(cohortPath);
- } else {
- out.writeByte(COHORT_PATH_NOT_PRESENT);
- }
}
@Override
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("BatchedModificationsReply [numBatched=").append(numBatched).append(", cohortPath=")
- .append(cohortPath).append("]");
+ builder.append("BatchedModificationsReply [numBatched=").append(numBatched).append("]");
return builder.toString();
}
}
private final DOMStoreThreePhaseCommitCohort cohort;
private final Modification modification;
private final boolean returnSerialized;
- private final int txnClientVersion;
+ private final short txnClientVersion;
- public ForwardedReadyTransaction(String transactionID, int txnClientVersion,
+ public ForwardedReadyTransaction(String transactionID, short txnClientVersion,
DOMStoreThreePhaseCommitCohort cohort, Modification modification,
boolean returnSerialized) {
this.transactionID = transactionID;
return returnSerialized;
}
- public int getTxnClientVersion() {
+ public short getTxnClientVersion() {
return txnClientVersion;
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+@Deprecated
public class ReadyTransaction implements SerializableMessage{
public static final Class<ShardTransactionMessages.ReadyTransaction> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadyTransaction.class;
package org.opendaylight.controller.cluster.datastore.messages;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-public class ReadyTransactionReply implements SerializableMessage {
+public class ReadyTransactionReply extends VersionedExternalizableMessage {
+ private static final long serialVersionUID = 1L;
+
public static final Class<ShardTransactionMessages.ReadyTransactionReply> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadyTransactionReply.class;
- private final String cohortPath;
+ private String cohortPath;
+
+ public ReadyTransactionReply() {
+ }
public ReadyTransactionReply(String cohortPath) {
+ this(cohortPath, DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public ReadyTransactionReply(String cohortPath, short version) {
+ super(version);
this.cohortPath = cohortPath;
}
}
@Override
- public ShardTransactionMessages.ReadyTransactionReply toSerializable() {
- return ShardTransactionMessages.ReadyTransactionReply.newBuilder()
- .setActorPath(cohortPath)
- .build();
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ super.readExternal(in);
+ cohortPath = in.readUTF();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal(out);
+ out.writeUTF(cohortPath);
+ }
+
+ @Override
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
+ return this;
+ } else {
+ return ShardTransactionMessages.ReadyTransactionReply.newBuilder().setActorPath(cohortPath).build();
+ }
}
public static ReadyTransactionReply fromSerializable(Object serializable) {
- ShardTransactionMessages.ReadyTransactionReply o =
- (ShardTransactionMessages.ReadyTransactionReply) serializable;
+ if(serializable instanceof ReadyTransactionReply) {
+ return (ReadyTransactionReply)serializable;
+ } else {
+ ShardTransactionMessages.ReadyTransactionReply o =
+ (ShardTransactionMessages.ReadyTransactionReply) serializable;
+ return new ReadyTransactionReply(o.getActorPath(), DataStoreVersions.HELIUM_2_VERSION);
+ }
+ }
- return new ReadyTransactionReply(o.getActorPath());
+ public static boolean isSerializedType(Object message) {
+ return message instanceof ReadyTransactionReply ||
+ message instanceof ShardTransactionMessages.ReadyTransactionReply;
}
}
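
To make the compatibility handling above concrete, here is a minimal sketch; the cohort path string is a placeholder:

    ReadyTransactionReply heliumReply =
            new ReadyTransactionReply("/user/shard/cohort", DataStoreVersions.HELIUM_2_VERSION);
    Object wireForm = heliumReply.toSerializable();   // falls back to the protobuf message for pre-Lithium clients

    ReadyTransactionReply lithiumReply = new ReadyTransactionReply("/user/shard/cohort");
    Object selfForm = lithiumReply.toSerializable();  // Lithium and later clients get the Externalizable message itself

    // fromSerializable and isSerializedType accept either form.
    ReadyTransactionReply decoded = ReadyTransactionReply.fromSerializable(wireForm);
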
out.write(serializedPayload);
}
- @SuppressWarnings("rawtypes")
@Override
@Deprecated
public <T> Map<GeneratedExtension, T> encode() {
return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
}
+ public Configuration getConfiguration() {
+ return configuration;
+ }
+
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout){
return ask(actorRef, message, timeout);
}
private static final Context NO_OP_CONTEXT = new NoOpContext();
- private final Class expectedMessageClass;
+ private final Class<?> expectedMessageClass;
private final long expectedArrivalInterval;
* @param expectedArrivalIntervalInMillis The expected arrival interval between two instances of the expected
* message
*/
- public MessageTracker(Class expectedMessageClass, long expectedArrivalIntervalInMillis){
+ public MessageTracker(Class<?> expectedMessageClass, long expectedArrivalIntervalInMillis){
this.expectedMessageClass = expectedMessageClass;
this.expectedArrivalInterval = expectedArrivalIntervalInMillis;
}
}
public static class MessageProcessingTime {
- private final Class messageClass;
+ private final Class<?> messageClass;
private final long elapsedTimeInNanos;
- MessageProcessingTime(Class messageClass, long elapsedTimeInNanos){
+ MessageProcessingTime(Class<?> messageClass, long elapsedTimeInNanos){
this.messageClass = messageClass;
this.elapsedTimeInNanos = elapsedTimeInNanos;
}
'}';
}
- public Class getMessageClass() {
+ public Class<?> getMessageClass() {
return messageClass;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class NormalizedNodeAggregator {
+
+ private static final ExecutorService executorService = MoreExecutors.newDirectExecutorService();
+
+ private final YangInstanceIdentifier rootIdentifier;
+ private final List<Optional<NormalizedNode<?, ?>>> nodes;
+ private final InMemoryDOMDataStore dataStore;
+
+ NormalizedNodeAggregator(YangInstanceIdentifier rootIdentifier, List<Optional<NormalizedNode<?, ?>>> nodes,
+ SchemaContext schemaContext){
+
+ this.rootIdentifier = rootIdentifier;
+ this.nodes = nodes;
+ this.dataStore = new InMemoryDOMDataStore("aggregator", executorService);
+ this.dataStore.onGlobalContextUpdated(schemaContext);
+ }
+
+ /**
+ * Combines the data from all the nodes in the list into a single tree rooted at rootIdentifier.
+ *
+ * @param rootIdentifier the path under which the individual nodes are merged
+ * @param nodes the per-shard nodes to combine; absent entries are skipped
+ * @param schemaContext the schema context backing the in-memory data store used for the merge
+ * @return the aggregated root node, or absent if no data was merged
+ * @throws ExecutionException if committing the merged data fails
+ * @throws InterruptedException if the merge commit is interrupted
+ */
+ public static Optional<NormalizedNode<?,?>> aggregate(YangInstanceIdentifier rootIdentifier,
+ List<Optional<NormalizedNode<?, ?>>> nodes,
+ SchemaContext schemaContext)
+ throws ExecutionException, InterruptedException {
+ return new NormalizedNodeAggregator(rootIdentifier, nodes, schemaContext).aggregate();
+ }
+
+ private Optional<NormalizedNode<?,?>> aggregate() throws ExecutionException, InterruptedException {
+ return combine().getRootNode();
+ }
+
+ private NormalizedNodeAggregator combine() throws InterruptedException, ExecutionException {
+ DOMStoreWriteTransaction domStoreWriteTransaction = dataStore.newWriteOnlyTransaction();
+
+ for(Optional<NormalizedNode<?,?>> node : nodes) {
+ if(node.isPresent()) {
+ domStoreWriteTransaction.merge(rootIdentifier, node.get());
+ }
+ }
+ DOMStoreThreePhaseCommitCohort ready = domStoreWriteTransaction.ready();
+ ready.canCommit().get();
+ ready.preCommit().get();
+ ready.commit().get();
+
+ return this;
+ }
+
+ private Optional<NormalizedNode<?, ?>> getRootNode() throws InterruptedException, ExecutionException {
+ DOMStoreReadTransaction readTransaction = dataStore.newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
+ readTransaction.read(rootIdentifier);
+
+ return read.get();
+ }
+
+}
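For orientation only, here is a hedged sketch of how the new aggregator might be driven from calling code; the shard-root variables are hypothetical, java.util.Arrays is assumed to be imported, and the call is assumed to run where ExecutionException and InterruptedException may propagate:

    // Illustrative sketch, not part of the change: merge two per-shard root reads into one tree.
    List<Optional<NormalizedNode<?, ?>>> shardRoots = Arrays.asList(
            Optional.of(testShardRoot),   // hypothetical root node read from the "test" shard
            Optional.of(carsShardRoot));  // hypothetical root node read from the "cars" shard
    Optional<NormalizedNode<?, ?>> aggregated = NormalizedNodeAggregator.aggregate(
            YangInstanceIdentifier.builder().build(), shardRoots, schemaContext);
    if (aggregated.isPresent()) {
        // aggregated.get() now holds both shards' data merged under the root identifier.
    }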
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
return argThat(matcher);
}
- protected Future<Object> readySerializedTxReply(String path) {
- return Futures.successful((Object)new ReadyTransactionReply(path).toSerializable());
- }
-
protected Future<Object> readyTxReply(String path) {
return Futures.successful((Object)new ReadyTransactionReply(path));
}
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
- protected void expectBatchedModificationsReady(ActorRef actorRef, int count) {
- Future<BatchedModificationsReply> replyFuture = Futures.successful(
- new BatchedModificationsReply(count, actorRef.path().toString()));
- doReturn(replyFuture).when(mockActorContext).executeOperationAsync(
+ protected void expectBatchedModificationsReady(ActorRef actorRef) {
+ doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
any(ActorSelection.class), isA(BatchedModifications.class));
}
- protected void expectReadyTransaction(ActorRef actorRef) {
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
- }
-
protected void expectFailedBatchedModifications(ActorRef actorRef) {
doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.pattern.Patterns;
+import akka.persistence.SaveSnapshotSuccess;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Function;
waitUntilLeader(shard);
- final String transactionID1 = "tx1";
- final String transactionID2 = "tx2";
- final String transactionID3 = "tx3";
+ // Setup 3 simulated transactions with mock cohorts backed by real cohorts.
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort1 = new AtomicReference<>();
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort2 = new AtomicReference<>();
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort3 = new AtomicReference<>();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID, DOMStoreThreePhaseCommitCohort actual) {
- if(transactionID.equals(transactionID1)) {
- mockCohort1.set(createDelegatingMockCohort("cohort1", actual));
- return mockCohort1.get();
- } else if(transactionID.equals(transactionID2)) {
- mockCohort2.set(createDelegatingMockCohort("cohort2", actual));
- return mockCohort2.get();
- } else {
- mockCohort3.set(createDelegatingMockCohort("cohort3", actual));
- return mockCohort3.get();
- }
- }
- };
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
+
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ modification2);
+
+ String transactionID3 = "tx3";
+ MutableCompositeModification modification3 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ modification3);
long timeoutSec = 5;
final FiniteDuration duration = FiniteDuration.create(timeoutSec, TimeUnit.SECONDS);
final Timeout timeout = new Timeout(duration);
- // Send a BatchedModifications message for the first transaction.
+ // Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- BatchedModificationsReply batchedReply = expectMsgClass(duration, BatchedModificationsReply.class);
- assertEquals("getCohortPath", shard.path().toString(), batchedReply.getCohortPath());
- assertEquals("getNumBatched", 1, batchedReply.getNumBatched());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
+ expectMsgClass(duration, ReadyTransactionReply.class));
+ assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the CanCommitTransaction message for the first Tx.
expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Can commit", true, canCommitReply.getCanCommit());
- // Send BatchedModifications for the next 2 Tx's.
+ // Send the ForwardedReadyTransaction for the next 2 Tx's.
- shard.tell(newBatchedModifications(transactionID2, TestModel.OUTER_LIST_PATH,
- ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID3, YangInstanceIdentifier.builder(
- TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ cohort3, modification3, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
// processed after the first Tx completes.
assertEquals("Commits complete", true, done);
- InOrder inOrder = inOrder(mockCohort1.get(), mockCohort2.get(), mockCohort3.get());
- inOrder.verify(mockCohort1.get()).canCommit();
- inOrder.verify(mockCohort1.get()).preCommit();
- inOrder.verify(mockCohort1.get()).commit();
- inOrder.verify(mockCohort2.get()).canCommit();
- inOrder.verify(mockCohort2.get()).preCommit();
- inOrder.verify(mockCohort2.get()).commit();
- inOrder.verify(mockCohort3.get()).canCommit();
- inOrder.verify(mockCohort3.get()).preCommit();
- inOrder.verify(mockCohort3.get()).commit();
+ InOrder inOrder = inOrder(cohort1, cohort2, cohort3);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort1).commit();
+ inOrder.verify(cohort2).canCommit();
+ inOrder.verify(cohort2).preCommit();
+ inOrder.verify(cohort2).commit();
+ inOrder.verify(cohort3).canCommit();
+ inOrder.verify(cohort3).preCommit();
+ inOrder.verify(cohort3).commit();
// Verify data in the data store.
shard.tell(newBatchedModifications(transactionID, YangInstanceIdentifier.builder(
TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
YangInstanceIdentifier path = TestModel.TEST_PATH;
shard.tell(newBatchedModifications(transactionID1, transactionChainID, path,
containerNode, true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Create a read Tx on the same chain.
waitUntilLeader(shard);
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
+ // Setup a simulated transaction with a mock cohort.
+
String transactionID = "tx";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort", dataStore,
+ TestModel.TEST_PATH, containerNode, modification);
+
FiniteDuration duration = duration("5 seconds");
- // Send a BatchedModifications to start a transaction.
+ // Simulate the ForwardedReadyTransaction message that would be sent
+ // by the ShardTransaction.
- NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH, containerNode, true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
+
NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
waitUntilLeader(shard);
- // Setup 2 mock cohorts. The first one fails in the commit phase.
+ // Setup 2 simulated transactions with mock cohorts. The first one fails in the
+ // commit phase.
- final String transactionID1 = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).preCommit();
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).commit();
- final String transactionID2 = "tx2";
- final DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return transactionID1.equals(transactionID) ? cohort1 : cohort2;
- }
- };
-
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
FiniteDuration duration = duration("5 seconds");
final Timeout timeout = new Timeout(duration);
- // Send BatchedModifications to start and ready each transaction.
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
waitUntilLeader(shard);
String transactionID = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).preCommit();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return cohort;
- }
- };
-
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
FiniteDuration duration = duration("5 seconds");
- // Send BatchedModifications to start and ready a transaction.
+ // Simulate the ForwardedReadyTransaction message that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
final FiniteDuration duration = duration("5 seconds");
String transactionID = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return cohort;
- }
- };
+ // Simulate the ForwardedReadyTransaction message that would be sent
+ // by the ShardTransaction.
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
- // Send BatchedModifications to start and ready a transaction.
-
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
}
};
- shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ modification, preCommit);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
final FiniteDuration duration = duration("5 seconds");
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
writeToStore(shard, TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
- // Create and ready the 1st Tx - will timeout
+ // Create 1st Tx - will timeout
String transactionID1 = "tx1";
- shard.tell(newBatchedModifications(transactionID1, YangInstanceIdentifier.builder(
- TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ modification1);
- // Create and ready the 2nd Tx
+ // Create 2nd Tx
- String transactionID2 = "tx2";
+ String transactionID2 = "tx3";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
YangInstanceIdentifier listNodePath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
- .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
- shard.tell(newBatchedModifications(transactionID2, listNodePath,
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
+ listNodePath,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2),
+ modification2);
+
+ // Ready the Tx's
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx. We don't send the commit so it should timeout.
final FiniteDuration duration = duration("5 seconds");
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
+
String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ modification2);
+
String transactionID3 = "tx3";
+ MutableCompositeModification modification3 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification3);
- // Send a BatchedModifications to start transactions and ready them.
+ // Ready the Tx's
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID2,TestModel.OUTER_LIST_PATH,
- ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID3, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ cohort3, modification3, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx.
// Setup 2 simulated transactions with mock cohorts. The first one will be aborted.
- final String transactionID1 = "tx1";
- final DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).abort();
- final String transactionID2 = "tx2";
- final DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
final Timeout timeout = new Timeout(duration);
- ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
- @Override
- public DOMStoreThreePhaseCommitCohort decorate(String transactionID,
- DOMStoreThreePhaseCommitCohort actual) {
- return transactionID1.equals(transactionID) ? cohort1 : cohort2;
- }
- };
-
- shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
-
- // Send BatchedModifications to start and ready each transaction.
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
- shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
- expectMsgClass(duration, BatchedModificationsReply.class);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
@SuppressWarnings("serial")
public void testCreateSnapshot(final boolean persistent, final String shardActorName) throws Exception{
+ final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
+
final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
class TestPersistentDataProvider extends DelegatingPersistentDataProvider {
TestPersistentDataProvider(DataPersistenceProvider delegate) {
dataStoreContextBuilder.persistent(persistent);
new ShardTestKit(getSystem()) {{
- final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
-
class TestShard extends Shard {
protected TestShard(ShardIdentifier name, Map<String, String> peerAddresses,
}
@Override
- protected void commitSnapshot(final long sequenceNumber) {
- super.commitSnapshot(sequenceNumber);
- latch.get().countDown();
+ public void handleCommand(Object message) {
+ super.handleCommand(message);
+
+ if (message instanceof SaveSnapshotSuccess || message.equals("commit_snapshot")) {
+ latch.get().countDown();
+ }
}
@Override
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props, "testNegativeMergeTransactionReady");
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doThrow;
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.actor.Status.Failure;
import akka.actor.Terminated;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
private ActorRef newTransactionActor(DOMStoreTransaction transaction, ActorRef shard, String name,
short version) {
Props props = ShardTransaction.props(transaction, shard != null ? shard : createShard(),
- testSchemaContext, datastoreContext, shardStats, "txn", version);
+ datastoreContext, shardStats, "txn", version);
return getSystem().actorOf(props, name);
}
}
@Test
- public void testOnReceiveReadyTransaction() throws Exception {
+ public void testOnReceiveBatchedModificationsReady() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ "testOnReceiveBatchedModificationsReady");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(writePath, writeData));
+
+ transaction.tell(batched, getRef());
+ BatchedModificationsReply reply = expectMsgClass(duration("5 seconds"), BatchedModificationsReply.class);
+ assertEquals("getNumBatched", 1, reply.getNumBatched());
+
+ batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+ }};
+ }
+
+ @Test(expected=TestException.class)
+ public void testOnReceiveBatchedModificationsFailure() throws Throwable {
+ new JavaTestKit(getSystem()) {{
+
+ DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
+ final ActorRef transaction = newTransactionActor(mockWriteTx,
+ "testOnReceiveBatchedModificationsFailure");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doThrow(new TestException()).when(mockWriteTx).write(path, node);
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(path, node));
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+
+ batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+ Failure failure = expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ if(failure != null) {
+ throw failure.cause();
+ }
+ }};
+ }
+
+ @Test(expected=IllegalStateException.class)
+ public void testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Throwable {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+
+ Failure failure = expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ if(failure != null) {
+ throw failure.cause();
+ }
+ }};
+ }
+
+ @Test
+ public void testOnReceivePreLithiumReadyTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testReadyTransaction");
+ "testReadyTransaction", DataStoreVersions.HELIUM_2_VERSION);
- watch(transaction);
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
transaction.tell(new ReadyTransaction().toSerializable(), getRef());
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
- Terminated.class);
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
- Terminated.class);
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
}};
// test
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testReadyTransaction2");
+ "testReadyTransaction2", DataStoreVersions.HELIUM_2_VERSION);
- watch(transaction);
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
transaction.tell(new ReadyTransaction(), getRef());
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
- Terminated.class);
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
- Terminated.class);
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
}};
}
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
transaction.receive(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION).
expectMsgClass(duration("3 seconds"), Terminated.class);
}};
}
+
+ public static class TestException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+ }
}
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
}
- batchedReplyPromise1.success(new BatchedModificationsReply(1, txActorRef1.path().toString()));
+ batchedReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
// Tx 2 should've proceeded to find the primary shard.
verify(mockActorContext, timeout(5000).times(2)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
Promise<Object> readyReplyPromise1 = akka.dispatch.Futures.promise();
doReturn(readyReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(txActorRef1)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ eq(actorSelection(txActorRef1)), isA(BatchedModifications.class));
DOMStoreWriteTransaction writeTx1 = txChainProxy.newReadWriteTransaction();
writeTx1.ready();
- verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), false);
+ verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
String tx2MemberName = "tx2MemberName";
doReturn(tx2MemberName).when(mockActorContext).getCurrentMemberName();
fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
}
- readyReplyPromise1.success(readySerializedTxReply(txActorRef1.path().toString()).value().get().get());
+ readyReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
verify(mockActorContext, timeout(5000)).executeOperationAsync(eq(getSystem().actorSelection(shardActorRef2.path())),
eqCreateTransaction(tx2MemberName, READ_WRITE));
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
import akka.actor.Props;
import akka.dispatch.Futures;
import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Assert;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregatorTest;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
+import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.Promise;
-import scala.concurrent.duration.Duration;
@SuppressWarnings("resource")
public class TransactionProxyTest extends AbstractTransactionProxyTest {
transactionProxy.exists(TestModel.TEST_PATH);
}
- private void verifyRecordingOperationFutures(List<Future<Object>> futures,
- Class<?>... expResultTypes) throws Exception {
- assertEquals("getRecordingOperationFutures size", expResultTypes.length, futures.size());
-
- int i = 0;
- for( Future<Object> future: futures) {
- assertNotNull("Recording operation Future is null", future);
-
- Class<?> expResultType = expResultTypes[i++];
- if(Throwable.class.isAssignableFrom(expResultType)) {
- try {
- Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- fail("Expected exception from recording operation Future");
- } catch(Exception e) {
- // Expected
- }
- } else {
- assertEquals(String.format("Recording operation %d Future result type", i +1 ), expResultType,
- Await.result(future, Duration.create(5, TimeUnit.SECONDS)).getClass());
- }
- }
- }
-
@Test
public void testWrite() throws Exception {
dataStoreContextBuilder.shardBatchedModificationCount(1);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
+ expectBatchedModificationsReady(actorRef);
final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
// This sends the batched modification.
transactionProxy.ready();
- verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), false);
-
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), true);
}
@Test(expected=IllegalStateException.class)
}
@Test
- public void testReadyWithReadWrite() throws Exception {
+ public void testReadWrite() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
eq(actorSelection(actorRef)), eqSerializedReadData());
expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), false,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
+ }
+
+ @Test
+ public void testReadyWithReadWrite() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
+
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
-
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
- verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(BatchedModifications.class));
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ verifyBatchedModifications(batchedModifications.get(0), true,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
+
+ assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
}
@Test
- public void testReadyWithWriteOnlyAndLastBatchPending() throws Exception {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+ public void testReadyWithNoModifications() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModificationsReady(actorRef, 1);
+ expectBatchedModificationsReady(actorRef);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+ transactionProxy.read(TestModel.TEST_PATH);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures());
-
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), true,
- new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
- verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ verifyBatchedModifications(batchedModifications.get(0), true);
}
@Test
- public void testReadyWithWriteOnlyAndLastBatchEmpty() throws Exception {
- dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
+ public void testReadyWithWriteOnlyAndLastBatchPending() throws Exception {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectBatchedModificationsReady(actorRef, 1);
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
-
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), false,
+ verifyBatchedModifications(batchedModifications.get(0), true,
new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- verifyBatchedModifications(batchedModifications.get(1), true);
-
verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
@Test
- public void testReadyWithRecordingOperationFailure() throws Exception {
+ public void testReadyWithWriteOnlyAndLastBatchEmpty() throws Exception {
dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
-
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectFailedBatchedModifications(actorRef);
-
- doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyCohortFutures(proxy, TestException.class);
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(), TestException.class);
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), false,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
+
+ verifyBatchedModifications(batchedModifications.get(1), true);
+
+ verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
+ isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
@Test
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
doReturn(true).when(mockActorContext).isPathLocal(anyString());
- doReturn(batchedModificationsReply(1)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), isA(BatchedModifications.class));
+ expectBatchedModificationsReady(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- // testing ready
- doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.class));
-
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
expectBatchedModifications(actorRef, shardBatchedModificationCount);
- expectReadyTransaction(actorRef);
-
YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
- boolean optimizedWriteOnly = type == WRITE_ONLY && dataStoreContextBuilder.build().isWriteOnlyTransactionOptimizationsEnabled();
- verifyBatchedModifications(batchedModifications.get(2), optimizedWriteOnly, new MergeModification(mergePath3, mergeNode3),
+ verifyBatchedModifications(batchedModifications.get(2), true, new MergeModification(mergePath3, mergeNode3),
new DeleteModification(deletePath2));
- if(optimizedWriteOnly) {
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class);
- } else {
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
- }
+ assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
}
@Test
inOrder.verify(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
+ }
+
+ @Test
+ public void testReadRoot() throws ReadFailedException, InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
+
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ Configuration configuration = mock(Configuration.class);
+ doReturn(configuration).when(mockActorContext).getConfiguration();
+ doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ doReturn(Sets.newHashSet("test", "cars")).when(configuration).getAllShardNames();
+
+ NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
+
+ setUpReadData("test", NormalizedNodeAggregatorTest.getRootNode(expectedNode1, schemaContext));
+ setUpReadData("cars", NormalizedNodeAggregatorTest.getRootNode(expectedNode2, schemaContext));
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ doReturn(memberName).when(mockActorContext).getCurrentMemberName();
+
+ doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
+
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
+ YangInstanceIdentifier.builder().build()).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+
+ NormalizedNode<?, ?> normalizedNode = readOptional.get();
+
+ assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+
+ Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+
+ for(NormalizedNode<?,?> node : collection){
+ assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
+ }
+
+ assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
+ NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME) != null);
+
+ assertEquals(expectedNode1, NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME));
+
+ assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
+ NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
+
+ assertEquals(expectedNode2, NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME));
+ }
+
+
+ private void setUpReadData(String shardName, NormalizedNode<?, ?> expectedNode) {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(getSystem().actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ doReturn(Futures.successful(getSystem().actorSelection(shardActorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(shardName));
+
+ doReturn(true).when(mockActorContext).isPathLocal(shardActorRef.path().toString());
+
+ ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(txActorRef.path())).
+ when(mockActorContext).actorSelection(txActorRef.path().toString());
+
+ doReturn(Futures.successful(createTransactionReply(txActorRef, DataStoreVersions.CURRENT_VERSION))).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, TransactionType.READ_ONLY));
+
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(txActorRef)), eqSerializedReadData(YangInstanceIdentifier.builder().build()));
}
}
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.inOrder;
-import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
+import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.HELIUM_2_VERSION;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.dispatch.Dispatchers;
// Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ shard.tell(new ForwardedReadyTransaction(transactionID1, HELIUM_2_VERSION,
cohort1, modification1, true), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
// Send the ForwardedReadyTransaction for the next 2 Tx's.
- shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ shard.tell(new ForwardedReadyTransaction(transactionID2, HELIUM_2_VERSION,
cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ shard.tell(new ForwardedReadyTransaction(transactionID3, HELIUM_2_VERSION,
cohort3, modification3, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import scala.concurrent.Future;
/**
* Unit tests for backwards compatibility with pre-Lithium versions.
return argThat(matcher);
}
+ private Future<Object> readySerializedTxReply(String path, short version) {
+ return Futures.successful(new ReadyTransactionReply(path, version).toSerializable());
+ }
+
private ActorRef testCompatibilityWithHeliumVersion(short version) throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, version);
doReturn(Futures.successful(new DeleteDataReply().toSerializable(version))).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyDeleteData(TestModel.TEST_PATH));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString(), version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
doReturn(Futures.successful(new WriteDataReply().toSerializable(version))).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyWriteData(testNode));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString(), version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
batched.addModification(new MergeModification(mergePath, mergeData));
batched.addModification(new DeleteModification(deletePath));
batched.setReady(true);
+ batched.setTotalMessagesSent(5);
BatchedModifications clone = (BatchedModifications) SerializationUtils.clone(
(Serializable) batched.toSerializable());
assertEquals("getTransactionID", "tx1", clone.getTransactionID());
assertEquals("getTransactionChainID", "txChain", clone.getTransactionChainID());
assertEquals("isReady", true, clone.isReady());
+ assertEquals("getTotalMessagesSent", 5, clone.getTotalMessagesSent());
assertEquals("getModifications size", 3, clone.getModifications().size());
BatchedModificationsReply clone = (BatchedModificationsReply) SerializationUtils.clone(
(Serializable) new BatchedModificationsReply(100).toSerializable());
assertEquals("getNumBatched", 100, clone.getNumBatched());
- assertEquals("getCohortPath", null, clone.getCohortPath());
-
- clone = (BatchedModificationsReply) SerializationUtils.clone(
- (Serializable) new BatchedModificationsReply(50, "cohort path").toSerializable());
- assertEquals("getNumBatched", 50, clone.getNumBatched());
- assertEquals("getCohortPath", "cohort path", clone.getCohortPath());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static org.junit.Assert.assertEquals;
+import java.io.Serializable;
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+
+/**
+ * Unit tests for ReadyTransactionReply.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReadyTransactionReplyTest {
+
+ @Test
+ public void testSerialization() {
+ String cohortPath = "cohort path";
+ ReadyTransactionReply expected = new ReadyTransactionReply(cohortPath);
+
+ Object serialized = expected.toSerializable();
+ assertEquals("Serialized type", ReadyTransactionReply.class, serialized.getClass());
+
+ ReadyTransactionReply actual = ReadyTransactionReply.fromSerializable(SerializationUtils.clone(
+ (Serializable) serialized));
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
+ assertEquals("getCohortPath", cohortPath, actual.getCohortPath());
+ }
+
+ @Test
+ public void testSerializationWithPreLithiumVersion() throws Exception {
+ String cohortPath = "cohort path";
+ ReadyTransactionReply expected = new ReadyTransactionReply(cohortPath, DataStoreVersions.HELIUM_2_VERSION);
+
+ Object serialized = expected.toSerializable();
+ assertEquals("Serialized type", ShardTransactionMessages.ReadyTransactionReply.class, serialized.getClass());
+
+ ReadyTransactionReply actual = ReadyTransactionReply.fromSerializable(SerializationUtils.clone(
+ (Serializable) serialized));
+ assertEquals("getVersion", DataStoreVersions.HELIUM_2_VERSION, actual.getVersion());
+ assertEquals("getCohortPath", cohortPath, actual.getCohortPath());
+ }
+}
@Override public void onReceive(Object message) throws Exception {
if(message instanceof String){
if("messages".equals(message)){
- getSender().tell(new ArrayList(messages), getSelf());
+ getSender().tell(new ArrayList<>(messages), getSelf());
}
} else {
messages.add(message);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import com.google.common.base.Optional;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import org.junit.Test;
+import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
+import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
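+/**
+ * Unit tests for NormalizedNodeAggregator.
+ */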
+public class NormalizedNodeAggregatorTest {
+
+ @Test
+ public void testAggregate() throws InterruptedException, ExecutionException, ReadFailedException {
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
+
+ Optional<NormalizedNode<?, ?>> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.builder().build(),
+ Lists.newArrayList(
+ Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode1, schemaContext)),
+ Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode2, schemaContext))),
+ schemaContext);
+
+
+ NormalizedNode<?,?> normalizedNode = optional.get();
+
+ assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+
+ Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+
+ for(NormalizedNode<?,?> node : collection){
+ assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
+ }
+
+ assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
+ findChildWithQName(collection, TestModel.TEST_QNAME) != null);
+
+ assertEquals(expectedNode1, findChildWithQName(collection, TestModel.TEST_QNAME));
+
+ assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
+ findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
+
+ assertEquals(expectedNode2, findChildWithQName(collection, CarsModel.BASE_QNAME));
+
+ }
+
+ public static NormalizedNode<?,?> getRootNode(NormalizedNode<?, ?> moduleNode, SchemaContext schemaContext) throws ReadFailedException, ExecutionException, InterruptedException {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", Executors.newSingleThreadExecutor());
+ store.onGlobalContextUpdated(schemaContext);
+
+ DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
+
+ writeTransaction.merge(YangInstanceIdentifier.builder().node(moduleNode.getNodeType()).build(), moduleNode);
+
+ DOMStoreThreePhaseCommitCohort ready = writeTransaction.ready();
+
+ ready.canCommit().get();
+ ready.preCommit().get();
+ ready.commit().get();
+
+ DOMStoreReadTransaction readTransaction = store.newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read = readTransaction.read(YangInstanceIdentifier.builder().build());
+
+ Optional<NormalizedNode<?, ?>> nodeOptional = read.checkedGet();
+
+ return nodeOptional.get();
+ }
+
+ public static NormalizedNode<?,?> findChildWithQName(Collection<NormalizedNode<?, ?>> collection, QName qName) {
+ for(NormalizedNode<?,?> node : collection){
+ if(node.getNodeType().equals(qName)){
+ return node;
+ }
+ }
+
+ return null;
+ }
+
+}
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractCheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
* A {@link Future} used to report the status of a future {@link java.util.concurrent.Future}.
*/
final class PingPongFuture extends AbstractCheckedFuture<Void, TransactionCommitFailedException> {
- protected PingPongFuture(final ListenableFuture<Void> delegate) {
- super(delegate);
- }
+ protected PingPongFuture(final ListenableFuture<Void> delegate) {
+ super(delegate);
+ }
- @Override
- protected TransactionCommitFailedException mapException(final Exception e) {
- Preconditions.checkArgument(e instanceof TransactionCommitFailedException);
- return (TransactionCommitFailedException) e;
+ @Override
+ protected TransactionCommitFailedException mapException(final Exception e) {
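+ // Unwrap the cause if it already is a TransactionCommitFailedException, otherwise wrap the failure in a new one.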
+ if (e.getCause() instanceof TransactionCommitFailedException) {
+ return (TransactionCommitFailedException) e.getCause();
+ } else {
+ return new TransactionCommitFailedException(e.getMessage(), e.getCause(), null);
}
+ }
}
+
import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.OrderedNormalizedNodeWriter;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
}
private void writeNormalizedRpc(final ContainerNode normalized, final DOMResult result, final SchemaPath schemaPath, final SchemaContext baseNetconfCtx) throws IOException, XMLStreamException {
- final NormalizedNodeWriter normalizedNodeWriter;
+ final OrderedNormalizedNodeWriter normalizedNodeWriter;
NormalizedNodeStreamWriter normalizedNodeStreamWriter = null;
XMLStreamWriter writer = null;
try {
writer = NetconfMessageTransformUtil.XML_FACTORY.createXMLStreamWriter(result);
normalizedNodeStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(writer, baseNetconfCtx, schemaPath);
- normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(normalizedNodeStreamWriter);
-
- for (final DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?> editElement : normalized.getValue()) {
- normalizedNodeWriter.write(editElement);
- }
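+ // Pass the child elements as one collection so OrderedNormalizedNodeWriter can emit them in the schema-defined order.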
+ normalizedNodeWriter = new OrderedNormalizedNodeWriter(normalizedNodeStreamWriter, baseNetconfCtx, schemaPath);
+ Collection<DataContainerChild<?, ?>> value = (Collection) normalized.getValue();
+ normalizedNodeWriter.write(value);
normalizedNodeWriter.flush();
} finally {
try {
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import javax.annotation.Nullable;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.OrderedNormalizedNodeWriter;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
final NormalizedNodeStreamWriter nnStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(xmlWriter,
schemaContext.getCurrentContext(), rpcOutputPath);
- final NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(nnStreamWriter);
+ final OrderedNormalizedNodeWriter nnWriter = new OrderedNormalizedNodeWriter(nnStreamWriter, schemaContext.getCurrentContext(), rpcOutputPath);
writeRootElement(xmlWriter, nnWriter, (ContainerNode) data);
try {
}
}
- private void writeRootElement(final XMLStreamWriter xmlWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) {
+ private void writeRootElement(final XMLStreamWriter xmlWriter, final OrderedNormalizedNodeWriter nnWriter, final ContainerNode data) {
try {
- for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
- nnWriter.write(child);
- }
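+ // The ordered writer serializes the output children according to the rpc output schema path supplied above.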
+ Collection<DataContainerChild<?, ?>> value = (Collection) data.getValue();
+ nnWriter.write(value);
nnWriter.flush();
xmlWriter.flush();
} catch (XMLStreamException | IOException e) {
import java.util.List;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import javax.xml.transform.TransformerException;
import org.custommonkey.xmlunit.DetailedDiff;
import org.custommonkey.xmlunit.Diff;
import org.custommonkey.xmlunit.XMLUnit;
verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid-control.xml"));
}
+ @Test
+ public void testSuccessfulContainerInvocation() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceSuccesfullInvocation);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-container.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+ verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-container-control.xml"));
+ }
+
@Test
public void testFailedInvocation() throws Exception {
RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceFailedInvocation);
verifyResponse(response, RPC_REPLY_OK);
}
- private void verifyResponse(Document response, Document template) {
+ private void verifyResponse(Document response, Document template) throws IOException, TransformerException {
DetailedDiff dd = new DetailedDiff(new Diff(response, template));
dd.overrideElementQualifier(new RecursiveElementNameAndTextQualifier());
- assertTrue(dd.similar());
+ // We care about element order, so the response has to be identical to the template, not merely similar
+ assertTrue(dd.identical());
}
private RpcDefinition getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <cont1 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ cont1 input string 1
+ </test-string>
+ <test-string2>
+ cont1 input string 2
+ </test-string2>
+ </cont1>
+ <cont2 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ cont2 input string 1
+ </test-string>
+ <test-string2>
+ cont2 input string 2
+ </test-string2>
+ </cont2>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <container-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <cont1>
+ <test-string>
+ cont1 input string 1
+ </test-string>
+ <test-string2>
+ cont1 input string 2
+ </test-string2>
+ </cont1>
+ <cont2>
+ <test-string>
+ cont2 input string 1
+ </test-string>
+ <test-string2>
+ cont2 input string 2
+ </test-string2>
+ </cont2>
+ </container-rpc>
+</rpc>
\ No newline at end of file
}
}
}
+
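+ // RPC whose input and output each carry two containers of string leaves; exercised by the container invocation test.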
+ rpc container-rpc {
+ input {
+ container cont1 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ container cont2 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+
+ output {
+ container cont1 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ container cont2 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+ }
}
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-model-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
</dependencies>
<build>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter.UNKNOWN_SIZE;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Iterables;
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamAttributeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+//TODO This does not extend NormalizedNodeWriter from yangtools because of the Lithium API freeze; make it inherit the common methods to avoid code duplication once that is possible.
+//TODO Move this class to yangtools; it lives in netconf-util only because of the same API freeze.
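+/**
+ * A NormalizedNode writer which emits child nodes in the order defined by their YANG schema rather than in the
+ * iteration order of the parent node's value. A minimal usage sketch (streamWriter, schemaContext, parentPath and
+ * normalizedNode are assumed to be prepared by the caller):
+ *
+ * <pre>
+ *   OrderedNormalizedNodeWriter nnWriter = new OrderedNormalizedNodeWriter(streamWriter, schemaContext, parentPath);
+ *   nnWriter.write(normalizedNode);
+ *   nnWriter.flush();
+ * </pre>
+ */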
+public class OrderedNormalizedNodeWriter implements Closeable, Flushable {
+
+ private final SchemaContext schemaContext;
+ private final SchemaNode root;
+ private final NormalizedNodeStreamWriter writer;
+
+ public OrderedNormalizedNodeWriter(NormalizedNodeStreamWriter writer, SchemaContext schemaContext, SchemaPath path) {
+ this.writer = writer;
+ this.schemaContext = schemaContext;
+ this.root = findParentSchemaOnPath(schemaContext, path);
+ }
+
+ public OrderedNormalizedNodeWriter write(final NormalizedNode<?, ?> node) throws IOException {
+ if (root == schemaContext) {
+ return write(node, schemaContext.getDataChildByName(node.getNodeType()));
+ }
+
+ return write(node, root);
+ }
+
+ public OrderedNormalizedNodeWriter write(final Collection<DataContainerChild<?,?>> nodes) throws IOException {
+ if (writeChildren(nodes, root, false)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize nodes " + nodes);
+
+ }
+
+ private OrderedNormalizedNodeWriter write(NormalizedNode<?, ?> node, SchemaNode dataSchemaNode) throws IOException {
+ if (node == null) {
+ return this;
+ }
+
+ if (wasProcessedAsCompositeNode(node, dataSchemaNode)) {
+ return this;
+ }
+
+ if (wasProcessAsSimpleNode(node)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize node " + node);
+ }
+
+ private void write(List<NormalizedNode<?, ?>> nodes, SchemaNode dataSchemaNode) throws IOException {
+ for (NormalizedNode<?, ?> node : nodes) {
+ write(node, dataSchemaNode);
+ }
+ }
+
+ private OrderedNormalizedNodeWriter writeLeaf(final NormalizedNode<?, ?> node) throws IOException {
+ if (wasProcessAsSimpleNode(node)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize node " + node);
+ }
+
+ private boolean writeChildren(final Iterable<? extends NormalizedNode<?, ?>> children, SchemaNode parentSchemaNode, boolean endParent) throws IOException {
+ // Augmentation children cannot be looked up via node.getChild, so build our own QName-to-node structure with augmentations resolved
+ ArrayListMultimap<QName, NormalizedNode<?, ?>> qNameToNodes = ArrayListMultimap.create();
+ for (NormalizedNode<?, ?> child : children) {
+ if (child instanceof AugmentationNode) {
+ qNameToNodes.putAll(resolveAugmentations(child));
+ } else {
+ qNameToNodes.put(child.getNodeType(), child);
+ }
+ }
+
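+ // Where the parent schema is known (container/list or choice), emit children in the schema-defined order;
+ // otherwise write them as simple nodes in input order.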
+ if (parentSchemaNode instanceof DataNodeContainer) {
+ if (parentSchemaNode instanceof ListSchemaNode && qNameToNodes.containsKey(parentSchemaNode.getQName())) {
+ write(qNameToNodes.get(parentSchemaNode.getQName()), parentSchemaNode);
+ } else {
+ for (DataSchemaNode schemaNode : ((DataNodeContainer) parentSchemaNode).getChildNodes()) {
+ write(qNameToNodes.get(schemaNode.getQName()), schemaNode);
+ }
+ }
+ } else if(parentSchemaNode instanceof ChoiceSchemaNode) {
+ for (ChoiceCaseNode ccNode : ((ChoiceSchemaNode) parentSchemaNode).getCases()) {
+ for (DataSchemaNode dsn : ccNode.getChildNodes()) {
+ if (qNameToNodes.containsKey(dsn.getQName())) {
+ write(qNameToNodes.get(dsn.getQName()), dsn);
+ }
+ }
+ }
+ } else {
+ for (NormalizedNode<?, ?> child : children) {
+ writeLeaf(child);
+ }
+ }
+ if (endParent) {
+ writer.endNode();
+ }
+ return true;
+ }
+
+ private ArrayListMultimap<QName, NormalizedNode<?, ?>> resolveAugmentations(NormalizedNode<?, ?> child) {
+ final ArrayListMultimap<QName, NormalizedNode<?, ?>> resolvedAugs = ArrayListMultimap.create();
+ for (NormalizedNode<?, ?> node : ((AugmentationNode) child).getValue()) {
+ if (node instanceof AugmentationNode) {
+ resolvedAugs.putAll(resolveAugmentations(node));
+ } else {
+ resolvedAugs.put(node.getNodeType(), node);
+ }
+ }
+ return resolvedAugs;
+ }
+
+ private boolean writeMapEntryNode(final MapEntryNode node, final SchemaNode dataSchemaNode) throws IOException {
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer)
+ .startMapEntryNode(node.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(node.getValue()), node.getAttributes());
+ } else {
+ writer.startMapEntryNode(node.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(node.getValue()));
+ }
+ return writeChildren(node.getValue(), dataSchemaNode, true);
+ }
+
+ private boolean wasProcessAsSimpleNode(final NormalizedNode<?, ?> node) throws IOException {
+ if (node instanceof LeafSetEntryNode) {
+ final LeafSetEntryNode<?> nodeAsLeafList = (LeafSetEntryNode<?>)node;
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).leafSetEntryNode(nodeAsLeafList.getValue(), nodeAsLeafList.getAttributes());
+ } else {
+ writer.leafSetEntryNode(nodeAsLeafList.getValue());
+ }
+ return true;
+ } else if (node instanceof LeafNode) {
+ final LeafNode<?> nodeAsLeaf = (LeafNode<?>)node;
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).leafNode(nodeAsLeaf.getIdentifier(), nodeAsLeaf.getValue(), nodeAsLeaf.getAttributes());
+ } else {
+ writer.leafNode(nodeAsLeaf.getIdentifier(), nodeAsLeaf.getValue());
+ }
+ return true;
+ } else if (node instanceof AnyXmlNode) {
+ final AnyXmlNode anyXmlNode = (AnyXmlNode)node;
+ writer.anyxmlNode(anyXmlNode.getIdentifier(), anyXmlNode.getValue());
+ return true;
+ }
+
+ return false;
+ }
+
+ private boolean wasProcessedAsCompositeNode(final NormalizedNode<?, ?> node, SchemaNode dataSchemaNode) throws IOException {
+ if (node instanceof ContainerNode) {
+ final ContainerNode n = (ContainerNode) node;
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).startContainerNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()), n.getAttributes());
+ } else {
+ writer.startContainerNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ }
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof MapEntryNode) {
+ return writeMapEntryNode((MapEntryNode) node, dataSchemaNode);
+ }
+ if (node instanceof UnkeyedListEntryNode) {
+ final UnkeyedListEntryNode n = (UnkeyedListEntryNode) node;
+ writer.startUnkeyedListItem(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof ChoiceNode) {
+ final ChoiceNode n = (ChoiceNode) node;
+ writer.startChoiceNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof AugmentationNode) {
+ final AugmentationNode n = (AugmentationNode) node;
+ writer.startAugmentationNode(n.getIdentifier());
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof UnkeyedListNode) {
+ final UnkeyedListNode n = (UnkeyedListNode) node;
+ writer.startUnkeyedList(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof OrderedMapNode) {
+ final OrderedMapNode n = (OrderedMapNode) node;
+ writer.startOrderedMapNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof MapNode) {
+ final MapNode n = (MapNode) node;
+ writer.startMapNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof LeafSetNode) {
+ // Also covers OrderedLeafSetNode, for which no dedicated start* method exists
+ final LeafSetNode<?> n = (LeafSetNode<?>) node;
+ writer.startLeafSet(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+
+ return false;
+ }
+
+ private static final int childSizeHint(final Iterable<?> children) {
+ return (children instanceof Collection) ? ((Collection<?>) children).size() : UNKNOWN_SIZE;
+ }
+
+ //TODO similar code is already present in schemaTracker, unify this when this writer is moved back to yangtools
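+ // Walks the SchemaPath from the root, descending through containers, choices and rpc input/output to locate the parent schema node.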
+ private SchemaNode findParentSchemaOnPath(SchemaContext schemaContext, SchemaPath path) {
+ SchemaNode current = Preconditions.checkNotNull(schemaContext);
+ for (final QName qname : path.getPathFromRoot()) {
+ SchemaNode child;
+ if(current instanceof DataNodeContainer) {
+ child = ((DataNodeContainer) current).getDataChildByName(qname);
+
+ if (child == null && current instanceof SchemaContext) {
+ child = tryFindGroupings((SchemaContext) current, qname).orNull();
+ }
+
+ if(child == null && current instanceof SchemaContext) {
+ child = tryFindNotification((SchemaContext) current, qname)
+ .or(tryFindRpc(((SchemaContext) current), qname)).orNull();
+ }
+ } else if (current instanceof ChoiceSchemaNode) {
+ child = ((ChoiceSchemaNode) current).getCaseNodeByName(qname);
+ } else if (current instanceof RpcDefinition) {
+ switch (qname.getLocalName()) {
+ case "input":
+ child = ((RpcDefinition) current).getInput();
+ break;
+ case "output":
+ child = ((RpcDefinition) current).getOutput();
+ break;
+ default:
+ child = null;
+ break;
+ }
+ } else {
+ throw new IllegalArgumentException(String.format("Schema node %s does not allow children.", current));
+ }
+ current = child;
+ }
+ return current;
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindGroupings(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getGroupings(), new SchemaNodePredicate(qname), null));
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindRpc(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getOperations(), new SchemaNodePredicate(qname), null));
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindNotification(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getNotifications(), new SchemaNodePredicate(qname), null));
+ }
+
+ @Override
+ public void flush() throws IOException {
+ writer.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.flush();
+ writer.close();
+ }
+
+ //TODO this class is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private static final class SchemaNodePredicate implements Predicate<SchemaNode> {
+ private final QName qname;
+
+ public SchemaNodePredicate(final QName qname) {
+ this.qname = qname;
+ }
+
+ @Override
+ public boolean apply(final SchemaNode input) {
+ return input.getQName().equals(qname);
+ }
+ }
+}
\ No newline at end of file