</modules>
</profile>
<profile>
- <id>docs</id>
+ <id>docs-java7</id>
<activation>
<activeByDefault>false</activeByDefault>
+ <jdk>1.7</jdk>
</activation>
<modules>
<module>northbound/java-client</module>
package org.opendaylight.controller.topologymanager.internal;
-import org.junit.Assert;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.switchmanager.SwitchConfig;
import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
public class TopologyManagerImplTest {
private TopologyManagerImpl topoManagerImpl;
}
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(1, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(1);
Assert.assertNotNull(topoManagerImpl.getEdges().get(edge));
}
+ // Polls the topology manager's edges DB (up to 5s) until it reaches the
+ // expected size; fails with an expected-vs-actual message otherwise.
+ private void verifyEdgesSize(int expSize) throws InterruptedException {
+ int timeout = 5000;
+ for(int i = 0; i < timeout / 50; i++) {
+ if(topoManagerImpl.getEdges().size() == expSize) {
+ return;
+ }
+
+ Thread.sleep(50);
+ }
+
+ // Arguments must match the message order: expected first, then actual.
+ Assert.fail(String.format("Expected edges size %d. Actual was %d",
+ expSize, topoManagerImpl.getEdges().size()));
+ }
+
@Test
- public void testNotifyNodeConnector() throws ConstructionException,
- InterruptedException {
+ public void testNotifyNodeConnector() throws Exception {
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
Map<String, Property> propMap = new HashMap<>();
swMgr.addNodeConnectors(nc1);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.ADDED, propMap);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.CHANGED, propMap);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
swMgr.clear();
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
// Test NodeConnector notification in the case that there is a related
// edge update just before the notification.
swMgr.addNodeConnectors(nc2);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.CHANGED, propMap);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(2, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(2);
teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.REMOVED);
teu2 = new TopoEdgeUpdate(edge2, props, UpdateType.REMOVED);
topoedgeupdateList.add(teu2);
topoManagerImpl.edgeUpdate(topoedgeupdateList);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.REMOVED, propMap);
swMgr.addNodeConnectors(nc2);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.CHANGED, propMap);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.REMOVED, propMap);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
}
}
(configParams.isPresent() ? configParams.get(): new DefaultConfigParamsImpl()),
delegatingPersistenceProvider, LOG);
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, delegatingPersistenceProvider, currentBehavior));
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, currentBehavior));
}
@Override
context.getConfigParams().getJournalRecoveryLogBatchSize());
super.preStart();
+
+ snapshotSupport = newRaftActorSnapshotMessageSupport();
}
@Override
}
protected RaftActorRecoverySupport newRaftActorRecoverySupport() {
- return new RaftActorRecoverySupport(delegatingPersistenceProvider, context, currentBehavior,
- getRaftActorRecoveryCohort());
+ return new RaftActorRecoverySupport(context, currentBehavior, getRaftActorRecoveryCohort());
}
protected void initializeBehavior(){
@Override
public void handleCommand(Object message) {
- if(snapshotSupport == null) {
- snapshotSupport = newRaftActorSnapshotMessageSupport();
- }
-
- boolean handled = snapshotSupport.handleSnapshotMessage(message);
- if(handled) {
- return;
- }
-
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
);
} else if(message instanceof GetOnDemandRaftState) {
onGetOnDemandRaftStats();
- } else {
+ } else if(!snapshotSupport.handleSnapshotMessage(message)) {
reusableBehaviorStateHolder.init(getCurrentBehavior());
setCurrentBehavior(currentBehavior.handleMessage(getSender(), message));
}
protected RaftActorSnapshotMessageSupport newRaftActorSnapshotMessageSupport() {
- return new RaftActorSnapshotMessageSupport(delegatingPersistenceProvider, context,
- currentBehavior, getRaftActorSnapshotCohort());
+ return new RaftActorSnapshotMessageSupport(context, currentBehavior,
+ getRaftActorSnapshotCohort());
}
private void onGetOnDemandRaftStats() {
import akka.persistence.RecoveryCompleted;
import akka.persistence.SnapshotOffer;
import com.google.common.base.Stopwatch;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
* @author Thomas Pantelis
*/
class RaftActorRecoverySupport {
- private final DataPersistenceProvider persistence;
private final RaftActorContext context;
private final RaftActorBehavior currentBehavior;
private final RaftActorRecoveryCohort cohort;
private Stopwatch recoveryTimer;
private final Logger log;
- RaftActorRecoverySupport(DataPersistenceProvider persistence, RaftActorContext context,
- RaftActorBehavior currentBehavior, RaftActorRecoveryCohort cohort) {
- this.persistence = persistence;
+ RaftActorRecoverySupport(RaftActorContext context, RaftActorBehavior currentBehavior,
+ RaftActorRecoveryCohort cohort) {
this.context = context;
this.currentBehavior = currentBehavior;
this.cohort = cohort;
boolean handleRecoveryMessage(Object message) {
boolean recoveryComplete = false;
- if(persistence.isRecoveryApplicable()) {
+ if(context.getPersistenceProvider().isRecoveryApplicable()) {
if (message instanceof SnapshotOffer) {
onRecoveredSnapshot((SnapshotOffer) message);
} else if (message instanceof ReplicatedLogEntry) {
// The replicated log can be used later on to retrieve this snapshot
// when we need to install it on a peer
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, persistence, currentBehavior));
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, currentBehavior));
context.setLastApplied(snapshot.getLastAppliedIndex());
context.setCommitIndex(snapshot.getLastAppliedIndex());
import akka.japi.Procedure;
import akka.persistence.SaveSnapshotFailure;
import akka.persistence.SaveSnapshotSuccess;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.slf4j.Logger;
class RaftActorSnapshotMessageSupport {
static final String COMMIT_SNAPSHOT = "commit_snapshot";
- private final DataPersistenceProvider persistence;
private final RaftActorContext context;
private final RaftActorBehavior currentBehavior;
private final RaftActorSnapshotCohort cohort;
}
};
- RaftActorSnapshotMessageSupport(DataPersistenceProvider persistence, RaftActorContext context,
- RaftActorBehavior currentBehavior, RaftActorSnapshotCohort cohort) {
- this.persistence = persistence;
+ RaftActorSnapshotMessageSupport(RaftActorContext context, RaftActorBehavior currentBehavior,
+ RaftActorSnapshotCohort cohort) {
this.context = context;
this.currentBehavior = currentBehavior;
this.cohort = cohort;
this.log = context.getLogger();
+
+ context.getSnapshotManager().setCreateSnapshotCallable(createSnapshotProcedure);
}
boolean handleSnapshotMessage(Object message) {
} else if (message instanceof SaveSnapshotFailure) {
onSaveSnapshotFailure((SaveSnapshotFailure) message);
return true;
- } else if (message instanceof CaptureSnapshot) {
- onCaptureSnapshot(message);
- return true;
} else if (message instanceof CaptureSnapshotReply) {
onCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
return true;
} else if (message.equals(COMMIT_SNAPSHOT)) {
- context.getSnapshotManager().commit(persistence, -1);
+ context.getSnapshotManager().commit(-1);
return true;
} else {
return false;
private void onCaptureSnapshotReply(byte[] snapshotBytes) {
log.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", context.getId(), snapshotBytes.length);
- context.getSnapshotManager().persist(persistence, snapshotBytes, currentBehavior, context.getTotalMemory());
- }
-
- private void onCaptureSnapshot(Object message) {
- log.debug("{}: CaptureSnapshot received by actor: {}", context.getId(), message);
-
- context.getSnapshotManager().create(createSnapshotProcedure);
+ context.getSnapshotManager().persist(snapshotBytes, currentBehavior, context.getTotalMemory());
}
private void onSaveSnapshotFailure(SaveSnapshotFailure saveSnapshotFailure) {
long sequenceNumber = success.metadata().sequenceNr();
- context.getSnapshotManager().commit(persistence, sequenceNumber);
+ context.getSnapshotManager().commit(sequenceNumber);
}
private void onApplySnapshot(Snapshot snapshot) {
cohort.applySnapshot(snapshot.getState());
//clears the followers log, sets the snapshot index to ensure adjusted-index works
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, persistence,
- currentBehavior));
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, currentBehavior));
context.setLastApplied(snapshot.getLastAppliedIndex());
}
}
import akka.japi.Procedure;
import java.util.Collections;
import java.util.List;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
private long dataSizeSinceLastSnapshot = 0L;
private final RaftActorContext context;
- private final DataPersistenceProvider persistence;
private final RaftActorBehavior currentBehavior;
private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
};
static ReplicatedLog newInstance(Snapshot snapshot, RaftActorContext context,
- DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+ RaftActorBehavior currentBehavior) {
return new ReplicatedLogImpl(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
- snapshot.getUnAppliedEntries(), context, persistence, currentBehavior);
+ snapshot.getUnAppliedEntries(), context, currentBehavior);
}
- static ReplicatedLog newInstance(RaftActorContext context,
- DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+ static ReplicatedLog newInstance(RaftActorContext context, RaftActorBehavior currentBehavior) {
return new ReplicatedLogImpl(-1L, -1L, Collections.<ReplicatedLogEntry>emptyList(), context,
- persistence, currentBehavior);
+ currentBehavior);
}
private ReplicatedLogImpl(long snapshotIndex, long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries,
- RaftActorContext context, DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+ RaftActorContext context, RaftActorBehavior currentBehavior) {
super(snapshotIndex, snapshotTerm, unAppliedEntries);
this.context = context;
- this.persistence = persistence;
this.currentBehavior = currentBehavior;
}
// FIXME: Maybe this should be done after the command is saved
long adjustedIndex = removeFrom(logEntryIndex);
if(adjustedIndex >= 0) {
- persistence.persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+ context.getPersistenceProvider().persist(new DeleteEntries(adjustedIndex), deleteProcedure);
}
}
// persist call and the execution(s) of the associated event
// handler. This also holds for multiple persist calls in context
// of a single command.
- persistence.persist(replicatedLogEntry,
+ context.getPersistenceProvider().persist(replicatedLogEntry,
new Procedure<ReplicatedLogEntry>() {
@Override
public void apply(ReplicatedLogEntry evt) throws Exception {
import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
+import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import java.util.List;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
public class SnapshotManager implements SnapshotState {
private final SnapshotState IDLE = new Idle();
- private final SnapshotState CAPTURING = new Capturing();
private final SnapshotState PERSISTING = new Persisting();
private final SnapshotState CREATING = new Creating();
private CaptureSnapshot captureSnapshot;
private long lastSequenceNumber = -1;
+ private Procedure<Void> createSnapshotProcedure;
+
public SnapshotManager(RaftActorContext context, Logger logger) {
this.context = context;
this.LOG = logger;
}
@Override
- public void create(Procedure<Void> callback) {
- currentState.create(callback);
+ public void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory) {
+ currentState.persist(snapshotBytes, currentBehavior, totalMemory);
}
@Override
- public void persist(DataPersistenceProvider persistenceProvider, byte[] snapshotBytes,
- RaftActorBehavior currentBehavior, long totalMemory) {
- currentState.persist(persistenceProvider, snapshotBytes, currentBehavior, totalMemory);
- }
-
- @Override
- public void commit(DataPersistenceProvider persistenceProvider, long sequenceNumber) {
- currentState.commit(persistenceProvider, sequenceNumber);
+ public void commit(long sequenceNumber) {
+ currentState.commit(sequenceNumber);
}
@Override
return currentState.trimLog(desiredTrimIndex, currentBehavior);
}
+ public void setCreateSnapshotCallable(Procedure<Void> createSnapshotProcedure) {
+ this.createSnapshotProcedure = createSnapshotProcedure;
+ }
+
+ @VisibleForTesting
+ public CaptureSnapshot getCaptureSnapshot() {
+ return captureSnapshot;
+ }
+
private boolean hasFollowers(){
return context.getPeerAddresses().keySet().size() > 0;
}
}
@Override
- public void create(Procedure<Void> callback) {
- LOG.debug("create should not be called in state {}", this);
- }
-
- @Override
- public void persist(DataPersistenceProvider persistenceProvider, byte[] snapshotBytes,
- RaftActorBehavior currentBehavior, long totalMemory) {
+ public void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory) {
LOG.debug("persist should not be called in state {}", this);
}
@Override
- public void commit(DataPersistenceProvider persistenceProvider, long sequenceNumber) {
+ public void commit(long sequenceNumber) {
LOG.debug("commit should not be called in state {}", this);
}
lastLogEntry.getTerm(), lastAppliedIndex, lastAppliedTerm,
newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries, targetFollower != null);
- SnapshotManager.this.currentState = CAPTURING;
-
if(captureSnapshot.isInstallSnapshotInitiated()) {
LOG.info("{}: Initiating snapshot capture {} to install on {}",
persistenceId(), captureSnapshot, targetFollower);
LOG.debug("lastSequenceNumber prior to capture: {}", lastSequenceNumber);
- context.getActor().tell(captureSnapshot, context.getActor());
+ SnapshotManager.this.currentState = CREATING;
+
+ try {
+ createSnapshotProcedure.apply(null);
+ } catch (Exception e) {
+ SnapshotManager.this.currentState = IDLE;
+ LOG.error("Error creating snapshot", e);
+ return false;
+ }
return true;
}
}
}
- private class Capturing extends AbstractSnapshotState {
-
- @Override
- public boolean isCapturing() {
- return true;
- }
-
- @Override
- public void create(Procedure<Void> callback) {
- try {
- callback.apply(null);
- SnapshotManager.this.currentState = CREATING;
- } catch (Exception e) {
- LOG.error("Unexpected error occurred", e);
- }
- }
-
- @Override
- public String toString() {
- return "Capturing";
- }
-
- }
-
private class Creating extends AbstractSnapshotState {
@Override
}
@Override
- public void persist(DataPersistenceProvider persistenceProvider, byte[] snapshotBytes,
- RaftActorBehavior currentBehavior, long totalMemory) {
+ public void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory) {
// create a snapshot object from the state provided and save it
// when snapshot is saved async, SaveSnapshotSuccess is raised.
captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
- persistenceProvider.saveSnapshot(sn);
+ context.getPersistenceProvider().saveSnapshot(sn);
LOG.info("{}: Persisting of snapshot done:{}", persistenceId(), sn.getLogMessage());
private class Persisting extends AbstractSnapshotState {
@Override
- public void commit(DataPersistenceProvider persistenceProvider, long sequenceNumber) {
+ public void commit(long sequenceNumber) {
context.getReplicatedLog().snapshotCommit();
- persistenceProvider.deleteSnapshots(new SnapshotSelectionCriteria(
+ context.getPersistenceProvider().deleteSnapshots(new SnapshotSelectionCriteria(
sequenceNumber - context.getConfigParams().getSnapshotBatchCount(), 43200000));
- persistenceProvider.deleteMessages(lastSequenceNumber);
+ context.getPersistenceProvider().deleteMessages(lastSequenceNumber);
lastSequenceNumber = -1;
SnapshotManager.this.currentState = IDLE;
package org.opendaylight.controller.cluster.raft;
-import akka.japi.Procedure;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
public interface SnapshotState {
*/
boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower);
- /**
- * Create the snapshot
- *
- * @param callback a procedure to be called which should create the snapshot
- */
- void create(Procedure<Void> callback);
-
/**
* Persist the snapshot
*
- * @param persistenceProvider
* @param snapshotBytes
* @param currentBehavior
* @param totalMemory
*/
- void persist(DataPersistenceProvider persistenceProvider, byte[] snapshotBytes, RaftActorBehavior currentBehavior
- ,long totalMemory);
+ void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory);
/**
* Commit the snapshot by trimming the log
*
- * @param persistenceProvider
* @param sequenceNumber
*/
- void commit(DataPersistenceProvider persistenceProvider, long sequenceNumber);
+ void commit(long sequenceNumber);
/**
* Rollback the snapshot
public SnapshotManager getSnapshotManager() {
if(this.snapshotManager == null){
this.snapshotManager = new SnapshotManager(this, getLogger());
+ this.snapshotManager.setCreateSnapshotCallable(NoopProcedure.<Void>instance());
}
return this.snapshotManager;
}
context = new RaftActorContextImpl(null, null, "test", new ElectionTermImpl(mockPersistence, "test", LOG),
-1, -1, Collections.<String,String>emptyMap(), configParams, mockPersistence, LOG);
- support = new RaftActorRecoverySupport(mockPersistence, context , mockBehavior, mockCohort);
+ support = new RaftActorRecoverySupport(context, mockBehavior , mockCohort);
doReturn(true).when(mockPersistence).isRecoveryApplicable();
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior));
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockBehavior));
}
private void sendMessageToSupport(Object message) {
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
-import akka.japi.Procedure;
import akka.persistence.SaveSnapshotFailure;
import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotMetadata;
import java.util.Collections;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.slf4j.Logger;
}
};
- support = new RaftActorSnapshotMessageSupport(mockPersistence, context, mockBehavior, mockCohort);
+ support = new RaftActorSnapshotMessageSupport(context, mockBehavior, mockCohort);
doReturn(true).when(mockPersistence).isRecoveryApplicable();
- context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior));
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockBehavior));
}
private void sendMessageToSupport(Object message) {
verify(mockCohort).applySnapshot(snapshotBytes);
}
- @SuppressWarnings({ "rawtypes", "unchecked" })
- @Test
- public void testOnCaptureSnapshot() throws Exception {
-
- sendMessageToSupport(new CaptureSnapshot(3, 1, 2, 1, 2, 1, null));
-
- ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
- verify(mockSnapshotManager).create(procedure.capture());
-
- procedure.getValue().apply(null);
-
- verify(mockCohort).createSnapshot(same(mockRaftActorRef));
- }
-
@Test
public void testOnCaptureSnapshotReply() {
byte[] snapshot = {1,2,3,4,5};
sendMessageToSupport(new CaptureSnapshotReply(snapshot));
- verify(mockSnapshotManager).persist(same(mockPersistence), same(snapshot), same(mockBehavior), anyLong());
+ verify(mockSnapshotManager).persist(same(snapshot), same(mockBehavior), anyLong());
}
@Test
long sequenceNumber = 100;
sendMessageToSupport(new SaveSnapshotSuccess(new SnapshotMetadata("foo", sequenceNumber, 1234L)));
- verify(mockSnapshotManager).commit(mockPersistence, sequenceNumber);
+ verify(mockSnapshotManager).commit(sequenceNumber);
}
@Test
sendMessageToSupport(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
- verify(mockSnapshotManager).commit(mockPersistence, -1);
+ verify(mockSnapshotManager).commit(-1);
}
@Test
Uninterruptibles.sleepUninterruptibly(heartBeatInterval, TimeUnit.MILLISECONDS);
}
+ assertNotNull(matches);
assertEquals(2, matches.size());
// check if the notifier got a role change from null to Follower
new MockRaftActorContext.MockPayload("foo-3"),
new MockRaftActorContext.MockPayload("foo-4")));
- leaderActor.getRaftActorContext().getSnapshotManager().persist(new NonPersistentDataProvider()
- , snapshotBytes.toByteArray(), leader, Runtime.getRuntime().totalMemory());
+ leaderActor.getRaftActorContext().getSnapshotManager().persist(snapshotBytes.toByteArray(),
+ leader, Runtime.getRuntime().totalMemory());
assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
// The commit is needed to complete the snapshot creation process
- leaderActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentDataProvider(), -1);
+ leaderActor.getRaftActorContext().getSnapshotManager().commit(-1);
// capture snapshot reply should remove the snapshotted entries only
assertEquals(3, leaderActor.getReplicatedLog().size());
assertFalse(followerActor.getRaftActorContext().getSnapshotManager().isCapturing());
// The commit is needed to complete the snapshot creation process
- followerActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentDataProvider(), -1);
+ followerActor.getRaftActorContext().getSnapshotManager().commit(-1);
// capture snapshot reply should remove the snapshotted entries only till replicatedToAllIndex
assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
send2InitialPayloads();
// Block these messages initially so we can control the sequence.
- leaderActor.underlyingActor().startDropMessages(CaptureSnapshot.class);
leaderActor.underlyingActor().startDropMessages(CaptureSnapshotReply.class);
follower1Actor.underlyingActor().startDropMessages(AppendEntries.class);
MessageCollectorActor.expectMatching(follower1CollectorActor, AppendEntries.class, 3);
- CaptureSnapshot captureSnapshot = MessageCollectorActor.expectFirstMatching(
- leaderCollectorActor, CaptureSnapshot.class);
-
- // First, deliver the CaptureSnapshot to the leader.
- leaderActor.underlyingActor().stopDropMessages(CaptureSnapshot.class);
- leaderActor.tell(captureSnapshot, leaderActor);
-
// Send another payload.
MockPayload payload4 = sendPayloadData(leaderActor, "four");
}
@Test
- public void testStatePersistedBetweenInitiateSnapshotAndCapture() {
-
- send2InitialPayloads();
-
- // Block these messages initially so we can control the sequence.
- leaderActor.underlyingActor().startDropMessages(CaptureSnapshot.class);
- follower1Actor.underlyingActor().startDropMessages(AppendEntries.class);
-
- MockPayload payload2 = sendPayloadData(leaderActor, "two");
-
- // This should trigger a snapshot.
- MockPayload payload3 = sendPayloadData(leaderActor, "three");
-
- // Send another payload.
- MockPayload payload4 = sendPayloadData(leaderActor, "four");
-
- MessageCollectorActor.expectMatching(follower1CollectorActor, AppendEntries.class, 3);
-
- CaptureSnapshot captureSnapshot = MessageCollectorActor.expectFirstMatching(
- leaderCollectorActor, CaptureSnapshot.class);
-
- // First, deliver the AppendEntries to the follower
- follower1Actor.underlyingActor().stopDropMessages(AppendEntries.class);
-
- MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyJournalEntries.class, 3);
-
- // Now deliver the CaptureSnapshot to the leader.
- leaderActor.underlyingActor().stopDropMessages(CaptureSnapshot.class);
- leaderActor.tell(captureSnapshot, leaderActor);
-
- // Wait for snapshot complete.
- MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
-
- reinstateLeaderActor();
-
- assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
- assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
- assertEquals("Leader journal log size", 3, leaderContext.getReplicatedLog().size());
- assertEquals("Leader journal last index", 4, leaderContext.getReplicatedLog().lastIndex());
- assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
- assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
-
- // payloads 2, 3, and 4 were applied after the snapshot was initiated and before it was captured so
- // were included in the snapshot. They were also included as unapplied entries in the snapshot as
- // they weren't yet applied to the state at the time the snapshot was initiated. They were applied to the
- // state on recovery by the ApplyJournalEntries messages which remained in the persisted log.
- // This is a side effect of trimming the persisted log to the sequence number captured at the time
- // the snapshot was initiated.
- assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4, payload2,
- payload3, payload4), leaderActor.underlyingActor().getState());
- }
-
- @Test
- public void testApplyJournalEntriesPersistedAfterSnapshotPersisted() {
+ public void testStatePersistedAfterSnapshotPersisted() {
send2InitialPayloads();
@SuppressWarnings("unchecked")
@Test
public void testAppendAndPersistExpectingNoCapture() throws Exception {
- ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
MockReplicatedLogEntry logEntry = new MockReplicatedLogEntry(1, 1, new MockPayload("1"));
doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
- ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
MockReplicatedLogEntry logEntry1 = new MockReplicatedLogEntry(1, 2, new MockPayload("2"));
MockReplicatedLogEntry logEntry2 = new MockReplicatedLogEntry(1, 3, new MockPayload("3"));
}
});
- ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
int dataSize = 600;
MockReplicatedLogEntry logEntry = new MockReplicatedLogEntry(1, 2, new MockPayload("2", dataSize));
@Test
public void testRemoveFromAndPersist() throws Exception {
- ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockPersistence, mockBehavior);
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
log.append(new MockReplicatedLogEntry(1, 0, new MockPayload("0")));
log.append(new MockReplicatedLogEntry(1, 1, new MockPayload("1")));
import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
expSnapshotState.add(payload6);
// Delay the CaptureSnapshot message to the leader actor.
- leaderActor.underlyingActor().startDropMessages(CaptureSnapshot.class);
+ leaderActor.underlyingActor().startDropMessages(CaptureSnapshotReply.class);
// Send the payload.
payload7 = sendPayloadData(leaderActor, "seven");
- // Capture the CaptureSnapshot message so we can send it later.
- CaptureSnapshot captureSnapshot = MessageCollectorActor.expectFirstMatching(
- leaderCollectorActor, CaptureSnapshot.class);
+ // Capture the CaptureSnapshotReply message so we can send it later.
+ CaptureSnapshotReply captureSnapshotReply = MessageCollectorActor.expectFirstMatching(leaderCollectorActor,
+ CaptureSnapshotReply.class);
// Wait for the state to be applied in the leader.
ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
assertEquals("Leader last applied", 7, leaderContext.getLastApplied());
assertEquals("Leader replicatedToAllIndex", 5, leader.getReplicatedToAllIndex());
- // Now deliver the CaptureSnapshot.
- leaderActor.underlyingActor().stopDropMessages(CaptureSnapshot.class);
- leaderActor.tell(captureSnapshot, leaderActor);
-
- // Wait for CaptureSnapshotReply to complete.
- MessageCollectorActor.expectFirstMatching(leaderCollectorActor, CaptureSnapshotReply.class);
+ // Now deliver the CaptureSnapshotReply.
+ leaderActor.underlyingActor().stopDropMessages(CaptureSnapshotReply.class);
+ leaderActor.tell(captureSnapshotReply, leaderActor);
// Wait for snapshot complete.
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
- expSnapshotState.add(payload7);
-
// Verify the persisted snapshot. This should reflect the snapshot index as the last applied
// log entry (7) and shouldn't contain any unapplied entries as we capture persisted the snapshot data
// when the snapshot is created (ie when the CaptureSnapshot is processed).
assertEquals("Follower 2 journal last index", 7, follower2Context.getReplicatedLog().lastIndex());
assertEquals("Follower 2 commit index", 7, follower2Context.getCommitIndex());
+ expSnapshotState.add(payload7);
+
testLog.info("testSecondSnapshot ending");
}
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.HashMap;
-import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
actorRef = factory.createTestActor(MessageCollectorActor.props(), factory.generateActorId("test-"));
doReturn(actorRef).when(mockRaftActorContext).getActor();
+ snapshotManager.setCreateSnapshotCallable(mockProcedure);
}
@After
}
@Test
- public void testCaptureToInstall(){
+ public void testCaptureToInstall() throws Exception {
// Force capturing toInstall = true
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(1, 0,
assertEquals(true, snapshotManager.isCapturing());
- CaptureSnapshot captureSnapshot = MessageCollectorActor.expectFirstMatching(actorRef, CaptureSnapshot.class);
+ verify(mockProcedure).apply(null);
+
+ CaptureSnapshot captureSnapshot = snapshotManager.getCaptureSnapshot();
// LastIndex and LastTerm are picked up from the lastLogEntry
assertEquals(0L, captureSnapshot.getLastIndex());
}
@Test
- public void testCapture(){
+ public void testCapture() throws Exception {
boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
new MockRaftActorContext.MockPayload()), 9);
assertEquals(true, snapshotManager.isCapturing());
- CaptureSnapshot captureSnapshot = MessageCollectorActor.expectFirstMatching(actorRef, CaptureSnapshot.class);
+ verify(mockProcedure).apply(null);
+
+ CaptureSnapshot captureSnapshot = snapshotManager.getCaptureSnapshot();
+
// LastIndex and LastTerm are picked up from the lastLogEntry
assertEquals(9L, captureSnapshot.getLastIndex());
assertEquals(1L, captureSnapshot.getLastTerm());
}
+ @Test
+ public void testCaptureWithCreateProcedureError() throws Exception {
+     // If the create-snapshot procedure throws, capture() must report failure
+     // and must not leave the manager in the capturing state.
+     doThrow(new Exception("mock")).when(mockProcedure).apply(null);
+
+     boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1, 9,
+             new MockRaftActorContext.MockPayload()), 9);
+
+     assertFalse(capture);
+
+     assertFalse(snapshotManager.isCapturing());
+
+     verify(mockProcedure).apply(null);
+ }
+
@Test
public void testIllegalCapture() throws Exception {
boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
assertTrue(capture);
- List<CaptureSnapshot> allMatching = MessageCollectorActor.getAllMatching(actorRef, CaptureSnapshot.class);
+ verify(mockProcedure).apply(null);
- assertEquals(1, allMatching.size());
+ reset(mockProcedure);
// This will not cause snapshot capture to start again
capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
assertFalse(capture);
- allMatching = MessageCollectorActor.getAllMatching(actorRef, CaptureSnapshot.class);
-
- assertEquals(1, allMatching.size());
+ verify(mockProcedure, never()).apply(null);
}
@Test
// when replicatedToAllIndex = -1
snapshotManager.capture(lastLogEntry, -1);
- snapshotManager.create(mockProcedure);
-
byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
- snapshotManager.persist(mockDataPersistenceProvider, bytes, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(bytes, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
ArgumentCaptor<Snapshot> snapshotArgumentCaptor = ArgumentCaptor.forClass(Snapshot.class);
verify(mockDataPersistenceProvider).saveSnapshot(snapshotArgumentCaptor.capture());
verify(mockReplicatedLog).snapshotPreCommit(7L, 1L);
}
-
- @Test
- public void testCreate() throws Exception {
- // when replicatedToAllIndex = -1
- snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
- new MockRaftActorContext.MockPayload()), -1);
-
- snapshotManager.create(mockProcedure);
-
- verify(mockProcedure).apply(null);
-
- assertEquals("isCapturing", true, snapshotManager.isCapturing());
- }
-
- @Test
- public void testCallingCreateMultipleTimesCausesNoHarm() throws Exception {
- // when replicatedToAllIndex = -1
- snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
- new MockRaftActorContext.MockPayload()), -1);
-
- snapshotManager.create(mockProcedure);
-
- snapshotManager.create(mockProcedure);
-
- verify(mockProcedure, times(1)).apply(null);
- }
-
- @Test
- public void testCallingCreateBeforeCapture() throws Exception {
- snapshotManager.create(mockProcedure);
-
- verify(mockProcedure, times(0)).apply(null);
- }
-
- @Test
- public void testCallingCreateAfterPersist() throws Exception {
- // when replicatedToAllIndex = -1
- snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
- new MockRaftActorContext.MockPayload()), -1);
-
- snapshotManager.create(mockProcedure);
-
- verify(mockProcedure, times(1)).apply(null);
-
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
-
- reset(mockProcedure);
-
- snapshotManager.create(mockProcedure);
-
- verify(mockProcedure, never()).apply(null);
- }
-
@Test
public void testPersistWhenReplicatedToAllIndexNotMinus(){
doReturn(45L).when(mockReplicatedLog).getSnapshotIndex();
snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
new MockRaftActorContext.MockPayload()), 9);
- snapshotManager.create(mockProcedure);
-
byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
- snapshotManager.persist(mockDataPersistenceProvider, bytes, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(bytes, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
ArgumentCaptor<Snapshot> snapshotArgumentCaptor = ArgumentCaptor.forClass(Snapshot.class);
verify(mockDataPersistenceProvider).saveSnapshot(snapshotArgumentCaptor.capture());
snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
new MockRaftActorContext.MockPayload()), -1);
- snapshotManager.create(mockProcedure);
-
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
assertTrue(capture);
- snapshotManager.create(mockProcedure);
-
byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
- snapshotManager.persist(mockDataPersistenceProvider, bytes, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(bytes, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
@Test
public void testCallingPersistWithoutCaptureWillDoNothing(){
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
verify(mockDataPersistenceProvider, never()).saveSnapshot(any(Snapshot.class));
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
new MockRaftActorContext.MockPayload()), -1, "follower-1");
- snapshotManager.create(mockProcedure);
-
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
new MockRaftActorContext.MockPayload()), -1, "follower-1");
- snapshotManager.create(mockProcedure);
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
-
- snapshotManager.commit(mockDataPersistenceProvider, 100L);
+ snapshotManager.commit(100L);
verify(mockReplicatedLog).snapshotCommit();
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
new MockRaftActorContext.MockPayload()), -1, "follower-1");
- snapshotManager.commit(mockDataPersistenceProvider, 100L);
+ snapshotManager.commit(100L);
verify(mockReplicatedLog, never()).snapshotCommit();
@Test
public void testCommitBeforeCapture(){
- snapshotManager.commit(mockDataPersistenceProvider, 100L);
+ snapshotManager.commit(100L);
verify(mockReplicatedLog, never()).snapshotCommit();
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
new MockRaftActorContext.MockPayload()), -1, "follower-1");
- snapshotManager.create(mockProcedure);
-
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
- snapshotManager.commit(mockDataPersistenceProvider, 100L);
+ snapshotManager.commit(100L);
- snapshotManager.commit(mockDataPersistenceProvider, 100L);
+ snapshotManager.commit(100L);
verify(mockReplicatedLog, times(1)).snapshotCommit();
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
new MockRaftActorContext.MockPayload()), -1, "follower-1");
- snapshotManager.create(mockProcedure);
-
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
snapshotManager.rollback();
snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
new MockRaftActorContext.MockPayload()), -1, "follower-1");
- snapshotManager.create(mockProcedure);
-
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
- , Runtime.getRuntime().totalMemory());
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
snapshotManager.rollback();
ApplySnapshot applySnapshot = MessageCollectorActor.expectFirstMatching(followerActor,
ApplySnapshot.class);
Snapshot snapshot = applySnapshot.getSnapshot();
+ assertNotNull(lastInstallSnapshot);
assertEquals("getLastIndex", lastInstallSnapshot.getLastIncludedIndex(), snapshot.getLastIndex());
assertEquals("getLastIncludedTerm", lastInstallSnapshot.getLastIncludedTerm(),
snapshot.getLastAppliedTerm());
assertTrue(raftBehavior instanceof Leader);
- MessageCollectorActor.expectFirstMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
}
@Test
leader.handleMessage(leaderActor, new Replicate(null, "state-id", entry));
- CaptureSnapshot cs = MessageCollectorActor.expectFirstMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
+
+ CaptureSnapshot cs = actorContext.getSnapshotManager().getCaptureSnapshot();
assertTrue(cs.isInstallSnapshotInitiated());
assertEquals(3, cs.getLastAppliedIndex());
// if an initiate is started again when first is in progress, it shouldnt initiate Capture
leader.handleMessage(leaderActor, new Replicate(null, "state-id", entry));
- List<CaptureSnapshot> captureSnapshots = MessageCollectorActor.getAllMatching(leaderActor, CaptureSnapshot.class);
- assertEquals("CaptureSnapshot should not get invoked when initiate is in progress", 1, captureSnapshots.size());
+ Assert.assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
}
@Test
* @see AsyncDataBroker
* @see TransactionChainFactory
*/
-public interface DataBroker extends TransactionFactory, AsyncDataBroker<InstanceIdentifier<?>, DataObject, DataChangeListener>, BindingService, TransactionChainFactory<InstanceIdentifier<?>, DataObject> {
+public interface DataBroker extends AsyncDataBroker<InstanceIdentifier<?>, DataObject, DataChangeListener>,
+ TransactionChainFactory<InstanceIdentifier<?>, DataObject>, TransactionFactory, BindingService, DataTreeChangeService {
/**
* {@inheritDoc}
*/
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.ModuleIdentifier;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
+import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
import org.opendaylight.controller.sal.core.api.Broker;
public class BindingNotificationAdapterModule extends AbstractBindingNotificationAdapterModule {
- public BindingNotificationAdapterModule(ModuleIdentifier identifier, DependencyResolver dependencyResolver) {
+ public BindingNotificationAdapterModule(final ModuleIdentifier identifier, final DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
- public BindingNotificationAdapterModule(ModuleIdentifier identifier, DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.md.sal.binding.impl.BindingNotificationAdapterModule oldModule, java.lang.AutoCloseable oldInstance) {
+ public BindingNotificationAdapterModule(final ModuleIdentifier identifier, final DependencyResolver dependencyResolver, final org.opendaylight.controller.config.yang.md.sal.binding.impl.BindingNotificationAdapterModule oldModule, final java.lang.AutoCloseable oldInstance) {
super(identifier, dependencyResolver, oldModule, oldInstance);
}
final BindingToNormalizedNodeCodec codec = getBindingMappingServiceDependency();
final Broker.ProviderSession session = getDomAsyncBrokerDependency().registerProvider(new DummyDOMProvider());
final DOMNotificationService notifService = session.getService(DOMNotificationService.class);
- return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), notifService, SingletonHolder.INVOKER_FACTORY);
+ return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), notifService);
}
}
*/
package org.opendaylight.controller.config.yang.md.sal.binding.impl;
-import com.google.common.util.concurrent.ListeningExecutorService;
+import org.opendaylight.controller.md.sal.binding.compat.HydrogenNotificationBrokerImpl;
+
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.controller.md.sal.binding.api.NotificationService;
+import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
/**
*
public final class NotificationBrokerImplModule extends
org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractNotificationBrokerImplModule {
- public NotificationBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
- org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ public NotificationBrokerImplModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+ final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
- public NotificationBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
- org.opendaylight.controller.config.api.DependencyResolver dependencyResolver,
- NotificationBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ public NotificationBrokerImplModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+ final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver,
+ final NotificationBrokerImplModule oldModule, final java.lang.AutoCloseable oldInstance) {
super(identifier, dependencyResolver, oldModule, oldInstance);
}
@Override
public java.lang.AutoCloseable createInstance() {
+ final NotificationPublishService notificationPublishService = getNotificationPublishAdapterDependency();
+ final NotificationService notificationService = getNotificationAdapterDependency();
+
+ // Use the new notification adapters when both dependencies are wired.
+ // Logical '&&' (not bitwise '&') is the idiomatic boolean conjunction here.
+ if (notificationPublishService != null && notificationService != null) {
+     return new HeliumNotificationProviderServiceAdapter(notificationPublishService, notificationService);
+ }
+
/*
 * FIXME: Switch to new broker (which has different threading model)
 * once this change is communicated with downstream users or
 * we will have adapter implementation which will honor Helium
 * threading model for notifications.
 */
- ListeningExecutorService listeningExecutor = SingletonHolder.getDefaultNotificationExecutor();
- NotificationBrokerImpl broker = new NotificationBrokerImpl(listeningExecutor);
- return broker;
+
+ // Fallback: legacy Hydrogen broker preserving the old threading model.
+ return new HydrogenNotificationBrokerImpl(SingletonHolder.getDefaultNotificationExecutor());
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.yangtools.yang.binding.Notification;
-import com.google.common.base.Preconditions;
-
/**
* An aggregated listener registration. This is a result of registering an invoker which can handle multiple
* interfaces at the same time. In order to support correct delivery, we need to maintain per-type registrations
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;
-
import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.ListenerRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-
-public class NotificationBrokerImpl implements NotificationProviderService, AutoCloseable {
- private static final Logger LOG = LoggerFactory.getLogger(NotificationBrokerImpl.class);
+@Deprecated
+public class HydrogenNotificationBrokerImpl implements NotificationProviderService, AutoCloseable {
+ private static final Logger LOG = LoggerFactory.getLogger(HydrogenNotificationBrokerImpl.class);
private final ListenerRegistry<NotificationInterestListener> interestListeners =
ListenerRegistry.create();
private final AtomicReference<ListenerMapGeneration> listeners = new AtomicReference<>(new ListenerMapGeneration());
private final ExecutorService executor;
- public NotificationBrokerImpl(final ExecutorService executor) {
+ public HydrogenNotificationBrokerImpl(final ExecutorService executor) {
this.executor = Preconditions.checkNotNull(executor);
}
@Override
public void publish(final Notification notification, final ExecutorService service) {
- for (NotificationListenerRegistration<?> r : listeners.get().listenersFor(notification)) {
+ for (final NotificationListenerRegistration<?> r : listeners.get().listenersFor(notification)) {
service.submit(new NotifyTask(r, notification));
}
}
synchronized (this) {
final Multimap<Class<? extends Notification>, NotificationListenerRegistration<?>> newListeners =
mutableListeners();
- for (NotificationListenerRegistration<?> reg : registrations) {
+ for (final NotificationListenerRegistration<?> reg : registrations) {
newListeners.put(reg.getType(), reg);
}
}
// Notifications are dispatched out of lock...
- for (NotificationListenerRegistration<?> reg : registrations) {
+ for (final NotificationListenerRegistration<?> reg : registrations) {
announceNotificationSubscription(reg.getType());
}
}
final Multimap<Class<? extends Notification>, NotificationListenerRegistration<?>> newListeners =
mutableListeners();
- for (NotificationListenerRegistration<?> reg : registrations) {
+ for (final NotificationListenerRegistration<?> reg : registrations) {
newListeners.remove(reg.getType(), reg);
}
for (final ListenerRegistration<NotificationInterestListener> listener : interestListeners) {
try {
listener.getInstance().onNotificationSubscribtion(notification);
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.warn("Listener {} reported unexpected error on notification {}",
listener.getInstance(), notification, e);
}
@Override
public ListenerRegistration<org.opendaylight.yangtools.yang.binding.NotificationListener> registerNotificationListener(final org.opendaylight.yangtools.yang.binding.NotificationListener listener) {
- final NotificationInvoker invoker = SingletonHolder.INVOKER_FACTORY.invokerFor(listener);
+ final NotificationInvoker invoker = NotificationInvoker.invokerFor(listener);
final Set<Class<? extends Notification>> types = invoker.getSupportedNotifications();
final NotificationListenerRegistration<?>[] regs = new NotificationListenerRegistration<?>[types.size()];
// Populate the registrations...
int i = 0;
- for (Class<? extends Notification> type : types) {
- regs[i] = new AggregatedNotificationListenerRegistration<Notification, Object>(type, invoker.getInvocationProxy(), regs) {
+ for (final Class<? extends Notification> type : types) {
+ regs[i] = new AggregatedNotificationListenerRegistration<Notification, Object>(type, invoker, regs) {
@Override
protected void removeRegistration() {
// Nothing to do, will be cleaned up by parent (below)
@Override
protected void removeRegistration() {
removeRegistrations(regs);
- for (ListenerRegistration<?> reg : regs) {
+ for (final ListenerRegistration<?> reg : regs) {
reg.close();
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import java.util.Arrays;
import java.util.Collection;
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.compat;
+
+import com.google.common.collect.ImmutableMap;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.yangtools.yang.binding.Notification;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.binding.util.NotificationListenerInvoker;
+import org.opendaylight.yangtools.yang.common.QName;
+
+/**
+ * Adapts a binding-generated {@link NotificationListener} to the legacy
+ * {@code org.opendaylight.controller.sal.binding.api.NotificationListener}
+ * contract. At construction time the listener's binding interfaces are scanned
+ * reflectively and each supported {@link Notification} type is mapped to an
+ * invoker that delivers it to the delegate.
+ */
+final class NotificationInvoker implements org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> {
+
+    private final NotificationListener delegate;
+    private final Map<Class<? extends Notification>, InvokerContext> invokers;
+
+    private NotificationInvoker(final NotificationListener listener) {
+        delegate = listener;
+        final Map<Class<? extends Notification>, InvokerContext> builder = new HashMap<>();
+        // Scan every binding-generated listener interface the instance implements
+        // and record an invoker for each notification type it can handle.
+        for (final Class<?> iface : listener.getClass().getInterfaces()) {
+            if (NotificationListener.class.isAssignableFrom(iface) && BindingReflections.isBindingClass(iface)) {
+                @SuppressWarnings("unchecked")
+                final Class<? extends NotificationListener> listenerType = (Class<? extends NotificationListener>) iface;
+                final NotificationListenerInvoker invoker = NotificationListenerInvoker.from(listenerType);
+                for (final Class<? extends Notification> type : getNotificationTypes(listenerType)) {
+                    builder.put(type, new InvokerContext(BindingReflections.findQName(type), invoker));
+                }
+            }
+        }
+        invokers = ImmutableMap.copyOf(builder);
+    }
+
+    public static NotificationInvoker invokerFor(final NotificationListener listener) {
+        return new NotificationInvoker(listener);
+    }
+
+    public Set<Class<? extends Notification>> getSupportedNotifications() {
+        return invokers.keySet();
+    }
+
+    @Override
+    public void onNotification(final Notification notification) {
+        // NOTE(review): getContext() returns null for a notification type not
+        // registered in the constructor, which would surface here as an NPE —
+        // confirm callers only dispatch types from getSupportedNotifications().
+        getContext(notification.getImplementedInterface()).invoke(notification);
+    }
+
+    private InvokerContext getContext(final Class<?> type) {
+        return invokers.get(type);
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Set<Class<? extends Notification>> getNotificationTypes(final Class<? extends org.opendaylight.yangtools.yang.binding.NotificationListener> type) {
+        // TODO: Investigate possibility and performance impact if we cache this or expose
+        // it from NotificationListenerInvoker
+        final Set<Class<? extends Notification>> ret = new HashSet<>();
+        for (final Method method : type.getMethods()) {
+            if (BindingReflections.isNotificationCallback(method)) {
+                final Class<? extends Notification> notification = (Class<? extends Notification>) method.getParameterTypes()[0];
+                ret.add(notification);
+            }
+        }
+        return ret;
+    }
+
+    /** Pairs a notification's QName with the reflective invoker used to deliver it. */
+    private class InvokerContext {
+
+        private final QName name;
+        private final NotificationListenerInvoker invoker;
+
+        private InvokerContext(final QName name, final NotificationListenerInvoker invoker) {
+            this.name = name;
+            this.invoker = invoker;
+        }
+
+        public void invoke(final Notification notification) {
+            invoker.invokeNotification(delegate, name, notification);
+        }
+    }
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import org.opendaylight.yangtools.yang.binding.Notification;
import org.slf4j.Logger;
return invokers.keySet();
}
- private static Map<SchemaPath, NotificationListenerInvoker> createInvokerMapFor(final Class<? extends NotificationListener> implClz) {
+ public static Map<SchemaPath, NotificationListenerInvoker> createInvokerMapFor(final Class<? extends NotificationListener> implClz) {
final Map<SchemaPath, NotificationListenerInvoker> builder = new HashMap<>();
for(final Class<?> iface : implClz.getInterfaces()) {
if(NotificationListener.class.isAssignableFrom(iface) && BindingReflections.isBindingClass(iface)) {
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
private final BindingNormalizedNodeSerializer codec;
private final DOMNotificationService domNotifService;
- public BindingDOMNotificationServiceAdapter(final BindingNormalizedNodeSerializer codec, final DOMNotificationService domNotifService, final NotificationInvokerFactory notificationInvokerFactory) {
+ public BindingDOMNotificationServiceAdapter(final BindingNormalizedNodeSerializer codec, final DOMNotificationService domNotifService) {
this.codec = codec;
this.domNotifService = domNotifService;
}
protected NotificationService createInstance(final BindingToNormalizedNodeCodec codec,
final ClassToInstanceMap<DOMService> delegates) {
final DOMNotificationService domNotification = delegates.getInstance(DOMNotificationService.class);
- final NotificationInvokerFactory invokerFactory = SingletonHolder.INVOKER_FACTORY;
- return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), domNotification, invokerFactory);
+ return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), domNotification);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Exception is raised when supplied Bidning Aware
- * RPCService class is not routed and was used in context
- * where routed RPCs should only be used.
- *
- */
-public class RpcIsNotRoutedException extends IllegalStateException {
-
- private static final long serialVersionUID = 1L;
-
- public RpcIsNotRoutedException(final String message, final Throwable cause) {
- super(Preconditions.checkNotNull(message), cause);
- }
-
- public RpcIsNotRoutedException(final String message) {
- super(Preconditions.checkNotNull(message));
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public interface RuntimeCodeGenerator {
-
- /**
- * Returns an instance of provided RpcService type which delegates all calls
- * to the delegate.
- *
- * <p>
- * Returned instance:
- * <ul>
- * <li>implements provided subclass of RpcService type and
- * {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy} interface.
- * <li>
- * <p>
- * delegates all invocations of methods, which are defined in RpcService
- * subtype to delegate which is defined by
- * {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy#setDelegate(Object)}.
- * <p>
- * If delegate is not defined (<code>getDelegate() == null</code>)
- * implementation throws {@link IllegalStateException}
- * <li>{@link org.opendaylight.controller.sal.binding.spi.DelegateProxy#getDelegate()} - returns the delegate to which
- * all calls are delegated.
- * <li>{@link org.opendaylight.controller.sal.binding.spi.DelegateProxy#setDelegate(Object)} - sets the delegate for
- * particular instance
- *
- * </ul>
- *
- * @param serviceType
- * - Subclass of RpcService for which direct proxy is to be
- * generated.
- * @return Instance of RpcService of provided serviceType which implements
- * and {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
- * @throws IllegalArgumentException
- *
- */
- <T extends RpcService> T getDirectProxyFor(Class<T> serviceType) throws IllegalArgumentException;
-
- /**
- * Returns an instance of provided RpcService type which routes all calls to
- * other instances selected on particular input field.
- *
- * <p>
- * Returned instance:
- * <ul>
- * <li>Implements:
- * <ul>
- * <li>{@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
- * <li>{@link RpcRouter}
- * </ul>
- * <li>
- * routes all invocations of methods, which are defined in RpcService
- * subtype based on method arguments and routing information defined in the
- * RpcRoutingTables for this instance
- * {@link RpcRouter#getRoutingTable(Class)}.
- * <ul>
- * <li>
- * Implementation uses
- * {@link RpcRouter#getService(Class, org.opendaylight.yangtools.yang.binding.InstanceIdentifier)} method to
- * retrieve particular instance to which call will be routed.
- * <li>
- * Instance of {@link org.opendaylight.yangtools.yang.binding.InstanceIdentifier} is determined by first argument of
- * method and is retrieved via method which is annotated with
- * {@link org.opendaylight.yangtools.yang.binding.annotations.RoutingContext}.
- * Class representing Routing Context Identifier is retrieved by a
- * {@link org.opendaylight.yangtools.yang.binding.annotations.RoutingContext}.
- * <li>If first argument is not defined / {@link org.opendaylight.yangtools.yang.binding.annotations.RoutingContext} annotation
- * is not present on any field invocation will be delegated to default
- * service {@link RpcRouter#getDefaultService()}.
- * </ul>
- *
- * @param serviceType
- * - Subclass of RpcService for which Router is to be generated.
- * @return Instance of RpcService of provided serviceType which implements
- * also {@link RpcRouter}<T> and {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
- * @throws RpcIsNotRoutedException
- */
- <T extends RpcService> RpcRouter<T> getRouterFor(Class<T> serviceType,String name) throws IllegalArgumentException, RpcIsNotRoutedException;
-
- NotificationInvokerFactory getInvokerFactory();
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import java.lang.reflect.Field;
-import java.util.Map;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public final class RuntimeCodeHelper {
- private RuntimeCodeHelper() {
- throw new UnsupportedOperationException("Utility class should never be instantiated");
- }
-
- private static Field getField(final Class<?> cls, final String name) {
- try {
- return cls.getField(name);
- } catch (NoSuchFieldException e) {
- throw new IllegalArgumentException(
- String.format("Class %s is missing field %s", cls, name), e);
- } catch (SecurityException e) {
- throw new IllegalStateException(String.format("Failed to examine class %s", cls), e);
- }
- }
-
- private static Field getDelegateField(final Class<?> cls) {
- return getField(cls, RuntimeCodeSpecification.DELEGATE_FIELD);
- }
-
- private static Object getFieldValue(final Field field, final Object obj) {
- try {
- return field.get(obj);
- } catch (IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to get field %s of object %s", field, obj), e);
- }
- }
-
- private static void setFieldValue(final Field field, final Object obj, final Object value) {
- try {
- field.set(obj, value);
- } catch (IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to set field %s to %s", field, value), e);
- }
- }
-
- /**
- * Helper method to return delegate from ManagedDirectedProxy with use of reflection.
- *
- * Note: This method uses reflection, but access to delegate field should be
- * avoided and called only if necessary.
- */
- @SuppressWarnings("unchecked")
- public static <T extends RpcService> T getDelegate(final RpcService proxy) {
- return (T)getFieldValue(getDelegateField(proxy.getClass()), proxy);
- }
-
- /**
- * Helper method to set delegate to ManagedDirectedProxy with use of reflection.
- *
- * Note: This method uses reflection, but setting delegate field should not occur too much
- * to introduce any significant performance hits.
- */
- public static void setDelegate(final Object proxy, final Object delegate) {
- final Field field = getDelegateField(proxy.getClass());
-
- if (delegate != null) {
- final Class<?> ft = field.getType();
- if (!ft.isAssignableFrom(delegate.getClass())) {
- throw new IllegalArgumentException(
- String.format("Field %s type %s is not compatible with delegate type %s",
- field, ft, delegate.getClass()));
- }
- }
-
- setFieldValue(field, proxy, delegate);
- }
-
- @SuppressWarnings("unchecked")
- public static Map<InstanceIdentifier<? extends Object>,? extends RpcService> getRoutingTable(final RpcService target, final Class<? extends BaseIdentity> tableClass) {
- final Field field = getField(target.getClass(), RuntimeCodeSpecification.getRoutingTableField(tableClass));
- return (Map<InstanceIdentifier<? extends Object>,? extends RpcService>) getFieldValue(field, target);
- }
-
- public static void setRoutingTable(final RpcService target, final Class<? extends BaseIdentity> tableClass, final Map<InstanceIdentifier<? extends Object>,? extends RpcService> routingTable) {
- final Field field = getField(target.getClass(), RuntimeCodeSpecification.getRoutingTableField(tableClass));
- setFieldValue(field, target, routingTable);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public final class RuntimeCodeSpecification {
- public final static String DIRECT_PROXY_SUFFIX = "DirectProxy";
- public final static String INVOKER_SUFFIX = "ListenerInvoker";
- public final static String ROUTER_SUFFIX = "Router";
-
- public final static String DELEGATE_FIELD = "_delegate";
- public final static String ROUTING_TABLE_FIELD_PREFIX = "_routes_";
-
- private RuntimeCodeSpecification() {
- throw new UnsupportedOperationException("Utility class");
- }
-
- /**
- * Returns a name for generated interface
- */
- private static String getGeneratedName(final Class<? extends Object> cls, final String suffix) {
- return cls.getName() + "$$Broker$" + suffix;
- }
-
- public static String getInvokerName(final Class<? extends NotificationListener> listener) {
- return getGeneratedName(listener, RuntimeCodeSpecification.INVOKER_SUFFIX);
- }
-
- /**
- * Returns a name for DirectProxy implementation
- */
- public static String getDirectProxyName(final Class<? extends RpcService> base) {
- return getGeneratedName(base, RuntimeCodeSpecification.DIRECT_PROXY_SUFFIX);
- }
-
- /**
- * Returns a name for Router implementation
- */
- public static String getRouterName(final Class<? extends RpcService> base) {
- return getGeneratedName(base, RuntimeCodeSpecification.ROUTER_SUFFIX);
- }
-
- /**
- * Returns a field name for specified routing context
- */
- public static String getRoutingTableField(final Class<? extends BaseIdentity> routingContext) {
- return "_routes_" + routingContext.getSimpleName();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.Iterables;
-import java.util.Map;
-import java.util.WeakHashMap;
-import javassist.ClassPool;
-import javassist.CtClass;
-import javassist.CtMethod;
-import javassist.NotFoundException;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
-import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
-import org.opendaylight.yangtools.util.ClassLoaderUtils;
-import org.opendaylight.yangtools.yang.binding.BindingMapping;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
-
-abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator, NotificationInvokerFactory {
- @GuardedBy("this")
- private final Map<Class<? extends NotificationListener>, RuntimeGeneratedInvokerPrototype> invokerClasses = new WeakHashMap<>();
- private final CtClass brokerNotificationListener;
- protected final JavassistUtils utils;
-
- protected AbstractRuntimeCodeGenerator(final ClassPool pool) {
- utils = JavassistUtils.forClassPool(pool);
-
- /*
- * Make sure Javassist ClassPool sees the classloader of RpcService
- */
- utils.ensureClassLoader(RpcService.class);
-
- brokerNotificationListener = utils.asCtClass(org.opendaylight.controller.sal.binding.api.NotificationListener.class);
- }
-
- protected final CtClass getBrokerNotificationListener() {
- return brokerNotificationListener;
- }
-
- protected abstract RuntimeGeneratedInvokerPrototype generateListenerInvoker(Class<? extends NotificationListener> cls);
- protected abstract <T extends RpcService> Supplier<T> directProxySupplier(final Class<T> serviceType);
- protected abstract <T extends RpcService> Supplier<T> routerSupplier(final Class<T> serviceType, RpcServiceMetadata metadata);
-
- private RpcServiceMetadata getRpcMetadata(final CtClass iface) throws ClassNotFoundException, NotFoundException, RpcIsNotRoutedException {
- final RpcServiceMetadata metadata = new RpcServiceMetadata();
-
- for (CtMethod method : iface.getMethods()) {
- if (isRpcMethodWithInput(iface, method)) {
- final RpcMetadata routingPair = getRpcMetadata(method);
- if (routingPair != null) {
- metadata.addContext(routingPair.getContext());
- metadata.addRpcMethod(method.getName(), routingPair);
-
- /*
- * Force-load the RPC class representing the "input" of this RPC.
- *
- * FIXME: this is pre-existing side-effect of the original code, which
- * kept a reference to the loaded class, but it did not use it.
- *
- * There was no explanation as to why forcing this load was
- * necessary. As far as I can tell now is that it forces the
- * resolution of method arguments, which would (according to
- * my reading of JLS) occur only when the method is invoked via
- * binding-aware class action, not when coming from
- * binding-independent world. Whether that makes sense or not,
- * remains to be investigated.
- */
- Thread.currentThread().getContextClassLoader().loadClass(routingPair.getInputType().getName());
- } else {
- throw new RpcIsNotRoutedException(String.format("RPC %s from %s is not routed", method.getName(), iface.getName()));
- }
- }
- }
-
- return metadata;
- }
-
-
- private boolean isRpcMethodWithInput(final CtClass iface, final CtMethod method) throws NotFoundException {
- if(iface.equals(method.getDeclaringClass())
- && method.getParameterTypes().length == 1) {
- final CtClass onlyArg = method.getParameterTypes()[0];
- if(onlyArg.isInterface() && onlyArg.getName().endsWith(BindingMapping.RPC_INPUT_SUFFIX)) {
- return true;
- }
- }
- return false;
- }
-
- private RpcMetadata getRpcMetadata(final CtMethod method) throws NotFoundException {
- final CtClass inputClass = method.getParameterTypes()[0];
- return rpcMethodMetadata(inputClass, inputClass, method.getName());
- }
-
- private RpcMetadata rpcMethodMetadata(final CtClass dataClass, final CtClass inputClass, final String rpcMethod) throws NotFoundException {
- for (CtMethod method : dataClass.getMethods()) {
- if (method.getName().startsWith("get") && method.getParameterTypes().length == 0) {
- for (Object annotation : method.getAvailableAnnotations()) {
- if (annotation instanceof RoutingContext) {
- boolean encapsulated = !method.getReturnType().equals(utils.asCtClass(InstanceIdentifier.class));
- return new RpcMetadata(rpcMethod, ((RoutingContext)annotation).value(), method, encapsulated, inputClass);
- }
- }
- }
- }
-
- for (CtClass iface : dataClass.getInterfaces()) {
- final RpcMetadata ret = rpcMethodMetadata(iface, inputClass, rpcMethod);
- if(ret != null) {
- return ret;
- }
- }
- return null;
- }
-
- private synchronized RuntimeGeneratedInvokerPrototype resolveInvokerClass(final Class<? extends NotificationListener> cls) {
- RuntimeGeneratedInvokerPrototype invoker = invokerClasses.get(cls);
- if (invoker != null) {
- return invoker;
- }
-
- synchronized (utils) {
- invoker = ClassLoaderUtils.withClassLoader(cls.getClassLoader(), new Supplier<RuntimeGeneratedInvokerPrototype>() {
- @Override
- public RuntimeGeneratedInvokerPrototype get() {
- return generateListenerInvoker(cls);
- }
- });
- }
-
- invokerClasses.put(cls, invoker);
- return invoker;
- }
-
- @Override
- public final NotificationInvokerFactory getInvokerFactory() {
- return this;
- }
-
- @Override
- public final <T extends RpcService> T getDirectProxyFor(final Class<T> serviceType) {
- synchronized (utils) {
- return ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), directProxySupplier(serviceType));
- }
- }
-
- @Override
- public final <T extends RpcService> RpcRouter<T> getRouterFor(final Class<T> serviceType, final String name) throws RpcIsNotRoutedException {
- final RpcServiceMetadata metadata = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), new Supplier<RpcServiceMetadata>() {
- @Override
- public RpcServiceMetadata get() {
- try {
- return getRpcMetadata(utils.asCtClass(serviceType));
- } catch (ClassNotFoundException | NotFoundException e) {
- throw new IllegalStateException(String.format("Failed to load metadata for class %s", serviceType), e);
- }
- }
- });
-
- if (Iterables.isEmpty(metadata.getContexts())) {
- throw new RpcIsNotRoutedException("Service doesn't have routing context associated.");
- }
-
- synchronized (utils) {
- final T instance = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), routerSupplier(serviceType, metadata));
- return new RpcRouterCodegenInstance<T>(name, serviceType, instance, metadata.getContexts());
- }
- }
-
- @Override
- public NotificationInvoker invokerFor(final NotificationListener instance) {
- final Class<? extends NotificationListener> cls = instance.getClass();
- final RuntimeGeneratedInvokerPrototype prototype = resolveInvokerClass(cls);
-
- try {
- return RuntimeGeneratedInvoker.create(instance, prototype);
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to create invoker for %s", instance), e);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
-import java.lang.reflect.Method;
-import java.util.Map;
-import javassist.CannotCompileException;
-import javassist.ClassPool;
-import javassist.CtClass;
-import javassist.CtMethod;
-import javassist.NotFoundException;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeSpecification;
-import org.opendaylight.yangtools.sal.binding.generator.util.ClassGenerator;
-import org.opendaylight.yangtools.sal.binding.generator.util.MethodGenerator;
-import org.opendaylight.yangtools.util.ClassLoaderUtils;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.RpcImplementation;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
-
-final class DefaultRuntimeCodeGenerator extends AbstractRuntimeCodeGenerator {
-
- DefaultRuntimeCodeGenerator(final ClassPool pool) {
- super(pool);
- }
-
- @Override
- protected <T extends RpcService> Supplier<T> directProxySupplier(final Class<T> serviceType) {
- return new Supplier<T>() {
- @SuppressWarnings("unchecked")
- @Override
- public T get() {
- final String proxyName = RuntimeCodeSpecification.getDirectProxyName(serviceType);
-
- final Class<?> potentialClass = ClassLoaderUtils.tryToLoadClassWithTCCL(proxyName);
- if (potentialClass != null) {
- try {
- return (T)potentialClass.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException("Failed to instantiate class " + potentialClass.getName(), e);
- }
- }
-
- final CtClass supertype = utils.asCtClass(serviceType);
- final String directProxyName = RuntimeCodeSpecification.getDirectProxyName(serviceType);
-
- final CtClass createdCls;
- try {
- createdCls = utils.createClass(directProxyName, supertype, new ClassGenerator() {
- @Override
- public void process(final CtClass cls) throws CannotCompileException {
- utils.field(cls, RuntimeCodeSpecification.DELEGATE_FIELD, serviceType);
- utils.implementsType(cls, utils.asCtClass(RpcImplementation.class));
- utils.implementMethodsFrom(cls, supertype, new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
- sb.append("{\n");
- sb.append(" if (").append(RuntimeCodeSpecification.DELEGATE_FIELD).append(" == null) {\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No default provider is available\");\n");
- sb.append(" }\n");
- sb.append(" return ($r) ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append('.').append(method.getName()).append("($$);\n");
- sb.append("}\n");
- method.setBody(sb.toString());
- }
- });
-
- // FIXME: copy this one...
- utils.implementMethodsFrom(cls, utils.asCtClass(RpcImplementation.class), new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
- sb.append("{\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No provider is processing supplied message\");\n");
- sb.append(" return ($r) null;\n");
- sb.append("}\n");
- method.setBody(sb.toString());
- }
- });
- }
- });
- } catch (CannotCompileException e) {
- throw new IllegalStateException("Failed to create class " + directProxyName, e);
- }
-
- final Class<?> c;
- try {
- c = createdCls.toClass(serviceType.getClassLoader(), serviceType.getProtectionDomain());
- } catch (CannotCompileException e) {
- throw new IllegalStateException(String.format("Failed to create class %s", createdCls), e);
- }
-
- try {
- return (T) c.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to instantiated class %s", c), e);
- }
- }
- };
- }
-
- @Override
- protected <T extends RpcService> Supplier<T> routerSupplier(final Class<T> serviceType, final RpcServiceMetadata metadata) {
- return new Supplier<T>() {
- @SuppressWarnings("unchecked")
- @Override
- public T get() {
- final CtClass supertype = utils.asCtClass(serviceType);
- final String routerName = RuntimeCodeSpecification.getRouterName(serviceType);
- final Class<?> potentialClass = ClassLoaderUtils.tryToLoadClassWithTCCL(routerName);
- if (potentialClass != null) {
- try {
- return (T)potentialClass.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException("Failed to instantiate class", e);
- }
- }
-
- final CtClass targetCls;
- try {
- targetCls = utils.createClass(routerName, supertype, new ClassGenerator() {
- @Override
- public void process(final CtClass cls) throws CannotCompileException {
- utils.field(cls, RuntimeCodeSpecification.DELEGATE_FIELD, serviceType);
- //utils.field(cls, REMOTE_INVOKER_FIELD,iface);
- utils.implementsType(cls, utils.asCtClass(RpcImplementation.class));
-
- for (final Class<? extends BaseIdentity> ctx : metadata.getContexts()) {
- utils.field(cls, RuntimeCodeSpecification.getRoutingTableField(ctx), Map.class);
- }
-
- utils.implementMethodsFrom(cls, supertype, new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final int ptl;
- try {
- ptl = method.getParameterTypes().length;
- } catch (NotFoundException e) {
- throw new CannotCompileException(e);
- }
- final StringBuilder sb = new StringBuilder();
-
- switch (ptl) {
- case 0:
- sb.append("return ($r) ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append('.').append(method.getName()).append("($$);");
- break;
- case 1:
- final RpcMetadata rpcMeta = metadata.getRpcMethod(method.getName());
- final String rtGetter = rpcMeta.getInputRouteGetter().getName();
- final String stName = supertype.getName();
-
- sb.append('\n');
- sb.append("{\n");
- sb.append(" if ($1 == null) {\n");
- sb.append(" throw new IllegalArgumentException(\"RPC input must not be null and must contain a value for field ").append(rtGetter).append("\");\n");
- sb.append(" }\n");
- sb.append(" if ($1.").append(rtGetter).append("() == null) {\n");
- sb.append(" throw new IllegalArgumentException(\"Field ").append(rtGetter).append(" must not be null\");\n");
- sb.append(" }\n");
-
- sb.append(" final org.opendaylight.yangtools.yang.binding.InstanceIdentifier identifier = $1.").append(rtGetter).append("()");
- if (rpcMeta.isRouteEncapsulated()) {
- sb.append(".getValue()");
- }
- sb.append(";\n");
-
- sb.append(" ").append(supertype.getName()).append(" instance = (").append(stName).append(") ").append(RuntimeCodeSpecification.getRoutingTableField(rpcMeta.getContext())).append(".get(identifier);\n");
- sb.append(" if (instance == null) {\n");
- sb.append(" instance = ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append(";\n");
- sb.append(" }\n");
-
- sb.append(" if (instance == null) {\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No routable provider is processing routed message for \" + String.valueOf(identifier));\n");
- sb.append(" }\n");
- sb.append(" return ($r) instance.").append(method.getName()).append("($$);\n");
- sb.append('}');
- break;
- default:
- throw new CannotCompileException(String.format("Unsupported parameters length %s", ptl));
- }
-
- method.setBody(sb.toString());
- }
- });
-
- // FIXME: move this into a template class
- utils.implementMethodsFrom(cls, utils.asCtClass(RpcImplementation.class), new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
- sb.append("{\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No provider is processing supplied message\");\n");
- sb.append(" return ($r) null;\n");
- sb.append("}\n");
-
- method.setBody(sb.toString());
- }
- });
- }
- });
- } catch (CannotCompileException e) {
- throw new IllegalStateException("Failed to create class " + routerName, e);
- }
-
- final Class<?> c;
- try {
- c = targetCls.toClass(serviceType.getClassLoader(), serviceType.getProtectionDomain());
- } catch (CannotCompileException e) {
- throw new IllegalStateException(String.format("Failed to compile class %s", targetCls), e);
- }
-
- try {
- return (T)c.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to instantiate class %s", c), e);
- }
- }
- };
- }
-
- @SuppressWarnings("unchecked")
- @Override
- protected RuntimeGeneratedInvokerPrototype generateListenerInvoker(final Class<? extends NotificationListener> listenerType) {
- final String invokerName = RuntimeCodeSpecification.getInvokerName(listenerType);
- final CtClass targetCls;
-
- // Builder for a set of supported types. Filled while the target class is being generated
- final Builder<Class<? extends Notification>> b = ImmutableSet.builder();
-
- try {
- targetCls = utils.createClass(invokerName, getBrokerNotificationListener(), new ClassGenerator() {
- @Override
- public void process(final CtClass cls) throws CannotCompileException {
- utils.field(cls, RuntimeCodeSpecification.DELEGATE_FIELD, listenerType);
- utils.implementMethodsFrom(cls, getBrokerNotificationListener(), new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
-
- sb.append("{\n");
-
- for (Method m : listenerType.getMethods()) {
- if (BindingReflections.isNotificationCallback(m)) {
- final Class<?> argType = m.getParameterTypes()[0];
-
- // populates builder above
- b.add((Class<? extends Notification>) argType);
-
- sb.append(" if ($1 instanceof ").append(argType.getName()).append(") {\n");
- sb.append(" ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append('.').append(m.getName()).append("((").append(argType.getName()).append(") $1);\n");
- sb.append(" return null;\n");
- sb.append(" } else ");
- }
- }
-
- sb.append(" return null;\n");
- sb.append("}\n");
- method.setBody(sb.toString());
- }
- });
- }
- });
- } catch (CannotCompileException e) {
- throw new IllegalStateException("Failed to create class " + invokerName, e);
- }
-
- final Class<?> finalClass;
- try {
- finalClass = targetCls.toClass(listenerType.getClassLoader(), listenerType.getProtectionDomain());
- } catch (CannotCompileException e) {
- throw new IllegalStateException(String.format("Failed to compile class %s", targetCls), e);
- }
-
- return new RuntimeGeneratedInvokerPrototype(b.build(), (Class<? extends org.opendaylight.controller.sal.binding.api.NotificationListener<?>>) finalClass);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import javassist.CtClass;
-import javassist.CtMethod;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-final class RpcMetadata {
- private final Class<? extends BaseIdentity> context;
- private final CtMethod inputRouteGetter;
- private final Boolean routeEncapsulated;
- private final CtClass inputType;
- private final String methodName;
-
- public Class<? extends BaseIdentity> getContext() {
- return context;
- }
-
- public CtMethod getInputRouteGetter() {
- return inputRouteGetter;
- }
-
- public CtClass getInputType() {
- return inputType;
- }
-
- public boolean isRouteEncapsulated() {
- return routeEncapsulated;
- }
-
- public RpcMetadata(final String methodName, final Class<? extends BaseIdentity> context, final CtMethod inputRouteGetter, final boolean routeEncapsulated, final CtClass inputType) {
- this.inputRouteGetter = Preconditions.checkNotNull(inputRouteGetter);
- this.methodName = Preconditions.checkNotNull(methodName);
- this.inputType = Preconditions.checkNotNull(inputType);
- this.context = Preconditions.checkNotNull(context);
- this.routeEncapsulated = routeEncapsulated;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + methodName.hashCode();
- result = prime * result + context.hashCode();
- result = prime * result + inputRouteGetter.hashCode();
- result = prime * result + routeEncapsulated.hashCode();
- result = prime * result + inputType.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof RpcMetadata)) {
- return false;
- }
- final RpcMetadata other = (RpcMetadata) obj;
- if (!methodName.equals(other.methodName)) {
- return false;
- }
- if (!context.equals(other.context)) {
- return false;
- }
- if (!inputRouteGetter.equals(other.inputRouteGetter)) {
- return false;
- }
- if (!routeEncapsulated.equals(other.routeEncapsulated)) {
- return false;
- }
- return inputType.equals(other.inputType);
- }
-
- @Override
- public String toString() {
- return Objects.toStringHelper(this)
- .add("context", context)
- .add("inputRouteGetter", inputRouteGetter)
- .add("inputType", inputType)
- .add("methodName", methodName)
- .add("routeEncapsulated", routeEncapsulated)
- .toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import static org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper.setRoutingTable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChange;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRoutingTable;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class RpcRouterCodegenInstance<T extends RpcService> implements //
-RpcRouter<T>, RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>> {
-
- private static final Logger LOG = LoggerFactory.getLogger(RpcRouterCodegenInstance.class);
-
- private final Class<T> serviceType;
-
- private final T invocationProxy;
-
- private final Set<Class<? extends BaseIdentity>> contexts;
-
- private final ListenerRegistry<RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> listeners;
-
- private final Map<Class<? extends BaseIdentity>, RpcRoutingTableImpl<? extends BaseIdentity, T>> routingTables;
-
- @SuppressWarnings("unchecked")
- public RpcRouterCodegenInstance(final String name,final Class<T> type, final T routerImpl, final Iterable<Class<? extends BaseIdentity>> contexts) {
- this.listeners = ListenerRegistry.create();
- this.serviceType = type;
- this.invocationProxy = routerImpl;
- this.contexts = ImmutableSet.copyOf(contexts);
- Map<Class<? extends BaseIdentity>, RpcRoutingTableImpl<? extends BaseIdentity, T>> mutableRoutingTables = new HashMap<>();
- for (Class<? extends BaseIdentity> ctx : contexts) {
- RpcRoutingTableImpl<? extends BaseIdentity, T> table = new RpcRoutingTableImpl<>(name,ctx,type);
-
- @SuppressWarnings("rawtypes")
- Map invokerView = table.getRoutes();
-
- setRoutingTable(invocationProxy, ctx, invokerView);
- mutableRoutingTables.put(ctx, table);
- table.registerRouteChangeListener(this);
- }
- this.routingTables = ImmutableMap.copyOf(mutableRoutingTables);
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
-
- @Override
- public T getInvocationProxy() {
- return invocationProxy;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public <C extends BaseIdentity> RpcRoutingTable<C, T> getRoutingTable(final Class<C> routeContext) {
- return (RpcRoutingTable<C, T>) routingTables.get(routeContext);
- }
-
- @Override
- public T getDefaultService() {
- return RuntimeCodeHelper.getDelegate(invocationProxy);
- }
-
- @Override
- public Set<Class<? extends BaseIdentity>> getContexts() {
- return contexts;
- }
-
- @Override
- public <L extends RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
- final L listener) {
- return listeners.registerWithType(listener);
- }
-
- @Override
- public void onRouteChange(final RouteChange<Class<? extends BaseIdentity>, InstanceIdentifier<?>> change) {
- for (ListenerRegistration<RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> listener : listeners) {
- try {
- listener.getInstance().onRouteChange(change);
- } catch (Exception e) {
- LOG.error("Error occured during invoker listener {}", listener.getInstance(), e);
- }
- }
- }
-
- @Override
- public T getService(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
- return routingTables.get(context).getRoute(path);
- }
-
- @Override
- public RoutedRpcRegistration<T> addRoutedRpcImplementation(final T service) {
- return new RoutedRpcRegistrationImpl(service);
- }
-
- public void removeDefaultImplementation(final T instance) {
- RpcService current = RuntimeCodeHelper.getDelegate(invocationProxy);
- if(instance == current) {
- RuntimeCodeHelper.setDelegate(invocationProxy, null);
- }
- }
-
- @Override
- public RpcRegistration<T> registerDefaultService(final T service) {
- RuntimeCodeHelper.setDelegate(invocationProxy, service);
- return new DefaultRpcImplementationRegistration(service);
- }
-
- private final class RoutedRpcRegistrationImpl extends AbstractObjectRegistration<T> implements RoutedRpcRegistration<T> {
- /*
- * FIXME: retaining this collection is not completely efficient. We really should be storing
- * a reference to this registration, as a particular listener may be registered multiple
- * times -- and then this goes kaboom in various aspects.
- */
- @GuardedBy("this")
- private final Collection<Class<? extends BaseIdentity>> contexts = new ArrayList<>(1);
-
- public RoutedRpcRegistrationImpl(final T instance) {
- super(instance);
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
-
- @Override
- public synchronized void registerPath(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
- if (isClosed()) {
- LOG.debug("Closed registration of {} ignoring new path {}", getInstance(), path);
- return;
- }
-
- routingTables.get(context).updateRoute(path, getInstance());
- contexts.add(context);
- }
-
- @Override
- public synchronized void unregisterPath(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
- if (isClosed()) {
- LOG.debug("Closed unregistration of {} ignoring new path {}", getInstance(), path);
- return;
- }
-
- routingTables.get(context).removeRoute(path, getInstance());
- contexts.remove(context);
- }
-
- @Deprecated
- @Override
- public void registerInstance(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> instance) {
- registerPath(context, instance);
- }
-
- @Deprecated
- @Override
- public void unregisterInstance(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> instance) {
- unregisterPath(context, instance);
- }
-
- @Override
- protected synchronized void removeRegistration() {
- for (Class<? extends BaseIdentity> ctx : contexts) {
- routingTables.get(ctx).removeAllReferences(getInstance());
- }
- contexts.clear();
- }
- }
-
- private final class DefaultRpcImplementationRegistration extends AbstractObjectRegistration<T> implements RpcRegistration<T> {
-
-
- protected DefaultRpcImplementationRegistration(final T instance) {
- super(instance);
- }
-
- @Override
- protected void removeRegistration() {
- removeDefaultImplementation(this.getInstance());
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
- }
-
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
-import org.opendaylight.controller.md.sal.common.impl.routing.RoutingUtils;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRoutingTable;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.Mutable;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class RpcRoutingTableImpl<C extends BaseIdentity, S extends RpcService> implements
- Mutable, //
- RpcRoutingTable<C, S>, //
- RouteChangePublisher<Class<? extends BaseIdentity>, InstanceIdentifier<?>> {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(RpcRoutingTableImpl.class);
- private final String routerName;
- private final Class<S> serviceType;
-
- private final Class<C> contextType;
- private final ConcurrentMap<InstanceIdentifier<?>, S> routes;
- private final Map<InstanceIdentifier<?>, S> unmodifiableRoutes;
-
- private RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>> listener;
- private S defaultRoute;
-
- public RpcRoutingTableImpl(final String routerName,final Class<C> contextType, final Class<S> serviceType) {
- super();
- this.routerName = routerName;
- this.serviceType = serviceType;
- this.contextType = contextType;
- this.routes = new ConcurrentHashMap<>();
- this.unmodifiableRoutes = Collections.unmodifiableMap(routes);
- }
-
- @Override
- public void setDefaultRoute(final S target) {
- defaultRoute = target;
- }
-
- @Override
- public S getDefaultRoute() {
- return defaultRoute;
- }
-
- @Override
- public <L extends RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
- final L listener) {
- return new SingletonListenerRegistration<L>(listener);
- }
-
- @Override
- public Class<C> getIdentifier() {
- return contextType;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public void updateRoute(final InstanceIdentifier<?> path, final S service) {
- S previous = this.routes.put(path, service);
-
- LOGGER.debug("Route {} updated to {} in routing table {}",path,service,this);
- @SuppressWarnings("rawtypes")
- RouteChangeListener listenerCapture = listener;
- if (previous == null && listenerCapture != null) {
- listenerCapture.onRouteChange(RoutingUtils.announcementChange(contextType, path));
- }
- }
-
-
- @Override
- @SuppressWarnings("unchecked")
- public void removeRoute(final InstanceIdentifier<?> path) {
- S previous = this.routes.remove(path);
- LOGGER.debug("Route {} to {} removed in routing table {}",path,previous,this);
- @SuppressWarnings("rawtypes")
- RouteChangeListener listenerCapture = listener;
- if (previous != null && listenerCapture != null) {
- listenerCapture.onRouteChange(RoutingUtils.removalChange(contextType, path));
- }
- }
-
- void removeRoute(final InstanceIdentifier<?> path, final S service) {
- @SuppressWarnings("rawtypes")
- RouteChangeListener listenerCapture = listener;
- if (routes.remove(path, service) && listenerCapture != null) {
- LOGGER.debug("Route {} to {} removed in routing table {}",path,service,this);
- listenerCapture.onRouteChange(RoutingUtils.removalChange(contextType, path));
- }
- }
-
- @Override
- public S getRoute(final InstanceIdentifier<?> nodeInstance) {
- S route = routes.get(nodeInstance);
- if (route != null) {
- return route;
- }
- return getDefaultRoute();
- }
-
- @Override
- public Map<InstanceIdentifier<?>, S> getRoutes() {
- return unmodifiableRoutes;
- }
-
- void removeAllReferences(final S service) {
- // FIXME: replace this via properly-synchronized BiMap (or something)
- final Iterator<S> it = routes.values().iterator();
- while (it.hasNext()) {
- final S s = it.next();
- if (service.equals(s)) {
- it.remove();
- }
- }
- }
-
- @Override
- public String toString() {
- return "RpcRoutingTableImpl [router=" + routerName + ", service=" + serviceType.getSimpleName() + ", context="
- + contextType.getSimpleName() + "]";
- }
-
- private class SingletonListenerRegistration<L extends RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> extends
- AbstractObjectRegistration<L>
- implements ListenerRegistration<L> {
-
- public SingletonListenerRegistration(final L instance) {
- super(instance);
- listener = instance;
- }
-
- @Override
- protected void removeRegistration() {
- listener = null;
- }
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-
-import com.google.common.collect.Iterables;
-
-final class RpcServiceMetadata {
- private final Set<Class<? extends BaseIdentity>> contexts = new HashSet<>();
- private final Map<String, RpcMetadata> rpcMethods = new HashMap<>();
- private final Iterable<Class<? extends BaseIdentity>> roContexts = Iterables.unmodifiableIterable(contexts);
-
- public Iterable<Class<? extends BaseIdentity>> getContexts() {
- return roContexts;
- }
-
- public RpcMetadata getRpcMethod(final String name) {
- return rpcMethods.get(name);
- }
-
- public void addContext(final Class<? extends BaseIdentity> context) {
- contexts.add(context);
- }
-
- public void addRpcMethod(final String name, final RpcMetadata routingPair) {
- rpcMethods.put(name, routingPair);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import java.util.Set;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-final class RuntimeGeneratedInvoker implements NotificationInvoker {
- private final org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> invocationProxy;
- private final RuntimeGeneratedInvokerPrototype prototype;
- private final NotificationListener delegate;
-
- @SuppressWarnings("unchecked")
- private RuntimeGeneratedInvoker(final NotificationListener delegate, final RuntimeGeneratedInvokerPrototype prototype, final org.opendaylight.controller.sal.binding.api.NotificationListener<?> proxy) {
- this.invocationProxy = (org.opendaylight.controller.sal.binding.api.NotificationListener<Notification>) proxy;
- this.delegate = Preconditions.checkNotNull(delegate);
- this.prototype = prototype;
- }
-
- public static RuntimeGeneratedInvoker create(final NotificationListener delegate, final RuntimeGeneratedInvokerPrototype prototype) throws InstantiationException, IllegalAccessException {
- final org.opendaylight.controller.sal.binding.api.NotificationListener<?> proxy = Preconditions.checkNotNull(prototype.getProtoClass().newInstance());
- RuntimeCodeHelper.setDelegate(proxy, delegate);
- return new RuntimeGeneratedInvoker(delegate, prototype, proxy);
- }
-
- @Override
- public NotificationListener getDelegate() {
- return delegate;
- }
-
- @Override
- public org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> getInvocationProxy() {
- return invocationProxy;
- }
-
- @Override
- public Set<Class<? extends Notification>> getSupportedNotifications() {
- return prototype.getSupportedNotifications();
- }
-
- @Override
- public void close() {
- // Nothing to do
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + delegate.hashCode();
- result = prime * result + invocationProxy.hashCode();
- result = prime * result + prototype.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof RuntimeGeneratedInvoker)) {
- return false;
- }
- final RuntimeGeneratedInvoker other = (RuntimeGeneratedInvoker) obj;
- if (!delegate.equals(other.delegate)) {
- return false;
- }
- if (!invocationProxy.equals(other.invocationProxy)) {
- return false;
- }
- return prototype.equals(other.prototype);
- }
-
- @Override
- public String toString() {
- return Objects.toStringHelper(this).toString();
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import java.util.Set;
-
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-final class RuntimeGeneratedInvokerPrototype {
- private final Set<Class<? extends Notification>> supportedNotifications;
- private final Class<? extends NotificationListener<?>> protoClass;
-
- public RuntimeGeneratedInvokerPrototype(final Set<Class<? extends Notification>> supportedNotifications, final Class<? extends NotificationListener<?>> protoClass) {
- this.supportedNotifications = Preconditions.checkNotNull(supportedNotifications);
- this.protoClass = Preconditions.checkNotNull(protoClass);
- }
-
- public Set<Class<? extends Notification>> getSupportedNotifications() {
- return supportedNotifications;
- }
-
- public Class<? extends NotificationListener<?>> getProtoClass() {
- return protoClass;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + supportedNotifications.hashCode();
- result = prime * result + protoClass.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof RuntimeGeneratedInvokerPrototype)) {
- return false;
- }
- final RuntimeGeneratedInvokerPrototype other = (RuntimeGeneratedInvokerPrototype) obj;
- if (!protoClass.equals(other.protoClass)) {
- return false;
- }
- return supportedNotifications.equals(other.supportedNotifications);
- }
-
- @Override
- public String toString() {
- return Objects.toStringHelper(this)
- .add("protoClass", protoClass)
- .add("supportedNotifications", supportedNotifications)
- .toString();
- }
-}
import java.util.concurrent.TimeUnit;
import javassist.ClassPool;
import org.apache.commons.lang3.StringUtils;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public static final ClassPool CLASS_POOL = ClassPool.getDefault();
public static final JavassistUtils JAVASSIST = JavassistUtils.forClassPool(CLASS_POOL);
- public static final org.opendaylight.controller.sal.binding.codegen.impl.DefaultRuntimeCodeGenerator RPC_GENERATOR_IMPL = new org.opendaylight.controller.sal.binding.codegen.impl.DefaultRuntimeCodeGenerator(
- CLASS_POOL);
- public static final RuntimeCodeGenerator RPC_GENERATOR = RPC_GENERATOR_IMPL;
- public static final NotificationInvokerFactory INVOKER_FACTORY = RPC_GENERATOR_IMPL.getInvokerFactory();
public static final int CORE_NOTIFICATION_THREADS = 4;
public static final int MAX_NOTIFICATION_THREADS = 32;
if (NOTIFICATION_EXECUTOR == null) {
int queueSize = MAX_NOTIFICATION_QUEUE_SIZE;
- String queueValue = System.getProperty(NOTIFICATION_QUEUE_SIZE_PROPERTY);
+ final String queueValue = System.getProperty(NOTIFICATION_QUEUE_SIZE_PROPERTY);
if (StringUtils.isNotBlank(queueValue)) {
try {
queueSize = Integer.parseInt(queueValue);
logger.trace("Queue size was set to {}", queueSize);
- } catch (NumberFormatException e) {
+ } catch (final NumberFormatException e) {
logger.warn("Cannot parse {} as set by {}, using default {}", queueValue,
NOTIFICATION_QUEUE_SIZE_PROPERTY, queueSize);
}
public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
try {
executor.getQueue().put(r);
- } catch (InterruptedException e) {
+ } catch (final InterruptedException e) {
throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
}
}
@Deprecated
public static synchronized ListeningExecutorService getDefaultCommitExecutor() {
if (COMMIT_EXECUTOR == null) {
- ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-commit-%d").build();
+ final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-commit-%d").build();
/*
* FIXME: this used to be newCacheThreadPool(), but MD-SAL does not have transaction
* ordering guarantees, which means that using a concurrent threadpool results
* in inconsistent data being present. Once proper primitives are introduced,
* concurrency can be reintroduced.
*/
- ExecutorService executor = Executors.newSingleThreadExecutor(factory);
+ final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
COMMIT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
}
public static ExecutorService getDefaultChangeEventExecutor() {
if (CHANGE_EVENT_EXECUTOR == null) {
- ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-change-%d").build();
+ final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-change-%d").build();
/*
* FIXME: this used to be newCacheThreadPool(), but MD-SAL does not have transaction
* ordering guarantees, which means that using a concurrent threadpool results
* in inconsistent data being present. Once proper primitives are introduced,
* concurrency can be reintroduced.
*/
- ExecutorService executor = Executors.newSingleThreadExecutor(factory);
+ final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
CHANGE_EVENT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataReader;
-import org.opendaylight.controller.md.sal.common.impl.routing.AbstractDataReadRouter;
-import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataBroker;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.util.DataObjectReadingUtil;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
-import com.google.common.collect.Maps;
-
-@Deprecated
-public class DataBrokerImpl extends
- AbstractDataBroker<InstanceIdentifier<? extends DataObject>, DataObject, DataChangeListener> //
- implements DataProviderService, AutoCloseable {
-
- private final static class ContainsWildcarded implements Predicate<InstanceIdentifier<? extends DataObject>> {
-
- private final InstanceIdentifier<? extends DataObject> key;
-
- public ContainsWildcarded(final InstanceIdentifier<? extends DataObject> key) {
- this.key = key;
- }
-
- @Override
- public boolean apply(final InstanceIdentifier<? extends DataObject> input) {
- return key.containsWildcarded(input);
- }
- }
-
- private final static class IsContainedWildcarded implements Predicate<InstanceIdentifier<? extends DataObject>> {
-
- private final InstanceIdentifier<? extends DataObject> key;
-
- public IsContainedWildcarded(final InstanceIdentifier<? extends DataObject> key) {
- this.key = key;
- }
-
- @Override
- public boolean apply(final InstanceIdentifier<? extends DataObject> input) {
- return input.containsWildcarded(key);
- }
- }
-
- private final AtomicLong nextTransaction = new AtomicLong();
- private final AtomicLong createdTransactionsCount = new AtomicLong();
- private final DelegatingDataReadRouter router = new DelegatingDataReadRouter();
- private DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> rootCommitHandler;
-
- public DataBrokerImpl() {
- setDataReadRouter(router);
- }
-
- public void setDataReadDelegate(final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate) {
- router.setDelegate(delegate);
- }
-
- public AtomicLong getCreatedTransactionsCount() {
- return createdTransactionsCount;
- }
-
- @Override
- public DataTransactionImpl beginTransaction() {
- String transactionId = "BA-" + nextTransaction.getAndIncrement();
- createdTransactionsCount.getAndIncrement();
- return new DataTransactionImpl(transactionId, this);
- }
-
- @Override
- public void close() {
-
- }
-
- @Override
- protected Predicate<InstanceIdentifier<? extends DataObject>> createContainsPredicate(
- final InstanceIdentifier<? extends DataObject> key) {
- return new ContainsWildcarded(key);
- }
-
- @Override
- protected Predicate<InstanceIdentifier<? extends DataObject>> createIsContainedPredicate(
- final InstanceIdentifier<? extends DataObject> key) {
- return new IsContainedWildcarded(key);
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- @Override
- protected Map<InstanceIdentifier<? extends DataObject>, DataObject> deepGetBySubpath(
- final Map<InstanceIdentifier<? extends DataObject>, DataObject> dataSet,
- final InstanceIdentifier<? extends DataObject> path) {
- Builder<InstanceIdentifier<? extends DataObject>, DataObject> builder = ImmutableMap.builder();
- Map<InstanceIdentifier<? extends DataObject>, DataObject> potential = Maps.filterKeys(dataSet,
- createIsContainedPredicate(path));
- for (Entry<InstanceIdentifier<? extends DataObject>, DataObject> entry : potential.entrySet()) {
- try {
- builder.putAll(DataObjectReadingUtil.readData(entry.getValue(), (InstanceIdentifier) entry.getKey(),
- path));
- } catch (Exception e) {
- // FIXME : Log exception;
- }
- }
- return builder.build();
-
- }
-
- public class DelegatingDataReadRouter extends
- AbstractDataReadRouter<InstanceIdentifier<? extends DataObject>, DataObject> {
-
- private DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate;
-
- @Override
- public DataObject readConfigurationData(final InstanceIdentifier<? extends DataObject> path) {
- return delegate.readConfigurationData(path);
- }
-
- public void setDelegate(final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public DataObject readOperationalData(final InstanceIdentifier<? extends DataObject> path) {
- return delegate.readOperationalData(path);
- }
-
- @Override
- protected DataObject merge(final InstanceIdentifier<? extends DataObject> path, final Iterable<DataObject> data) {
- throw new UnsupportedOperationException("Not supported");
- }
-
- @Override
- public Registration registerConfigurationReader(
- final InstanceIdentifier<? extends DataObject> path,
- final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
- throw new UnsupportedOperationException("Not supported");
- }
-
- @Override
- public Registration registerOperationalReader(
- final InstanceIdentifier<? extends DataObject> path,
- final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
- throw new UnsupportedOperationException("Not supported");
- }
- }
-
- @Override
- protected ImmutableList<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> affectedCommitHandlers(
- final Set<InstanceIdentifier<? extends DataObject>> paths) {
- ImmutableList.Builder<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> handlersBuilder = ImmutableList.builder();
- return handlersBuilder //
- .add(rootCommitHandler) //
- .addAll(super.affectedCommitHandlers(paths)) //
- .build();
- }
-
- public void setRootCommitHandler(final DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> commitHandler) {
- rootCommitHandler = commitHandler;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
-import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-@Deprecated
-public class DataTransactionImpl extends AbstractDataTransaction<InstanceIdentifier<? extends DataObject>, DataObject>
- implements DataModificationTransaction {
- private final ListenerRegistry<DataTransactionListener> listeners = new ListenerRegistry<DataTransactionListener>();
-
-
-
- public DataTransactionImpl(Object identifier,DataBrokerImpl dataBroker) {
- super(identifier,dataBroker);
- }
-
- @Override
- public ListenerRegistration<DataTransactionListener> registerListener(DataTransactionListener listener) {
- return listeners.register(listener);
- }
-
- @Override
- protected void onStatusChange(TransactionStatus status) {
- for (ListenerRegistration<DataTransactionListener> listenerRegistration : listeners) {
- listenerRegistration.getInstance().onStatusUpdated(this, status);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.util.concurrent.UncheckedExecutionException;
-import java.util.EventListener;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChange;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
-import org.opendaylight.controller.md.sal.common.impl.routing.RoutingUtils;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChangePublisher<RpcContextIdentifier, InstanceIdentifier<?>> {
-
- private RuntimeCodeGenerator rpcFactory = SingletonHolder.RPC_GENERATOR_IMPL;
-
- // cache of proxy objects where each value in the map corresponds to a specific RpcService
- private final LoadingCache<Class<? extends RpcService>, RpcService> publicProxies = CacheBuilder.newBuilder().weakKeys().
- build(new CacheLoader<Class<? extends RpcService>, RpcService>() {
- @Override
- public RpcService load(final Class<? extends RpcService> type) {
- final RpcService proxy = rpcFactory.getDirectProxyFor(type);
- LOG.debug("Created {} as public proxy for {} in {}", proxy, type.getSimpleName(), this);
- return proxy;
- }
- });
-
- private final Cache<Class<? extends RpcService>, RpcRouter<?>> rpcRouters = CacheBuilder.newBuilder().weakKeys()
- .build();
-
- private final ListenerRegistry<RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> routeChangeListeners = ListenerRegistry
- .create();
- private final ListenerRegistry<RouterInstantiationListener> routerInstantiationListener = ListenerRegistry.create();
-
- private final static Logger LOG = LoggerFactory.getLogger(RpcProviderRegistryImpl.class);
-
- private final String name;
-
- private final ListenerRegistry<GlobalRpcRegistrationListener> globalRpcListeners = ListenerRegistry.create();
-
- public String getName() {
- return name;
- }
-
- public RpcProviderRegistryImpl(final String name) {
- super();
- this.name = name;
- }
-
- @Override
- public final <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(final Class<T> type,
- final T implementation) throws IllegalStateException {
- return getRpcRouter(type).addRoutedRpcImplementation(implementation);
- }
-
- @Override
- public final <T extends RpcService> RpcRegistration<T> addRpcImplementation(final Class<T> type, final T implementation) {
-
- // FIXME: This should be well documented - addRpcImplementation for
- // routed RPCs
- try {
- // Note: If RPC is really global, expected count of registrations
- // of this method is really low.
- RpcRouter<T> potentialRouter = getRpcRouter(type);
- checkState(potentialRouter.getDefaultService() == null,
- "Default service for routed RPC already registered.");
- return potentialRouter.registerDefaultService(implementation);
- } catch (RpcIsNotRoutedException e) {
- // NOOP - we could safely continue, since RPC is not routed
- // so we fallback to global routing.
- LOG.debug("RPC is not routed. Using global registration.",e);
- }
- T publicProxy = getRpcService(type);
- RpcService currentDelegate = RuntimeCodeHelper.getDelegate(publicProxy);
- checkState(currentDelegate == null, "Rpc service is already registered");
- LOG.debug("Registering {} as global implementation of {} in {}", implementation, type.getSimpleName(), this);
- RuntimeCodeHelper.setDelegate(publicProxy, implementation);
- notifyGlobalRpcAdded(type);
- return new RpcProxyRegistration<T>(type, implementation, this);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public final <T extends RpcService> T getRpcService(final Class<T> type) {
- return (T) publicProxies.getUnchecked(type);
- }
-
-
- public <T extends RpcService> RpcRouter<T> getRpcRouter(final Class<T> type) {
- try {
- final AtomicBoolean created = new AtomicBoolean(false);
- @SuppressWarnings( "unchecked")
- // LoadingCache is unsuitable for RpcRouter since we need to distinguish
- // first creation of RPC Router, so that is why
- // we are using normal cache with load API and shared AtomicBoolean
- // for this call, which will be set to true if router was created.
- RpcRouter<T> router = (RpcRouter<T>) rpcRouters.get(type,new Callable<RpcRouter<?>>() {
-
- @Override
- public org.opendaylight.controller.sal.binding.api.rpc.RpcRouter<?> call() {
- RpcRouter<?> router = rpcFactory.getRouterFor(type, name);
- router.registerRouteChangeListener(new RouteChangeForwarder<T>(type));
- LOG.debug("Registering router {} as global implementation of {} in {}", router, type.getSimpleName(), this);
- RuntimeCodeHelper.setDelegate(getRpcService(type), router.getInvocationProxy());
- created.set(true);
- return router;
- }
- });
- if(created.get()) {
- notifyListenersRoutedCreated(router);
- }
- return router;
- } catch (ExecutionException | UncheckedExecutionException e) {
- // We rethrow Runtime Exceptions which were wrapped by
- // Execution Exceptions
- // otherwise we throw IllegalStateException with original
- Throwables.propagateIfPossible(e.getCause());
- throw new IllegalStateException("Could not load RPC Router for "+type.getName(),e);
- }
- }
-
- private void notifyGlobalRpcAdded(final Class<? extends RpcService> type) {
- for(ListenerRegistration<GlobalRpcRegistrationListener> listener : globalRpcListeners) {
- try {
- listener.getInstance().onGlobalRpcRegistered(type);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {}", e);
- }
- }
-
- }
-
- private void notifyListenersRoutedCreated(final RpcRouter<?> router) {
-
- for (ListenerRegistration<RouterInstantiationListener> listener : routerInstantiationListener) {
- try {
- listener.getInstance().onRpcRouterCreated(router);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {}", e);
- }
- }
-
- }
-
- public ListenerRegistration<RouterInstantiationListener> registerRouterInstantiationListener(
- final RouterInstantiationListener listener) {
- ListenerRegistration<RouterInstantiationListener> reg = routerInstantiationListener.register(listener);
- try {
- for (RpcRouter<?> router : rpcRouters.asMap().values()) {
- listener.onRpcRouterCreated(router);
- }
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {}", e);
- }
- return reg;
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
- final L listener) {
- return (ListenerRegistration<L>) routeChangeListeners.register(listener);
- }
-
- public RuntimeCodeGenerator getRpcFactory() {
- return rpcFactory;
- }
-
- public void setRpcFactory(final RuntimeCodeGenerator rpcFactory) {
- this.rpcFactory = rpcFactory;
- }
-
- public interface RouterInstantiationListener extends EventListener {
- void onRpcRouterCreated(RpcRouter<?> router);
- }
-
- public ListenerRegistration<GlobalRpcRegistrationListener> registerGlobalRpcRegistrationListener(final GlobalRpcRegistrationListener listener) {
- return globalRpcListeners.register(listener);
- }
-
- public interface GlobalRpcRegistrationListener extends EventListener {
- void onGlobalRpcRegistered(Class<? extends RpcService> cls);
- void onGlobalRpcUnregistered(Class<? extends RpcService> cls);
-
- }
-
- private final class RouteChangeForwarder<T extends RpcService> implements RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>> {
- private final Class<T> type;
-
- RouteChangeForwarder(final Class<T> type) {
- this.type = type;
- }
-
- @Override
- public void onRouteChange(final RouteChange<Class<? extends BaseIdentity>, InstanceIdentifier<?>> change) {
- Map<RpcContextIdentifier, Set<InstanceIdentifier<?>>> announcements = new HashMap<>();
- for (Entry<Class<? extends BaseIdentity>, Set<InstanceIdentifier<?>>> entry : change.getAnnouncements()
- .entrySet()) {
- RpcContextIdentifier key = RpcContextIdentifier.contextFor(type, entry.getKey());
- announcements.put(key, entry.getValue());
- }
- Map<RpcContextIdentifier, Set<InstanceIdentifier<?>>> removals = new HashMap<>();
- for (Entry<Class<? extends BaseIdentity>, Set<InstanceIdentifier<?>>> entry : change.getRemovals()
- .entrySet()) {
- RpcContextIdentifier key = RpcContextIdentifier.contextFor(type, entry.getKey());
- removals.put(key, entry.getValue());
- }
- RouteChange<RpcContextIdentifier, InstanceIdentifier<?>> toPublish = RoutingUtils
- .<RpcContextIdentifier, InstanceIdentifier<?>> change(announcements, removals);
- for (ListenerRegistration<RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> listener : routeChangeListeners) {
- try {
- listener.getInstance().onRouteChange(toPublish);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener",listener.getInstance(),e);
- }
- }
- }
- }
-
- private static final class RpcProxyRegistration<T extends RpcService> extends AbstractObjectRegistration<T> implements RpcRegistration<T> {
- private final RpcProviderRegistryImpl registry;
- private final Class<T> serviceType;
-
- RpcProxyRegistration(final Class<T> type, final T service, final RpcProviderRegistryImpl registry) {
- super(service);
- this.registry = Preconditions.checkNotNull(registry);
- this.serviceType = type;
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
-
- @Override
- protected void removeRegistration() {
- T publicProxy = registry.getRpcService(serviceType);
- RpcService currentDelegate = RuntimeCodeHelper.getDelegate(publicProxy);
- if (currentDelegate == getInstance()) {
- RuntimeCodeHelper.setDelegate(publicProxy, null);
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.binding.impl;
-
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.ServiceRegistration;
-
-@SuppressWarnings("all")
-public class RpcProxyContext {
- public RpcProxyContext(final Class<? extends RpcService> proxyClass) {
- this.proxyClass = proxyClass;
- }
-
- protected final Class<? extends RpcService> proxyClass;
-
- protected RpcService _proxy;
-
- public RpcService getProxy() {
- return this._proxy;
- }
-
- public void setProxy(final RpcService proxy) {
- this._proxy = proxy;
- }
-
- protected ServiceRegistration<? extends RpcService> _registration;
-
- public ServiceRegistration<? extends RpcService> getRegistration() {
- return this._registration;
- }
-
- public void setRegistration(final ServiceRegistration<? extends RpcService> registration) {
- this._registration = registration;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl.util;
-
-import org.opendaylight.controller.md.sal.common.impl.routing.AbstractDataReadRouter;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class BindingAwareDataReaderRouter extends AbstractDataReadRouter<InstanceIdentifier<? extends DataObject>,DataObject> {
- @Override
- protected DataObject merge(final InstanceIdentifier<? extends DataObject> path, final Iterable<DataObject> data) {
- return data.iterator().next();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.spi;
-
-import java.util.Set;
-
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-public interface NotificationInvokerFactory {
-
- NotificationInvoker invokerFor(org.opendaylight.yangtools.yang.binding.NotificationListener instance);
-
- public interface NotificationInvoker {
-
- Set<Class<? extends Notification>> getSupportedNotifications();
-
- NotificationListener<Notification> getInvocationProxy();
-
- public abstract void close();
-
- org.opendaylight.yangtools.yang.binding.NotificationListener getDelegate();
-
- }
-}
}
}
+ augment "/config:modules/config:module/config:configuration" {
+ case binding-notification-broker {
+ when "/config:modules/config:module/config:type = 'binding-notification-broker'";
+ container notification-adapter {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity binding-new-notification-service;
+ }
+ }
+ }
+
+ container notification-publish-adapter {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity binding-new-notification-publish-service;
+ }
+ }
+ }
+ }
+ }
+
augment "/config:modules/config:module/config:state" {
case binding-notification-broker {
when "/config:modules/config:module/config:type = 'binding-notification-broker'";
+++ /dev/null
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-
-import com.google.common.base.Throwables;
-import java.util.Arrays;
-import javassist.ClassPool;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumRpcProviderRegistry;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.binding.test.AbstractSchemaAwareTest;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMRpcRouter;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.OpendaylightTestRpcServiceService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.TestContext;
-import org.opendaylight.yangtools.binding.data.codec.gen.impl.DataObjectSerializerGenerator;
-import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-
-public class RpcProviderRegistryTest extends AbstractSchemaAwareTest {
-
- private static InstanceIdentifier<TopLevelList> FOO_PATH = path(TOP_FOO_KEY);
- private static InstanceIdentifier<TopLevelList> BAR_PATH = path(TOP_BAR_KEY);
- private static RpcContextIdentifier ROUTING_CONTEXT = RpcContextIdentifier.contextFor(OpendaylightTestRoutedRpcService.class, TestContext.class);
-
- private RpcProviderRegistry rpcRegistry;
-
-
- @Override
- protected Iterable<YangModuleInfo> getModuleInfos() {
- try {
- return Arrays.asList(
- BindingReflections.getModuleInfo(TopLevelList.class),
- BindingReflections.getModuleInfo(OpendaylightTestRoutedRpcService.class),
- BindingReflections.getModuleInfo(OpendaylightTestRpcServiceService.class));
- } catch (final Exception e) {
- throw Throwables.propagate(e);
- }
- }
-
- @Override
- protected void setupWithSchema(final SchemaContext context) {
- final DataObjectSerializerGenerator generator = StreamWriterGenerator.create(JavassistUtils.forClassPool(ClassPool.getDefault()));
- final BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
- final GeneratedClassLoadingStrategy classLoadingStrategy = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
- final BindingToNormalizedNodeCodec codec = new BindingToNormalizedNodeCodec(classLoadingStrategy, codecRegistry);
- final DOMRpcRouter domRpcRegistry = new DOMRpcRouter();
- domRpcRegistry.onGlobalContextUpdated(context);
- codec.onGlobalContextUpdated(context);
- final RpcConsumerRegistry consumer = new BindingDOMRpcServiceAdapter(domRpcRegistry, codec);
- final BindingDOMRpcProviderServiceAdapter provider = new BindingDOMRpcProviderServiceAdapter( domRpcRegistry,codec);
- rpcRegistry = new HeliumRpcProviderRegistry(consumer,provider);
- }
-
- @Test
- public void testGlobalRpcRegistrations() throws Exception {
- final OpendaylightTestRpcServiceService one = Mockito.mock(OpendaylightTestRpcServiceService.class);
- final OpendaylightTestRpcServiceService two = Mockito.mock(OpendaylightTestRpcServiceService.class);
-
- final RpcRegistration<OpendaylightTestRpcServiceService> regOne = rpcRegistry.addRpcImplementation(OpendaylightTestRpcServiceService.class, one);
- assertNotNull(regOne);
- rpcRegistry.addRpcImplementation(OpendaylightTestRpcServiceService.class, two);
- regOne.close();
- final RpcRegistration<OpendaylightTestRpcServiceService> regTwo = rpcRegistry.addRpcImplementation(OpendaylightTestRpcServiceService.class, two);
- assertNotNull(regTwo);
- }
-
-
- @Test
- @Ignore
- public void nonRoutedRegisteredAsRouted() {
- final OpendaylightTestRpcServiceService one = Mockito.mock(OpendaylightTestRpcServiceService.class);
- try {
- final RoutedRpcRegistration<OpendaylightTestRpcServiceService> reg = rpcRegistry.addRoutedRpcImplementation(OpendaylightTestRpcServiceService.class, one);
- reg.registerPath(null, BAR_PATH);
- fail("RpcIsNotRoutedException should be thrown");
- } catch (final RpcIsNotRoutedException e) {
- assertNotNull(e.getMessage());
- } catch (final Exception e) {
- fail("RpcIsNotRoutedException should be thrown");
- }
-
- }
-
-}
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
import org.opendaylight.controller.sal.binding.test.util.MockSchemaService;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
}
public NotificationService createNotificationService() {
- return new BindingDOMNotificationServiceAdapter(bindingToNormalized.getCodecRegistry(), domNotificationRouter,
- SingletonHolder.INVOKER_FACTORY);
+ return new BindingDOMNotificationServiceAdapter(bindingToNormalized.getCodecRegistry(), domNotificationRouter);
}
public NotificationPublishService createNotificationPublishService() {
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import java.util.ArrayList;
-import java.util.List;
-import javassist.ClassPool;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRoutingTable;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
-import org.opendaylight.controller.sal.binding.test.mock.BarListener;
-import org.opendaylight.controller.sal.binding.test.mock.BarUpdate;
-import org.opendaylight.controller.sal.binding.test.mock.FlowDelete;
-import org.opendaylight.controller.sal.binding.test.mock.FooListener;
-import org.opendaylight.controller.sal.binding.test.mock.FooService;
-import org.opendaylight.controller.sal.binding.test.mock.FooUpdate;
-import org.opendaylight.controller.sal.binding.test.mock.ReferencableObject;
-import org.opendaylight.controller.sal.binding.test.mock.ReferencableObjectKey;
-import org.opendaylight.controller.sal.binding.test.mock.SimpleInput;
-import org.opendaylight.yangtools.yang.binding.Augmentation;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.DataContainer;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class DefaultRuntimeCodeGeneratorTest {
-
- private RuntimeCodeGenerator codeGenerator;
- private NotificationInvokerFactory invokerFactory;
-
- @Before
- public void initialize() {
- this.codeGenerator = new DefaultRuntimeCodeGenerator(ClassPool.getDefault());
- this.invokerFactory = codeGenerator.getInvokerFactory();
- }
-
- @Test
- public void testGenerateDirectProxy() {
- FooService product = codeGenerator.getDirectProxyFor(FooService.class);
- assertNotNull(product);
- }
-
- @Test
- public void testGenerateRouter() throws Exception {
- RpcRouter<FooService> product = codeGenerator.getRouterFor(FooService.class,"test");
- assertNotNull(product);
- assertNotNull(product.getInvocationProxy());
-
- assertEquals("2 fields should be generated.", 2, product.getInvocationProxy().getClass().getFields().length);
-
- verifyRouting(product);
- }
-
- @Test
- public void testInvoker() throws Exception {
-
- FooListenerImpl fooListener = new FooListenerImpl();
-
- NotificationInvoker invokerFoo = invokerFactory.invokerFor(fooListener);
-
-
- assertSame(fooListener,invokerFoo.getDelegate());
- assertNotNull(invokerFoo.getSupportedNotifications());
- assertEquals(1, invokerFoo.getSupportedNotifications().size());
- assertNotNull(invokerFoo.getInvocationProxy());
-
- FooUpdateImpl fooOne = new FooUpdateImpl();
- invokerFoo.getInvocationProxy().onNotification(fooOne);
-
- assertEquals(1, fooListener.receivedFoos.size());
- assertSame(fooOne, fooListener.receivedFoos.get(0));
-
- CompositeListenerImpl composite = new CompositeListenerImpl();
-
- NotificationInvoker invokerComposite = invokerFactory.invokerFor(composite);
-
- assertNotNull(invokerComposite.getSupportedNotifications());
- assertEquals(3, invokerComposite.getSupportedNotifications().size());
- assertNotNull(invokerComposite.getInvocationProxy());
-
- invokerComposite.getInvocationProxy().onNotification(fooOne);
-
- assertEquals(1, composite.receivedFoos.size());
- assertSame(fooOne, composite.receivedFoos.get(0));
-
- assertEquals(0, composite.receivedBars.size());
-
- BarUpdateImpl barOne = new BarUpdateImpl();
-
- invokerComposite.getInvocationProxy().onNotification(barOne);
-
- assertEquals(1, composite.receivedFoos.size());
- assertEquals(1, composite.receivedBars.size());
- assertSame(barOne, composite.receivedBars.get(0));
-
- }
-
- private void verifyRouting(final RpcRouter<FooService> product) {
- assertNotNull("Routing table should be initialized", product.getRoutingTable(BaseIdentity.class));
-
- RpcRoutingTable<BaseIdentity, FooService> routingTable = product.getRoutingTable(BaseIdentity.class);
-
- int servicesCount = 2;
- int instancesPerService = 3;
-
- InstanceIdentifier<?>[][] identifiers = identifiers(servicesCount, instancesPerService);
- FooService service[] = new FooService[] { mock(FooService.class, "Instance 0"),
- mock(FooService.class, "Instance 1") };
-
- for (int i = 0; i < service.length; i++) {
- for (InstanceIdentifier<?> instance : identifiers[i]) {
- routingTable.updateRoute(instance, service[i]);
- }
- }
-
- assertEquals("All instances should be registered.", servicesCount * instancesPerService, routingTable
- .getRoutes().size());
-
- SimpleInput[] instance_0_input = new SimpleInputImpl[] { new SimpleInputImpl(identifiers[0][0]),
- new SimpleInputImpl(identifiers[0][1]), new SimpleInputImpl(identifiers[0][2]) };
-
- SimpleInput[] instance_1_input = new SimpleInputImpl[] { new SimpleInputImpl(identifiers[1][0]),
- new SimpleInputImpl(identifiers[1][1]), new SimpleInputImpl(identifiers[1][2]) };
-
- // We test sending mock messages
-
- product.getInvocationProxy().simple(instance_0_input[0]);
- verify(service[0]).simple(instance_0_input[0]);
-
- product.getInvocationProxy().simple(instance_0_input[1]);
- product.getInvocationProxy().simple(instance_0_input[2]);
-
- verify(service[0]).simple(instance_0_input[1]);
- verify(service[0]).simple(instance_0_input[2]);
-
- product.getInvocationProxy().simple(instance_1_input[0]);
-
- // We should have call to instance 1
- verify(service[1]).simple(instance_1_input[0]);
-
- /*
- * Generated RPC service should throw illegalArgumentException
- * with message if rpc input is null.
- */
- try {
- product.getInvocationProxy().simple(null);
- fail("Generated RPC router should throw IllegalArgumentException on null input");
- } catch (IllegalArgumentException e){
- assertNotNull(e.getMessage());
- }
-
-
- /*
- * Generated RPC service should throw illegalArgumentException
- * with message if rpc route is null.
- */
- try {
- SimpleInput withoutValue = new SimpleInputImpl(null);
- product.getInvocationProxy().simple(withoutValue);
- fail("Generated RPC router should throw IllegalArgumentException on null value for route");
- } catch (IllegalArgumentException e){
- assertNotNull(e.getMessage());
- }
-
- }
-
- private InstanceIdentifier<?>[][] identifiers(final int serviceSize, final int instancesPerService) {
- InstanceIdentifier<?>[][] ret = new InstanceIdentifier[serviceSize][];
- int service = 0;
- for (int i = 0; i < serviceSize; i++) {
-
- InstanceIdentifier<?>[] instanceIdentifiers = new InstanceIdentifier[instancesPerService];
- ret[i] = instanceIdentifiers;
- for (int id = 0; id < instancesPerService; id++) {
- instanceIdentifiers[id] = referencableIdentifier(service * instancesPerService + id);
- }
- service++;
- }
-
- return ret;
- }
-
- private InstanceIdentifier<?> referencableIdentifier(final int i) {
- return InstanceIdentifier.builder(ReferencableObject.class, new ReferencableObjectKey(i)).build();
- }
-
- private static class SimpleInputImpl implements SimpleInput {
- private final InstanceIdentifier<?> identifier;
-
- public SimpleInputImpl(final InstanceIdentifier<?> _identifier) {
- this.identifier = _identifier;
- }
-
- @Override
- public <E extends Augmentation<SimpleInput>> E getAugmentation(final Class<E> augmentationType) {
- return null;
- }
-
- @Override
- public InstanceIdentifier<?> getIdentifier() {
- return this.identifier;
- }
-
- @Override
- public Class<? extends DataObject> getImplementedInterface() {
- return SimpleInput.class;
- }
- }
-
- private static class FooUpdateImpl implements FooUpdate {
- @Override
- public Class<? extends DataContainer> getImplementedInterface() {
- return FooUpdate.class;
- }
- }
-
- private static class BarUpdateImpl implements BarUpdate {
- @Override
- public Class<? extends DataContainer> getImplementedInterface() {
- return BarUpdate.class;
- }
-
- @Override
- public InstanceIdentifier<?> getInheritedIdentifier() {
- return null;
- }
- }
-
- private static class FooListenerImpl implements FooListener {
-
- List<FooUpdate> receivedFoos = new ArrayList<>();
-
- @Override
- public void onFooUpdate(final FooUpdate notification) {
- receivedFoos.add(notification);
- }
-
- }
-
- private static class CompositeListenerImpl extends FooListenerImpl implements BarListener {
-
- List<BarUpdate> receivedBars = new ArrayList<>();
- List<FlowDelete> receivedDeletes = new ArrayList<>();
-
- @Override
- public void onBarUpdate(final BarUpdate notification) {
- receivedBars.add(notification);
- }
-
- @Override
- public void onFlowDelete(final FlowDelete notification) {
- receivedDeletes.add(notification);
- }
-
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import java.lang.reflect.Field;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Ignore
-public class SingletonHolderTest {
- private static final Logger logger = LoggerFactory.getLogger(SingletonHolderTest.class);
-
- @Test
- public void testNotificationExecutor() throws Exception {
- ListeningExecutorService executor = SingletonHolder.getDefaultNotificationExecutor();
- ThreadPoolExecutor tpExecutor = (ThreadPoolExecutor) setAccessible(executor.getClass().getDeclaredField("delegate")).get(executor);
- BlockingQueue<Runnable> queue = tpExecutor.getQueue();
-
- for (int idx = 0; idx < 100; idx++) {
- final int idx2 = idx;
- logger.info("Adding {}\t{}\t{}", idx, queue.size(), tpExecutor.getActiveCount());
- executor.execute(new Runnable() {
-
- @Override
- public void run() {
- logger.info("in {}", idx2);
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- logger.info("out {}", idx2);
- }
- });
- }
- executor.shutdown();
- executor.awaitTermination(10, TimeUnit.SECONDS);
- }
-
- private static Field setAccessible(Field field) {
- field.setAccessible(true);
- return field;
- }
-}
import javassist.ClassPool;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.MountPointService;
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.controller.md.sal.binding.api.NotificationService;
+import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
import org.opendaylight.controller.md.sal.binding.compat.HeliumRpcProviderRegistry;
import org.opendaylight.controller.md.sal.binding.compat.HydrogenDataBrokerAdapter;
import org.opendaylight.controller.md.sal.binding.compat.HydrogenMountProvisionServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMMountPointServiceAdapter;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationPublishServiceAdapter;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMRpcRouter;
import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.broker.impl.mount.DOMMountPointServiceImpl;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.controller.sal.binding.api.mount.MountProviderService;
-import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
import org.opendaylight.controller.sal.binding.impl.RootBindingAwareBroker;
import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
import org.opendaylight.controller.sal.core.api.BrokerService;
private RootBindingAwareBroker baBrokerImpl;
- private NotificationBrokerImpl baNotifyImpl;
+ private HeliumNotificationProviderServiceAdapter baNotifyImpl;
private BrokerImpl biBrokerImpl;
private BindingDOMRpcProviderServiceAdapter baProviderRpc;
private DOMRpcRouter domRouter;
+ private NotificationPublishService publishService;
+
+ private NotificationService listenService;
+
+ private DOMNotificationPublishService domPublishService;
+
+ private DOMNotificationService domListenService;
+
public DOMDataBroker getDomAsyncDataBroker() {
public void startBindingNotificationBroker() {
checkState(executor != null);
- baNotifyImpl = new NotificationBrokerImpl(executor);
+ final DOMNotificationRouter router = DOMNotificationRouter.create(16);
+ domPublishService = router;
+ domListenService = router;
+ publishService = new BindingDOMNotificationPublishServiceAdapter(codec, domPublishService);
+ listenService = new BindingDOMNotificationServiceAdapter(codec, domListenService);
+ baNotifyImpl = new HeliumNotificationProviderServiceAdapter(publishService,listenService);
}
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-notification-broker</type>
<name>binding-notification-broker</name>
+ <notification-adapter xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-new-notification-service</type>
+ <name>binding-notification-adapter</name>
+ </notification-adapter>
+ <notification-publish-adapter xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-new-notification-publish-service</type>
+ <name>binding-notification-publish-adapter</name>
+ </notification-publish-adapter>
</module>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-broker-impl</type>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-inmemory-data-broker</type>
<name>inmemory-data-broker</name>
+
<schema-service>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
<name>yang-schema-service</name>
</schema-service>
+
+ <config-data-store>
+ <type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
+ <name>config-store-service</name>
+ </config-data-store>
+
+ <operational-data-store>
+ <type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
+ <name>operational-store-service</name>
+ </operational-data-store>
</module>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-broker-impl</type>
input = new DataInputStream(stream);
}
- public NormalizedNodeInputStreamReader(DataInput input) throws IOException {
+ public NormalizedNodeInputStreamReader(DataInput input) {
this.input = Preconditions.checkNotNull(input);
}
return children;
}
- private PathArgument readPathArgument() throws IOException {
+ public PathArgument readPathArgument() throws IOException {
// read Type
int type = input.readByte();
@Override
public void close() throws IOException {
+ flush();
}
@Override
}
}
- private void writePathArgument(YangInstanceIdentifier.PathArgument pathArgument) throws IOException {
+ public void writePathArgument(YangInstanceIdentifier.PathArgument pathArgument) throws IOException {
byte type = PathArgumentTypes.getSerializablePathArgumentType(pathArgument);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import static com.google.common.base.Preconditions.checkState;
+import com.google.common.collect.ImmutableMap;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base implementation of {@link DOMDataBroker} which dispatches transactions and listener
+ * registrations to a map of backing {@link DOMStore}s keyed by {@link LogicalDatastoreType}.
+ */
+public abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore>
+        implements DOMDataBroker, AutoCloseable {
+
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
+
+    // Monotonic counters used only to generate human-readable transaction/chain identifiers.
+    private final AtomicLong txNum = new AtomicLong();
+    private final AtomicLong chainNum = new AtomicLong();
+    private final Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> extensions;
+    // Optional extra resource closed together with this broker; see setCloseable().
+    private volatile AutoCloseable closeable;
+
+    protected AbstractDOMBroker(final Map<LogicalDatastoreType, DOMStore> datastores) {
+        super(datastores);
+
+        // Advertise the data-tree-change extension only when every backing store is capable
+        // of publishing tree change events.
+        boolean treeChange = true;
+        for (DOMStore ds : datastores.values()) {
+            if (!(ds instanceof DOMStoreTreeChangePublisher)) {
+                treeChange = false;
+                break;
+            }
+        }
+
+        if (treeChange) {
+            extensions = ImmutableMap.<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension>of(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
+                @Override
+                public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(final DOMDataTreeIdentifier treeId, final L listener) {
+                    DOMStore publisher = getTxFactories().get(treeId.getDatastoreType());
+                    checkState(publisher != null, "Requested logical data store is not available.");
+
+                    // Safe: the constructor only takes this branch when all stores implement
+                    // DOMStoreTreeChangePublisher.
+                    return ((DOMStoreTreeChangePublisher) publisher).registerTreeChangeListener(treeId.getRootIdentifier(), listener);
+                }
+            });
+        } else {
+            extensions = Collections.emptyMap();
+        }
+    }
+
+    // Registers an additional resource which will be closed when this broker is closed.
+    public void setCloseable(final AutoCloseable closeable) {
+        this.closeable = closeable;
+    }
+
+    @Override
+    public void close() {
+        super.close();
+
+        if (closeable != null) {
+            try {
+                closeable.close();
+            } catch (Exception e) {
+                // Best-effort cleanup: failure to close the auxiliary resource is not fatal.
+                LOG.debug("Error closing instance", e);
+            }
+        }
+    }
+
+    @Override
+    protected Object newTransactionIdentifier() {
+        return "DOM-" + txNum.getAndIncrement();
+    }
+
+    @Override
+    public ListenerRegistration<DOMDataChangeListener> registerDataChangeListener(final LogicalDatastoreType store,
+            final YangInstanceIdentifier path, final DOMDataChangeListener listener, final DataChangeScope triggeringScope) {
+
+        DOMStore potentialStore = getTxFactories().get(store);
+        checkState(potentialStore != null, "Requested logical data store is not available.");
+        return potentialStore.registerChangeListener(path, listener, triggeringScope);
+    }
+
+    @Override
+    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
+        return extensions;
+    }
+
+    @Override
+    public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
+        checkNotClosed();
+
+        // Each backing store contributes its own chain; the broker-level chain coordinates them.
+        final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
+        for (Map.Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
+            backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
+        }
+
+        final long chainId = chainNum.getAndIncrement();
+        LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener,
+                backingChains);
+        return new DOMBrokerTransactionChain(chainId, backingChains, this, listener);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Base class for composite DOM transactions which lazily allocate one backing
+ * {@link DOMStoreTransaction} per datastore key on first access.
+ */
+public abstract class AbstractDOMBrokerTransaction<K, T extends DOMStoreTransaction> implements
+        AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
+
+    private final Map<K, T> backingTxs;
+    private final Object identifier;
+    private final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories;
+
+    /**
+     * Creates new composite Transactions.
+     *
+     * @param identifier Identifier of transaction; must not be null.
+     * @param storeTxFactories per-datastore factories used to lazily allocate subtransactions;
+     *                         must not be null.
+     */
+    protected AbstractDOMBrokerTransaction(final Object identifier,
+            final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
+        this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
+        this.storeTxFactories = Preconditions.checkNotNull(storeTxFactories,
+                "Store Transaction Factories should not be null");
+        // Concrete subclasses key their subtransactions by LogicalDatastoreType, so an EnumMap
+        // is used underneath. K itself is not bounded to an enum type, hence the localized
+        // unchecked conversion. NOTE(review): consider bounding K so the raw construction can
+        // be removed entirely.
+        @SuppressWarnings({ "unchecked", "rawtypes" })
+        final Map<K, T> txs = new EnumMap(LogicalDatastoreType.class);
+        this.backingTxs = txs;
+    }
+
+    /**
+     * Returns the subtransaction associated with the supplied key, allocating it on first use.
+     *
+     * @param key the datastore key
+     * @return the subtransaction for {@code key}, never null
+     * @throws NullPointerException
+     *             if key is null
+     * @throws IllegalArgumentException
+     *             if {@link #createTransaction(Object)} yields no transaction for key.
+     */
+    protected final T getSubtransaction(final K key) {
+        Preconditions.checkNotNull(key, "key must not be null.");
+
+        T ret = backingTxs.get(key);
+        if (ret == null) {
+            ret = createTransaction(key);
+            // Validate before caching, so a failed creation cannot poison the map with a
+            // null entry (the previous put-then-check ordering could).
+            Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+            backingTxs.put(key, ret);
+        }
+        return ret;
+    }
+
+    /**
+     * Creates the backing subtransaction for the supplied key.
+     *
+     * @param key the datastore key
+     * @return a new subtransaction; must not be null
+     */
+    protected abstract T createTransaction(final K key);
+
+    /**
+     * Returns a view of all subtransactions allocated so far.
+     */
+    protected Collection<T> getSubtransactions() {
+        return backingTxs.values();
+    }
+
+    @Override
+    public Object getIdentifier() {
+        return identifier;
+    }
+
+    /**
+     * Closes all allocated subtransactions. All failures are reported through a single
+     * {@link IllegalStateException}: the first exception becomes its cause and any further
+     * exceptions are attached as suppressed exceptions.
+     */
+    protected void closeSubtransactions() {
+        IllegalStateException failure = null;
+        for (T subtransaction : backingTxs.values()) {
+            try {
+                subtransaction.close();
+            } catch (Exception e) {
+                if (failure == null) {
+                    failure = new IllegalStateException("Uncaught exception occurred during closing transaction", e);
+                } else {
+                    // Record additional failures without losing the first one.
+                    failure.addSuppressed(e);
+                }
+            }
+        }
+        // Throw only after every subtransaction has had a chance to close.
+        if (failure != null) {
+            throw failure;
+        }
+    }
+
+    /**
+     * Returns the transaction factory registered for the supplied datastore key.
+     */
+    protected DOMStoreTransactionFactory getTxFactory(K type) {
+        return storeTxFactories.get(type);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
+
+/**
+ * Base class for factories which hand out DOM data transactions backed by a fixed map of
+ * per-{@link LogicalDatastoreType} {@link DOMStoreTransactionFactory} instances.
+ * Instances are single-close: {@link #close()} may be invoked at most once.
+ */
+public abstract class AbstractDOMTransactionFactory<T extends DOMStoreTransactionFactory> implements AutoCloseable {
+    private static final AtomicIntegerFieldUpdater<AbstractDOMTransactionFactory> UPDATER =
+            AtomicIntegerFieldUpdater.newUpdater(AbstractDOMTransactionFactory.class, "closed");
+    private final Map<LogicalDatastoreType, T> storeTxFactories;
+    // 0 = open, 1 = closed; flipped exactly once via UPDATER in close().
+    private volatile int closed = 0;
+
+    protected AbstractDOMTransactionFactory(final Map<LogicalDatastoreType, T> txFactories) {
+        this.storeTxFactories = new EnumMap<>(txFactories);
+    }
+
+    /**
+     * Implementations must return unique identifier for each and every call of
+     * this method.
+     *
+     * @return new Unique transaction identifier.
+     */
+    protected abstract Object newTransactionIdentifier();
+
+    /**
+     * Submits a write transaction for commit.
+     *
+     * @param transaction the transaction being submitted
+     * @param cohorts three-phase commit cohorts readied from the transaction's backing
+     *                subtransactions
+     * @return future which completes when the commit does, failing with
+     *         {@link TransactionCommitFailedException} on commit failure
+     */
+    protected abstract CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
+            final Collection<DOMStoreThreePhaseCommitCohort> cohorts);
+
+    /**
+     * Creates a new read-only transaction; per-store subtransactions are allocated lazily.
+     *
+     * @return a new read-only transaction
+     * @throws IllegalStateException if this factory has already been closed
+     */
+    public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+        checkNotClosed();
+
+        return new DOMBrokerReadOnlyTransaction(newTransactionIdentifier(), storeTxFactories);
+    }
+
+
+    /**
+     * Creates a new write-only transaction; per-store subtransactions are allocated lazily.
+     *
+     * @return a new write-only transaction
+     * @throws IllegalStateException if this factory has already been closed
+     */
+    public final DOMDataWriteTransaction newWriteOnlyTransaction() {
+        checkNotClosed();
+
+        return new DOMBrokerWriteOnlyTransaction(newTransactionIdentifier(), storeTxFactories, this);
+    }
+
+
+    /**
+     * Creates a new read-write transaction; per-store subtransactions are allocated lazily.
+     *
+     * @return a new read-write transaction
+     * @throws IllegalStateException if this factory has already been closed
+     */
+    public final DOMDataReadWriteTransaction newReadWriteTransaction() {
+        checkNotClosed();
+
+        return new DOMBrokerReadWriteTransaction<>(newTransactionIdentifier(), storeTxFactories, this);
+    }
+
+    /**
+     * Convenience accessor of backing factories intended to be used only by
+     * finalization of this class.
+     *
+     * <b>Note:</b>
+     * Finalization of this class may want to access other functionality of
+     * supplied Transaction factories.
+     *
+     * @return Map of backing transaction factories.
+     */
+    protected final Map<LogicalDatastoreType, T> getTxFactories() {
+        return storeTxFactories;
+    }
+
+    /**
+     * Checks if instance is not closed.
+     *
+     * @throws IllegalStateException If instance of this class was closed.
+     *
+     */
+    protected final void checkNotClosed() {
+        Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
+    }
+
+    @Override
+    public void close() {
+        // Single-shot close: the CAS guarantees exactly one open->closed transition.
+        final boolean success = UPDATER.compareAndSet(this, 0, 1);
+        Preconditions.checkState(success, "Transaction factory was already closed");
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Composite read-only transaction which delegates reads to lazily-allocated per-datastore
+ * {@link DOMStoreReadTransaction}s.
+ */
+public class DOMBrokerReadOnlyTransaction<T extends DOMStoreReadTransaction>
+        extends AbstractDOMBrokerTransaction<LogicalDatastoreType, T>
+        implements DOMDataReadOnlyTransaction {
+    /**
+     * Creates new composite Transactions.
+     *
+     * @param identifier Identifier of transaction.
+     * @param storeTxFactories per-datastore transaction factories.
+     *        NOTE(review): declared as a raw {@code Map}; it should be
+     *        {@code Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory>}, which
+     *        would require an extra import -- left untyped here to keep this change
+     *        import-neutral, with the warnings suppressed at the narrowest scope.
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    protected DOMBrokerReadOnlyTransaction(Object identifier, Map storeTxFactories) {
+        super(identifier, storeTxFactories);
+    }
+
+    @Override
+    public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
+            final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        // Delegate to the backing store transaction for the requested datastore.
+        return getSubtransaction(store).read(path);
+    }
+
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(
+            final LogicalDatastoreType store,
+            final YangInstanceIdentifier path) {
+        return getSubtransaction(store).exists(path);
+    }
+
+    @Override
+    public void close() {
+        closeSubtransactions();
+    }
+
+    @Override
+    protected T createTransaction(LogicalDatastoreType key) {
+        // The factory API is not generic over its transaction type, hence the cast. Presumably
+        // safe because callers instantiate T as the factory's read transaction type -- TODO
+        // confirm at call sites.
+        @SuppressWarnings("unchecked")
+        final T tx = (T) getTxFactory(key).newReadOnlyTransaction();
+        return tx;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Composite read-write transaction which delegates reads and writes to per-datastore
+ * {@link DOMStoreReadWriteTransaction}s.
+ *
+ * <p>NOTE(review): the type parameter {@code T} is declared but never used -- the superclass is
+ * instantiated with {@code DOMStoreReadWriteTransaction} directly. Removing it would break
+ * existing {@code new DOMBrokerReadWriteTransaction<>(...)} call sites, so it is only flagged
+ * here for a follow-up.
+ */
+public class DOMBrokerReadWriteTransaction<T extends DOMStoreReadWriteTransaction>
+        extends DOMBrokerWriteOnlyTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
+    /**
+     * Creates new composite Transactions.
+     *
+     * @param identifier Identifier of transaction.
+     * @param storeTxFactories per-datastore transaction factories.
+     * @param commitImpl factory which performs the actual commit of this transaction.
+     */
+    protected DOMBrokerReadWriteTransaction(Object identifier, Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories, final AbstractDOMTransactionFactory<?> commitImpl) {
+        super(identifier, storeTxFactories, commitImpl);
+    }
+
+    @Override
+    public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
+            final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        // Delegate to the backing store transaction for the requested datastore.
+        return getSubtransaction(store).read(path);
+    }
+
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(
+            final LogicalDatastoreType store,
+            final YangInstanceIdentifier path) {
+        return getSubtransaction(store).exists(path);
+    }
+
+    @Override
+    protected DOMStoreReadWriteTransaction createTransaction(LogicalDatastoreType key) {
+        // No cast needed here: the factory returns exactly the type used to instantiate the
+        // superclass.
+        return getTxFactory(key).newReadWriteTransaction();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@link DOMTransactionChain} implementation which creates its transactions against a set of
+ * backing {@link DOMStoreTransactionChain}s and delegates the actual commit to the owning
+ * {@link AbstractDOMBroker}. Chain completion (success or failure) is reported to the supplied
+ * {@link TransactionChainListener}.
+ */
+final class DOMBrokerTransactionChain extends AbstractDOMTransactionFactory<DOMStoreTransactionChain>
+        implements DOMTransactionChain {
+    private enum State {
+        RUNNING,
+        CLOSING,
+        CLOSED,
+        FAILED,
+    }
+
+    private static final AtomicIntegerFieldUpdater<DOMBrokerTransactionChain> COUNTER_UPDATER =
+            AtomicIntegerFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, "counter");
+    private static final AtomicReferenceFieldUpdater<DOMBrokerTransactionChain, State> STATE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, State.class, "state");
+    private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerTransactionChain.class);
+    private final AtomicLong txNum = new AtomicLong();
+    private final AbstractDOMBroker broker;
+    private final TransactionChainListener listener;
+    private final long chainId;
+
+    private volatile State state = State.RUNNING;
+    // Number of submitted-but-not-yet-completed transactions; see transactionCompleted().
+    private volatile int counter = 0;
+
+    /**
+     * @param chainId
+     *            ID of transaction chain
+     * @param chains
+     *            Backing {@link DOMStoreTransactionChain}s.
+     * @param broker
+     *            Broker which performs the actual commit of submitted transactions.
+     * @param listener
+     *            Listener, which listens on transaction chain events.
+     * @throws NullPointerException
+     *             If any of arguments is null.
+     */
+    public DOMBrokerTransactionChain(final long chainId,
+            final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+            AbstractDOMBroker broker, final TransactionChainListener listener) {
+        super(chains);
+        this.chainId = chainId;
+        this.broker = Preconditions.checkNotNull(broker);
+        this.listener = Preconditions.checkNotNull(listener);
+    }
+
+    private void checkNotFailed() {
+        Preconditions.checkState(state != State.FAILED, "Transaction chain has failed");
+    }
+
+    @Override
+    protected Object newTransactionIdentifier() {
+        return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
+    }
+
+    @Override
+    public CheckedFuture<Void, TransactionCommitFailedException> submit(
+            final DOMDataWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+        checkNotFailed();
+        checkNotClosed();
+
+        final CheckedFuture<Void, TransactionCommitFailedException> ret = broker.submit(transaction, cohorts);
+
+        // Track the outstanding transaction so close() can defer chain completion until every
+        // submitted transaction has finished.
+        COUNTER_UPDATER.incrementAndGet(this);
+        Futures.addCallback(ret, new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(final Void result) {
+                transactionCompleted();
+            }
+
+            @Override
+            public void onFailure(final Throwable t) {
+                transactionFailed(transaction, t);
+            }
+        });
+
+        return ret;
+    }
+
+    @Override
+    public void close() {
+        final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
+        if (!success) {
+            LOG.debug("Chain {} is no longer running", this);
+            return;
+        }
+
+        super.close();
+        for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
+            subChain.close();
+        }
+
+        // Nothing outstanding: the chain can complete immediately.
+        if (counter == 0) {
+            finishClose();
+        }
+    }
+
+    private void finishClose() {
+        state = State.CLOSED;
+        listener.onTransactionChainSuccessful(this);
+    }
+
+    private void transactionCompleted() {
+        // The last outstanding transaction of a closing chain finishes the close.
+        if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
+            finishClose();
+        }
+    }
+
+    private void transactionFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+        state = State.FAILED;
+        // SLF4J treats a trailing Throwable argument as the exception to log.
+        LOG.debug("Transaction chain {} failed.", this, cause);
+        listener.onTransactionChainFailed(this, tx, cause);
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Composite write-only transaction. Writes are delegated to lazily-allocated per-datastore
+ * {@link DOMStoreWriteTransaction}s; the actual commit is delegated to the
+ * {@link AbstractDOMTransactionFactory} supplied at construction.
+ */
+public class DOMBrokerWriteOnlyTransaction<T extends DOMStoreWriteTransaction>
+        extends AbstractDOMBrokerTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+
+    @SuppressWarnings("rawtypes")
+    private static final AtomicReferenceFieldUpdater<DOMBrokerWriteOnlyTransaction, AbstractDOMTransactionFactory> IMPL_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(DOMBrokerWriteOnlyTransaction.class, AbstractDOMTransactionFactory.class, "commitImpl");
+    @SuppressWarnings("rawtypes")
+    private static final AtomicReferenceFieldUpdater<DOMBrokerWriteOnlyTransaction, Future> FUTURE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(DOMBrokerWriteOnlyTransaction.class, Future.class, "commitFuture");
+    private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerWriteOnlyTransaction.class);
+    private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
+
+    /**
+     * Implementation of real commit. It also acts as an indication that
+     * the transaction is running -- which we flip atomically using
+     * {@link #IMPL_UPDATER}.
+     */
+    private volatile AbstractDOMTransactionFactory<?> commitImpl;
+
+    /**
+     * Future task of transaction commit. It starts off as null, but is
+     * set appropriately on {@link #submit()} and {@link #cancel()} via
+     * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
+     *
+     * Lazy set is safe for use because it is only referenced to in the
+     * {@link #cancel()} slow path, where we will busy-wait for it. The
+     * fast path gets the benefit of a store-store barrier instead of the
+     * usual store-load barrier.
+     */
+    private volatile Future<?> commitFuture;
+
+    /**
+     * @param identifier Identifier of transaction.
+     * @param storeTxFactories per-datastore transaction factories.
+     *        NOTE(review): declared as a raw {@code Map}; it should be
+     *        {@code Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory>}, which
+     *        would require an extra import -- left untyped here to keep this change
+     *        import-neutral, with the warnings suppressed at the narrowest scope.
+     * @param commitImpl factory which performs the actual commit; must not be null.
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    protected DOMBrokerWriteOnlyTransaction(final Object identifier,
+            Map storeTxFactories, final AbstractDOMTransactionFactory<?> commitImpl) {
+        super(identifier, storeTxFactories);
+        this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
+    }
+
+    @Override
+    protected T createTransaction(LogicalDatastoreType key) {
+        // FIXME : Casting shouldn't be necessary here
+        @SuppressWarnings("unchecked")
+        final T tx = (T) getTxFactory(key).newWriteOnlyTransaction();
+        return tx;
+    }
+
+    @Override
+    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+        checkRunning(commitImpl);
+        getSubtransaction(store).write(path, data);
+    }
+
+    @Override
+    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        checkRunning(commitImpl);
+        getSubtransaction(store).delete(path);
+    }
+
+    @Override
+    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+        checkRunning(commitImpl);
+        getSubtransaction(store).merge(path, data);
+    }
+
+    @Override
+    public boolean cancel() {
+        final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
+        if (impl != null) {
+            // We won the race against submit(): record the cancelled future and release the
+            // subtransactions.
+            LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+            FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
+            closeSubtransactions();
+            return true;
+        }
+
+        // The transaction is in process of being submitted or cancelled. Busy-wait
+        // for the corresponding future.
+        Future<?> future;
+        do {
+            future = commitFuture;
+        } while (future == null);
+
+        return future.cancel(false);
+    }
+
+    @Deprecated
+    @Override
+    public ListenableFuture<RpcResult<TransactionStatus>> commit() {
+        return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
+    }
+
+    @Override
+    public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+        // Atomically flip the running indicator; losing it means submit()/cancel() already ran.
+        final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
+        checkRunning(impl);
+
+        final Collection<T> txns = getSubtransactions();
+        final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
+
+        // FIXME: deal with errors thrown by backed (ready and submit can fail in theory)
+        for (DOMStoreWriteTransaction txn : txns) {
+            cohorts.add(txn.ready());
+        }
+
+        final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+        FUTURE_UPDATER.lazySet(this, ret);
+        return ret;
+    }
+
+    /**
+     * @throws IllegalStateException if the transaction has already been submitted or cancelled.
+     */
+    private void checkRunning(final AbstractDOMTransactionFactory<?> impl) {
+        Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+
+/**
+ * Abstract base class for our internal implementation of {@link DataTreeCandidateNode},
+ * which we instantiate from a serialized stream. We do not retain the before-image and
+ * do not implement {@link #getModifiedChild(PathArgument)}, as that method is only
+ * useful for end users. Instances based on this class should never be leaked outside of
+ * this component.
+ */
+abstract class AbstractDataTreeCandidateNode implements DataTreeCandidateNode {
+ // The only state carried across deserialization; everything else is supplied by subclasses.
+ private final ModificationType type;
+
+ protected AbstractDataTreeCandidateNode(final ModificationType type) {
+ this.type = Preconditions.checkNotNull(type);
+ }
+
+ @Override
+ public final DataTreeCandidateNode getModifiedChild(final PathArgument identifier) {
+ // Deliberately unsupported for these internal instances; see class javadoc.
+ throw new UnsupportedOperationException("Not implemented");
+ }
+
+ @Override
+ public final ModificationType getModificationType() {
+ return type;
+ }
+
+ @Override
+ public final Optional<NormalizedNode<?, ?>> getDataBefore() {
+ // The before-image is not retained during serialization, so it can never be provided here.
+ throw new UnsupportedOperationException("Before-image not available after serialization");
+ }
+
+ /**
+ * Create a node representing an UNMODIFIED root. Only the modification type is
+ * meaningful; all other accessors throw, as no further data survives serialization.
+ */
+ static DataTreeCandidateNode createUnmodified() {
+ return new AbstractDataTreeCandidateNode(ModificationType.UNMODIFIED) {
+ @Override
+ public PathArgument getIdentifier() {
+ throw new UnsupportedOperationException("Root node does not have an identifier");
+ }
+
+ @Override
+ public Optional<NormalizedNode<?, ?>> getDataAfter() {
+ throw new UnsupportedOperationException("After-image not available after serialization");
+ }
+
+ @Override
+ public Collection<DataTreeCandidateNode> getChildNodes() {
+ throw new UnsupportedOperationException("Children not available after serialization");
+ }
+ };
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+
+/**
+ * Abstract base for transactions running on ShardDataTree.
+ *
+ * @param <T> Backing transaction type.
+ */
+@NotThreadSafe
+abstract class AbstractShardDataTreeTransaction<T extends DataTreeSnapshot> {
+ private final T snapshot;
+ private final String id;
+ // Latched by close(); never reset, making close() idempotent.
+ private boolean closed;
+
+ protected AbstractShardDataTreeTransaction(final String id, final T snapshot) {
+ this.snapshot = Preconditions.checkNotNull(snapshot);
+ this.id = Preconditions.checkNotNull(id);
+ }
+
+ /**
+ * @return The backing snapshot this transaction operates on.
+ */
+ final T getSnapshot() {
+ return snapshot;
+ }
+
+ /**
+ * @return True if this transaction has been closed via {@link #close()}.
+ */
+ final boolean isClosed() {
+ return closed;
+ }
+
+ /**
+ * Close this transaction and mark it as closed, allowing idempotent invocations.
+ *
+ * @return True if the transaction got closed by this method invocation.
+ */
+ protected final boolean close() {
+ if (closed) {
+ return false;
+ }
+
+ closed = true;
+ return true;
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("id", id).add("closed", closed).add("snapshot", snapshot).toString();
+ }
+
+ /**
+ * Abort this transaction, releasing any state held by the concrete implementation.
+ */
+ abstract void abort();
+}
*/
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorSelection;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import scala.concurrent.Future;
* implementation. In addition to the usual set of methods it also contains the list of actor
* futures.
*/
-abstract class AbstractThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
- abstract List<Future<ActorSelection>> getCohortFutures();
+public abstract class AbstractThreePhaseCommitCohort<T> implements DOMStoreThreePhaseCommitCohort {
+ protected static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
+ protected static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
+
+ abstract List<Future<T>> getCohortFutures();
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A {@link ShardDataTreeCohort} used for transactions which belong to a
+ * transaction chain. All three-phase-commit calls are forwarded to the
+ * delegate cohort; the only added behavior is that a successful commit
+ * clears the transaction from the owning chain.
+ */
+final class ChainedCommitCohort extends ShardDataTreeCohort {
+ private static final Logger LOG = LoggerFactory.getLogger(ChainedCommitCohort.class);
+ private final ReadWriteShardDataTreeTransaction transaction;
+ private final ShardDataTreeTransactionChain chain;
+ private final ShardDataTreeCohort delegate;
+
+ ChainedCommitCohort(final ShardDataTreeTransactionChain chain, final ReadWriteShardDataTreeTransaction transaction, final ShardDataTreeCohort delegate) {
+ this.transaction = Preconditions.checkNotNull(transaction);
+ this.delegate = Preconditions.checkNotNull(delegate);
+ this.chain = Preconditions.checkNotNull(chain);
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ final ListenableFuture<Void> ret = delegate.commit();
+
+ Futures.addCallback(ret, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(Void result) {
+ // Commit went through: let the chain forget this transaction.
+ chain.clearTransaction(transaction);
+ LOG.debug("Committed transaction {}", transaction);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ // NOTE(review): the transaction is not cleared from the chain here,
+ // presumably because a failed commit leaves the chain unusable -- confirm.
+ LOG.error("Transaction {} commit failed, cannot recover", transaction, t);
+ }
+ });
+
+ return ret;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return delegate.canCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegate.preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegate.abort();
+ }
+
+ @Override
+ DataTreeCandidateTip getCandidate() {
+ return delegate.getCandidate();
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorSelection;
import akka.dispatch.OnComplete;
import java.util.List;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Stores the ready Futures from the previous Tx in the chain.
*/
- private final List<Future<ActorSelection>> previousReadyFutures;
+ private final List<Future<Object>> previousReadyFutures;
/**
* Stores the ready Futures from this transaction when it is readied.
*/
- private volatile List<Future<ActorSelection>> readyFutures;
+ private volatile List<Future<Object>> readyFutures;
ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
- String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
+ String transactionChainId, List<Future<Object>> previousReadyFutures) {
super(actorContext, transactionType, transactionChainId);
this.previousReadyFutures = previousReadyFutures;
}
- List<Future<ActorSelection>> getReadyFutures() {
+ List<Future<Object>> getReadyFutures() {
return readyFutures;
}
return readyFutures != null;
}
+ @SuppressWarnings({ "unchecked", "rawtypes" })
@Override
- public AbstractThreePhaseCommitCohort ready() {
- final AbstractThreePhaseCommitCohort ret = super.ready();
- readyFutures = ret.getCohortFutures();
+ public AbstractThreePhaseCommitCohort<?> ready() {
+ final AbstractThreePhaseCommitCohort<?> ret = super.ready();
+ readyFutures = (List)ret.getCohortFutures();
LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
readyFutures.size(), getTransactionChainId());
return ret;
* previous Tx's ready operations haven't completed yet.
*/
@Override
- protected Future<ActorSelection> sendFindPrimaryShardAsync(final String shardName) {
+ protected Future<PrimaryShardInfo> sendFindPrimaryShardAsync(final String shardName) {
// Check if there are any previous ready Futures, otherwise let the super class handle it.
if(previousReadyFutures.isEmpty()) {
return super.sendFindPrimaryShardAsync(shardName);
}
// Combine the ready Futures into 1.
- Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
+ Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
previousReadyFutures, getActorContext().getClientDispatcher());
// Add a callback for completion of the combined Futures.
- final Promise<ActorSelection> returnPromise = akka.dispatch.Futures.promise();
- OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
+ final Promise<PrimaryShardInfo> returnPromise = akka.dispatch.Futures.promise();
+ OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
- public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
+ public void onComplete(Throwable failure, Iterable<Object> notUsed) {
if(failure != null) {
// A Ready Future failed so fail the returned Promise.
returnPromise.failure(failure);
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.databroker.AbstractDOMBroker;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.broker.impl.AbstractDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.broker.impl.TransactionCommitFailedExceptionMapper;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.LoggerFactory;
/**
- * Implementation of DOMDataCommitExecutor that coordinates transaction commits concurrently. The 3
+ * ConcurrentDOMDataBroker commits transactions concurrently. The 3
* commit phases (canCommit, preCommit, and commit) are performed serially and non-blocking
* (ie async) per transaction but multiple transaction commits can run concurrent.
*
* @author Thomas Pantelis
*/
-public class ConcurrentDOMDataBroker extends AbstractDOMDataBroker {
+public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
private static final Logger LOG = LoggerFactory.getLogger(ConcurrentDOMDataBroker.class);
private static final String CAN_COMMIT = "CAN_COMMIT";
private static final String PRE_COMMIT = "PRE_COMMIT";
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import java.util.HashMap;
-import java.util.Map;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
-import org.slf4j.Logger;
-
-/**
- * A factory for creating DOM transactions, either normal or chained.
- *
- * @author Thomas Pantelis
- */
-public class DOMTransactionFactory {
-
- private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
- private final InMemoryDOMDataStore store;
- private final ShardStats shardMBean;
- private final Logger log;
- private final String name;
-
- public DOMTransactionFactory(InMemoryDOMDataStore store, ShardStats shardMBean, Logger log, String name) {
- this.store = store;
- this.shardMBean = shardMBean;
- this.log = log;
- this.name = name;
- }
-
- @SuppressWarnings("unchecked")
- public <T extends DOMStoreTransaction> T newTransaction(TransactionProxy.TransactionType type,
- String transactionID, String transactionChainID) {
-
- DOMStoreTransactionFactory factory = store;
-
- if(!transactionChainID.isEmpty()) {
- factory = transactionChains.get(transactionChainID);
- if(factory == null) {
- if(log.isDebugEnabled()) {
- log.debug("{}: Creating transaction with ID {} from chain {}", name, transactionID,
- transactionChainID);
- }
-
- DOMStoreTransactionChain transactionChain = store.createTransactionChain();
- transactionChains.put(transactionChainID, transactionChain);
- factory = transactionChain;
- }
- } else {
- log.debug("{}: Creating transaction with ID {}", name, transactionID);
- }
-
- T transaction = null;
- switch(type) {
- case READ_ONLY:
- transaction = (T) factory.newReadOnlyTransaction();
- shardMBean.incrementReadOnlyTransactionCount();
- break;
- case READ_WRITE:
- transaction = (T) factory.newReadWriteTransaction();
- shardMBean.incrementReadWriteTransactionCount();
- break;
- case WRITE_ONLY:
- transaction = (T) factory.newWriteOnlyTransaction();
- shardMBean.incrementWriteOnlyTransactionCount();
- break;
- }
-
- return transaction;
- }
-
- public void closeTransactionChain(String transactionChainID) {
- DOMStoreTransactionChain chain =
- transactionChains.remove(transactionChainID);
-
- if(chain != null) {
- chain.close();
- }
- }
-
- public void closeAllTransactionChains() {
- for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
- entry.getValue().close();
- }
-
- transactionChains.clear();
- }
-}
*/
package org.opendaylight.controller.cluster.datastore;
-import java.util.ArrayList;
-import java.util.List;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-final class DataChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterChangeListener, ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>> {
+final class DataChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterChangeListener, ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> {
private static final Logger LOG = LoggerFactory.getLogger(DataChangeListenerSupport.class);
private final List<DelayedListenerRegistration> delayedListenerRegistrations = new ArrayList<>();
private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
if (isLeader) {
for (DelayedListenerRegistration reg: delayedListenerRegistrations) {
if(!reg.isClosed()) {
- reg.setDelegate(createDelegate(reg.getRegisterChangeListener()));
+ final Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> res =
+ createDelegate(reg.getRegisterChangeListener());
+ reg.setDelegate(res.getKey());
+ if (res.getValue() != null) {
+ reg.getInstance().onDataChanged(res.getValue());
+ }
}
}
LOG.debug("{}: registerDataChangeListener for {}, leader: {}", persistenceId(), message.getPath(), isLeader);
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
NormalizedNode<?, ?>>> registration;
+ final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event;
if (isLeader) {
- registration = createDelegate(message);
+ final Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> res =
+ createDelegate(message);
+ registration = res.getKey();
+ event = res.getValue();
} else {
LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
DelayedListenerRegistration delayedReg = new DelayedListenerRegistration(message);
delayedListenerRegistrations.add(delayedReg);
registration = delayedReg;
+ event = null;
}
ActorRef listenerRegistration = createActor(DataChangeListenerRegistration.props(registration));
persistenceId(), listenerRegistration.path());
tellSender(new RegisterChangeListenerReply(listenerRegistration));
+ if (event != null) {
+ registration.getInstance().onDataChanged(event);
+ }
}
@Override
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> createDelegate(
+ Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> createDelegate(
final RegisterChangeListener message) {
ActorSelection dataChangeListenerPath = selectActor(message.getDataChangeListenerPath());
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteArrayDataInput;
+import com.google.common.io.ByteArrayDataOutput;
+import com.google.common.io.ByteStreams;
+import com.google.protobuf.GeneratedMessage.GeneratedExtension;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputStreamReader;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeOutputStreamWriter;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages.AppendEntries;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Raft {@link Payload} carrying a serialized {@link DataTreeCandidate}. The candidate
+ * is encoded as its root path followed by a recursive stream of (type tag, node data)
+ * records, using the single-byte tag constants defined below.
+ */
+final class DataTreeCandidatePayload extends Payload implements Externalizable {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeCandidatePayload.class);
+ private static final long serialVersionUID = 1L;
+ // Wire-format tags identifying the modification type of a serialized node.
+ private static final byte DELETE = 0;
+ private static final byte SUBTREE_MODIFIED = 1;
+ private static final byte UNMODIFIED = 2;
+ private static final byte WRITE = 3;
+
+ // Serialized candidate bytes; transient because Externalizable does the encoding itself.
+ private transient byte[] serialized;
+
+ public DataTreeCandidatePayload() {
+ // Required by Externalizable
+ }
+
+ private DataTreeCandidatePayload(final byte[] serialized) {
+ this.serialized = Preconditions.checkNotNull(serialized);
+ }
+
+ // Writes a size-prefixed sequence of child node records.
+ private static void writeChildren(final NormalizedNodeOutputStreamWriter writer, final DataOutput out,
+ final Collection<DataTreeCandidateNode> children) throws IOException {
+ out.writeInt(children.size());
+ for (DataTreeCandidateNode child : children) {
+ writeNode(writer, out, child);
+ }
+ }
+
+ // Writes a single non-root node: a type tag followed by type-specific data. WRITE
+ // nodes carry the full after-image (from which the identifier is recovered on read),
+ // so no separate path argument is emitted for them.
+ private static void writeNode(final NormalizedNodeOutputStreamWriter writer, final DataOutput out,
+ final DataTreeCandidateNode node) throws IOException {
+ switch (node.getModificationType()) {
+ case DELETE:
+ out.writeByte(DELETE);
+ writer.writePathArgument(node.getIdentifier());
+ break;
+ case SUBTREE_MODIFIED:
+ out.writeByte(SUBTREE_MODIFIED);
+ writer.writePathArgument(node.getIdentifier());
+ writeChildren(writer, out, node.getChildNodes());
+ break;
+ case WRITE:
+ out.writeByte(WRITE);
+ writer.writeNormalizedNode(node.getDataAfter().get());
+ break;
+ case UNMODIFIED:
+ throw new IllegalArgumentException("Unmodified candidate should never be in the payload");
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+ }
+ }
+
+ /**
+ * Serialize a candidate into a new payload instance.
+ *
+ * @param candidate candidate to encode, must not be null
+ * @return payload wrapping the serialized form
+ * @throws IllegalArgumentException if serialization fails
+ */
+ static DataTreeCandidatePayload create(DataTreeCandidate candidate) {
+ final ByteArrayDataOutput out = ByteStreams.newDataOutput();
+ try (final NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(out)) {
+ writer.writeYangInstanceIdentifier(candidate.getRootPath());
+
+ final DataTreeCandidateNode node = candidate.getRootNode();
+ switch (node.getModificationType()) {
+ case DELETE:
+ out.writeByte(DELETE);
+ break;
+ case SUBTREE_MODIFIED:
+ out.writeByte(SUBTREE_MODIFIED);
+ writeChildren(writer, out, node.getChildNodes());
+ break;
+ case UNMODIFIED:
+ out.writeByte(UNMODIFIED);
+ break;
+ case WRITE:
+ out.writeByte(WRITE);
+ writer.writeNormalizedNode(node.getDataAfter().get());
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+ }
+
+ // NOTE(review): redundant -- the try-with-resources block closes the writer
+ // again on exit; harmless provided close() is idempotent.
+ writer.close();
+ } catch (IOException e) {
+ throw new IllegalArgumentException(String.format("Failed to serialize candidate %s", candidate), e);
+ }
+
+ return new DataTreeCandidatePayload(out.toByteArray());
+ }
+
+ // Reads a size-prefixed child list, dropping children which deserialize to null
+ // (UNMODIFIED nodes and childless SUBTREE_MODIFIED nodes, see readNode()).
+ private static Collection<DataTreeCandidateNode> readChildren(final NormalizedNodeInputStreamReader reader,
+ final DataInput in) throws IOException {
+ final int size = in.readInt();
+ if (size != 0) {
+ final Collection<DataTreeCandidateNode> ret = new ArrayList<>(size);
+ for (int i = 0; i < size; ++i) {
+ final DataTreeCandidateNode child = readNode(reader, in);
+ if (child != null) {
+ ret.add(child);
+ }
+ }
+ return ret;
+ } else {
+ return Collections.emptyList();
+ }
+ }
+
+ // Reads a single non-root node; may return null when the node carries no
+ // information worth propagating.
+ private static DataTreeCandidateNode readNode(final NormalizedNodeInputStreamReader reader,
+ final DataInput in) throws IOException {
+ final byte type = in.readByte();
+ switch (type) {
+ case DELETE:
+ return DeletedDataTreeCandidateNode.create(reader.readPathArgument());
+ case SUBTREE_MODIFIED:
+ final PathArgument identifier = reader.readPathArgument();
+ final Collection<DataTreeCandidateNode> children = readChildren(reader, in);
+ if (children.isEmpty()) {
+ LOG.debug("Modified node {} does not have any children, not instantiating it", identifier);
+ return null;
+ } else {
+ return ModifiedDataTreeCandidateNode.create(identifier, children);
+ }
+ case UNMODIFIED:
+ return null;
+ case WRITE:
+ return DataTreeCandidateNodes.fromNormalizedNode(reader.readNormalizedNode());
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + type);
+ }
+ }
+
+ // Inverse of create(): root path, then the tagged root node.
+ private static DataTreeCandidate parseCandidate(final ByteArrayDataInput in) throws IOException {
+ final NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(in);
+ final YangInstanceIdentifier rootPath = reader.readYangInstanceIdentifier();
+ final byte type = in.readByte();
+
+ final DataTreeCandidateNode rootNode;
+ switch (type) {
+ case DELETE:
+ rootNode = DeletedDataTreeCandidateNode.create();
+ break;
+ case SUBTREE_MODIFIED:
+ rootNode = ModifiedDataTreeCandidateNode.create(readChildren(reader, in));
+ break;
+ case WRITE:
+ rootNode = DataTreeCandidateNodes.fromNormalizedNode(reader.readNormalizedNode());
+ break;
+ // NOTE(review): no UNMODIFIED case here although create() can emit that tag
+ // for the root, which would land in the default and throw -- confirm whether
+ // AbstractDataTreeCandidateNode.createUnmodified() is the intended mapping.
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + type);
+ }
+
+ return DataTreeCandidates.newDataTreeCandidate(rootPath, rootNode);
+ }
+
+ /**
+ * Deserialize the candidate carried by this payload.
+ *
+ * @throws IOException if the serialized form cannot be parsed
+ */
+ DataTreeCandidate getCandidate() throws IOException {
+ return parseCandidate(ByteStreams.newDataInput(serialized));
+ }
+
+ @Override
+ @Deprecated
+ @SuppressWarnings("rawtypes")
+ public <T> Map<GeneratedExtension, T> encode() {
+ // Protobuf-based encoding is not supported by this payload type.
+ return null;
+ }
+
+ @Override
+ @Deprecated
+ public Payload decode(final AppendEntries.ReplicatedLogEntry.Payload payload) {
+ // Protobuf-based decoding is not supported by this payload type.
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return serialized.length;
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ // Single version byte (derived from serialVersionUID) precedes the length-prefixed body.
+ out.writeByte((byte)serialVersionUID);
+ out.writeInt(serialized.length);
+ out.write(serialized);
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ // Reject streams written with a different version byte; mirrors writeExternal().
+ final long version = in.readByte();
+ Preconditions.checkArgument(version == serialVersionUID, "Unsupported serialization version %s", version);
+
+ final int length = in.readInt();
+ serialized = new byte[length];
+ in.readFully(serialized);
+ }
+}
import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
import akka.dispatch.OnComplete;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
} else {
RegisterDataTreeChangeListenerReply reply = (RegisterDataTreeChangeListenerReply) result;
setListenerRegistrationActor(actorContext.actorSelection(
- reply.getListenerRegistrationPath().path()));
+ reply.getListenerRegistrationPath()));
}
}
}, actorContext.getClientDispatcher());
}
+
+ @VisibleForTesting
+ ActorSelection getListenerRegistrationActor() {
+ return listenerRegistrationActor;
+ }
+
+ @VisibleForTesting
+ ActorRef getDataChangeListenerActor() {
+ return dataChangeListenerActor;
+ }
}
import akka.actor.ActorSelection;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
+import java.util.Map.Entry;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>> {
+final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> {
private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
private final ArrayList<DelayedDataTreeListenerRegistration> delayedRegistrations = new ArrayList<>();
private final Collection<ActorSelection> actors = new ArrayList<>();
LOG.debug("{}: registerTreeChangeListener for {}, leader: {}", persistenceId(), registerTreeChangeListener.getPath(), isLeader);
final ListenerRegistration<DOMDataTreeChangeListener> registration;
+ final DataTreeCandidate event;
if (!isLeader) {
LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
new DelayedDataTreeListenerRegistration(registerTreeChangeListener);
delayedRegistrations.add(delayedReg);
registration = delayedReg;
+ event = null;
} else {
- registration = createDelegate(registerTreeChangeListener);
+ final Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> res = createDelegate(registerTreeChangeListener);
+ registration = res.getKey();
+ event = res.getValue();
}
ActorRef listenerRegistration = createActor(DataTreeChangeListenerRegistrationActor.props(registration));
persistenceId(), listenerRegistration.path());
tellSender(new RegisterDataTreeChangeListenerReply(listenerRegistration));
+ if (event != null) {
+ registration.getInstance().onDataTreeChanged(Collections.singletonList(event));
+ }
}
@Override
- ListenerRegistration<DOMDataTreeChangeListener> createDelegate(final RegisterDataTreeChangeListener message) {
+ Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> createDelegate(final RegisterDataTreeChangeListener message) {
ActorSelection dataChangeListenerPath = selectActor(message.getDataTreeChangeListenerPath());
// Notify the listener if notifications should be enabled or not
private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
- private boolean writeOnlyTransactionOptimizationsEnabled = false;
+ private boolean writeOnlyTransactionOptimizationsEnabled = true;
public static Set<String> getGlobalDatastoreTypes() {
return globalDatastoreTypes;
package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Preconditions;
+import java.util.Collections;
+import java.util.Map.Entry;
import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
/**
* Intermediate proxy registration returned to the user when we cannot
this.registerTreeChangeListener = Preconditions.checkNotNull(registerTreeChangeListener);
}
- synchronized void createDelegate(final DelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>> factory) {
+ synchronized void createDelegate(final DelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> factory) {
if (!closed) {
- this.delegate = factory.createDelegate(registerTreeChangeListener);
+ final Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> res = factory.createDelegate(registerTreeChangeListener);
+ this.delegate = res.getKey();
+ if (res.getValue() != null) {
+ delegate.getInstance().onDataTreeChanged(Collections.singletonList(res.getValue()));
+ }
}
}
*/
package org.opendaylight.controller.cluster.datastore;
+import java.util.Map.Entry;
+
/**
* Base class for factories instantiating delegates.
*
* <D> delegate type
* <M> message type
+ * <I> initial state type
*/
-abstract class DelegateFactory<M, D> {
- abstract D createDelegate(M message);
+abstract class DelegateFactory<M, D, I> {
+ abstract Entry<D, I> createDelegate(M message);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Optional;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+
+/**
+ * A deserialized {@link DataTreeCandidateNode} which represents a deletion.
+ */
+abstract class DeletedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
+ private DeletedDataTreeCandidateNode() {
+ super(ModificationType.DELETE);
+ }
+
+ /**
+ * Creates a deleted node representing the tree root. The root has no
+ * path argument, so {@link #getIdentifier()} is unsupported on the result.
+ *
+ * @return a root-level DELETE candidate node
+ */
+ static DataTreeCandidateNode create() {
+ return new DeletedDataTreeCandidateNode() {
+ @Override
+ public PathArgument getIdentifier() {
+ throw new UnsupportedOperationException("Root node does not have an identifier");
+ }
+ };
+ }
+
+ /**
+ * Creates a deleted node for a non-root path.
+ *
+ * @param identifier path argument of the deleted node
+ * @return a DELETE candidate node reporting the supplied identifier
+ */
+ static DataTreeCandidateNode create(final PathArgument identifier) {
+ return new DeletedDataTreeCandidateNode() {
+ @Override
+ public final PathArgument getIdentifier() {
+ return identifier;
+ }
+ };
+ }
+
+ // A deletion has no after-image by definition.
+ @Override
+ public final Optional<NormalizedNode<?, ?>> getDataAfter() {
+ return Optional.absent();
+ }
+
+ @Override
+ public final Collection<DataTreeCandidateNode> getChildNodes() {
+ // We would require the before-image to reconstruct the list of nodes which
+ // were deleted.
+ throw new UnsupportedOperationException("Children not available after serialization");
+ }
+}
*
* <D> delegate type
* <M> message type
+ * <I> initial state type
*/
-abstract class LeaderLocalDelegateFactory<M, D> extends DelegateFactory<M, D> {
+abstract class LeaderLocalDelegateFactory<M, D, I> extends DelegateFactory<M, D, I> {
private final Shard shard;
protected LeaderLocalDelegateFactory(final Shard shard) {
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+
+/**
+ * A deserialized {@link DataTreeCandidateNode} which represents a modification in
+ * one of its children.
+ */
+abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
+ // Child candidate nodes carrying the actual modifications; never null.
+ private final Collection<DataTreeCandidateNode> children;
+
+ private ModifiedDataTreeCandidateNode(final Collection<DataTreeCandidateNode> children) {
+ super(ModificationType.SUBTREE_MODIFIED);
+ this.children = Preconditions.checkNotNull(children);
+ }
+
+ /**
+ * Creates a subtree-modified node representing the tree root. The root has
+ * no path argument, so {@link #getIdentifier()} is unsupported on the result.
+ *
+ * @param children modified child candidate nodes, must not be null
+ * @return a root-level SUBTREE_MODIFIED candidate node
+ */
+ static DataTreeCandidateNode create(final Collection<DataTreeCandidateNode> children) {
+ return new ModifiedDataTreeCandidateNode(children) {
+ @Override
+ public PathArgument getIdentifier() {
+ throw new UnsupportedOperationException("Root node does not have an identifier");
+ }
+ };
+ }
+
+ /**
+ * Creates a subtree-modified node for a non-root path.
+ *
+ * @param identifier path argument of the modified node
+ * @param children modified child candidate nodes, must not be null
+ * @return a SUBTREE_MODIFIED candidate node reporting the supplied identifier
+ */
+ static DataTreeCandidateNode create(final PathArgument identifier, final Collection<DataTreeCandidateNode> children) {
+ return new ModifiedDataTreeCandidateNode(children) {
+ @Override
+ public final PathArgument getIdentifier() {
+ return identifier;
+ }
+ };
+ }
+
+ // Only child-level changes survive serialization; this node's own
+ // after-image is not retained.
+ @Override
+ public final Optional<NormalizedNode<?, ?>> getDataAfter() {
+ throw new UnsupportedOperationException("After-image not available after serialization");
+ }
+
+ @Override
+ public final Collection<DataTreeCandidateNode> getChildNodes() {
+ return children;
+ }
+}
*/
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Collections;
import java.util.List;
* A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}
* instance given out for empty transactions.
*/
-final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort {
+final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
- private static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
- private static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
-
private NoOpDOMStoreThreePhaseCommitCohort() {
// Hidden to prevent instantiation
}
}
@Override
- List<Future<ActorSelection>> getCohortFutures() {
+ List<Future<Object>> getCohortFutures() {
return Collections.emptyList();
}
}
LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
}
+ @Override
+ public boolean supportsDirectCommit() {
+ return true;
+ }
+
+ @Override
+ public Future<Object> directCommit() {
+ LOG.debug("Tx {} directCommit called, failure: {}", getIdentifier(), failure);
+ operationLimiter.release();
+ return akka.dispatch.Futures.failed(failure);
+ }
+
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called", getIdentifier());
+ LOG.debug("Tx {} readyTransaction called, failure: {}", getIdentifier(), failure);
operationLimiter.release();
return akka.dispatch.Futures.failed(failure);
}
package org.opendaylight.controller.cluster.datastore;
-public interface OperationCallback {
+import java.util.concurrent.atomic.AtomicReference;
+
+interface OperationCallback {
+ OperationCallback NO_OP_CALLBACK = new OperationCallback() {
+ @Override
+ public void run() {
+ }
+
+ @Override
+ public void success() {
+ }
+
+ @Override
+ public void failure() {
+ }
+ };
+
+ class Reference extends AtomicReference<OperationCallback> {
+ private static final long serialVersionUID = 1L;
+
+ public Reference(OperationCallback initialValue) {
+ super(initialValue);
+ }
+ }
+
void run();
void success();
void failure();
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+
+/**
+ * A read-only shard transaction backed by an immutable {@link DataTreeSnapshot}.
+ */
+final class ReadOnlyShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeSnapshot> {
+ ReadOnlyShardDataTreeTransaction(final String id, final DataTreeSnapshot snapshot) {
+ super(id, snapshot);
+ }
+
+ // Aborting a read-only transaction only needs to release the snapshot;
+ // there is no pending modification to roll back.
+ @Override
+ void abort() {
+ close();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+
+/**
+ * A read-write shard transaction backed by a {@link DataTreeModification}.
+ * Completion (abort or ready) is delegated to the owning
+ * {@link ShardDataTreeTransactionParent}.
+ */
+final class ReadWriteShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeModification> {
+ // Parent which tracks this transaction's lifecycle; never null.
+ private final ShardDataTreeTransactionParent parent;
+
+ // NOTE(review): 'protected' on a constructor of a final class is effectively
+ // package-private; consider dropping the modifier.
+ protected ReadWriteShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final String id, final DataTreeModification modification) {
+ super(id, modification);
+ this.parent = Preconditions.checkNotNull(parent);
+ }
+
+ @Override
+ void abort() {
+ Preconditions.checkState(close(), "Transaction is already closed");
+
+ parent.abortTransaction(this);
+ }
+
+ /**
+ * Seals this transaction and hands it to the parent for the commit
+ * protocol. May be invoked at most once; close() returning false means
+ * the transaction was already aborted or readied.
+ *
+ * @return the commit cohort produced by the parent
+ * @throws IllegalStateException if the transaction is already closed
+ */
+ ShardDataTreeCohort ready() {
+ Preconditions.checkState(close(), "Transaction is already closed");
+
+ return parent.finishTransaction(this);
+ }
+}
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
-import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
* A Shard represents a portion of the logical data tree <br/>
* <p>
- * Our Shard uses InMemoryDataStore as it's internal representation and delegates all requests it
+ * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
* </p>
*/
public class Shard extends RaftActor {
static final String DEFAULT_NAME = "default";
// The state of this Shard
- private final InMemoryDOMDataStore store;
+ private final ShardDataTree store;
/// The name of this shard
private final String name;
private final MessageTracker appendEntriesReplyTracker;
- private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
- Serialization.serializedActorPath(getSelf()));
-
- private final DOMTransactionFactory domTransactionFactory;
-
private final ShardTransactionActorFactory transactionActorFactory;
private final ShardSnapshotCohort snapshotCohort;
LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
- store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
- datastoreContext.getDataStoreProperties());
-
- if (schemaContext != null) {
- store.onGlobalContextUpdated(schemaContext);
- }
+ store = new ShardDataTree(schemaContext);
shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
datastoreContext.getDataStoreMXBeanType());
- shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
shardMBean.setShardActor(getSelf());
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
- domTransactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);
-
- commitCoordinator = new ShardCommitCoordinator(domTransactionFactory,
+ commitCoordinator = new ShardCommitCoordinator(store,
TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);
appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
- transactionActorFactory = new ShardTransactionActorFactory(domTransactionFactory, datastoreContext,
+ transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
new Dispatchers(context().system().dispatchers()).getDispatcherPath(
Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
} else if (BatchedModifications.class.isInstance(message)) {
handleBatchedModifications((BatchedModifications)message);
} else if (message instanceof ForwardedReadyTransaction) {
- handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
+ commitCoordinator.handleForwardedReadyTransaction((ForwardedReadyTransaction) message,
+ getSender(), this);
} else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
} else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
}
}
- private void handleCommitTransaction(final CommitTransaction commit) {
- final String transactionID = commit.getTransactionID();
+ private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
+ return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
+ }
- LOG.debug("{}: Committing transaction {}", persistenceId(), transactionID);
+ void continueCommit(final CohortEntry cohortEntry) throws Exception {
+ final DataTreeCandidate candidate = cohortEntry.getCohort().getCandidate();
- // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
- // this transaction.
- final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
- if(cohortEntry == null) {
- // We're not the current Tx - the Tx was likely expired b/c it took too long in
- // between the canCommit and commit messages.
- IllegalStateException ex = new IllegalStateException(
- String.format("%s: Cannot commit transaction %s - it is not the current transaction",
- persistenceId(), transactionID));
- LOG.error(ex.getMessage());
+ // If we do not have any followers and we are not using persistence
+ // or if cohortEntry has no modifications
+ // we can apply modification to the state immediately
+ if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
+ applyModificationToState(getSender(), cohortEntry.getTransactionID(), candidate);
+ } else {
+ Shard.this.persistData(getSender(), cohortEntry.getTransactionID(),
+ DataTreeCandidatePayload.create(candidate));
+ }
+ }
+
+ private void handleCommitTransaction(final CommitTransaction commit) {
+ if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
shardMBean.incrementFailedTransactionsCount();
- getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
- return;
}
+ }
- // We perform the preCommit phase here atomically with the commit phase. This is an
- // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
- // coordination of preCommit across shards in case of failure but preCommit should not
- // normally fail since we ensure only one concurrent 3-phase commit.
+ private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
+ LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
try {
// We block on the future here so we don't have to worry about possibly accessing our
// state on a different thread outside of our dispatcher. Also, the data store
// currently uses a same thread executor anyway.
- cohortEntry.getCohort().preCommit().get();
+ cohortEntry.getCohort().commit().get();
+
+ sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
+
+ shardMBean.incrementCommittedTransactionCount();
+ shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
- // If we do not have any followers and we are not using persistence
- // or if cohortEntry has no modifications
- // we can apply modification to the state immediately
- if((!hasFollowers() && !persistence().isRecoveryApplicable()) || (!cohortEntry.hasModifications())){
- applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
- } else {
- Shard.this.persistData(getSender(), transactionID,
- new ModificationPayload(cohortEntry.getModification()));
- }
} catch (Exception e) {
- LOG.error("{} An exception occurred while preCommitting transaction {}",
- persistenceId(), cohortEntry.getTransactionID(), e);
+ sender.tell(new akka.actor.Status.Failure(e), getSelf());
+
+ LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
+ transactionID, e);
shardMBean.incrementFailedTransactionsCount();
- getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ } finally {
+ commitCoordinator.currentTransactionComplete(transactionID, true);
}
-
- cohortEntry.updateLastAccessTime();
}
private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
// after the commit has been replicated to a majority of the followers.
CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
- if(cohortEntry == null) {
+ if (cohortEntry == null) {
// The transaction is no longer the current commit. This can happen if the transaction
// was aborted prior, most likely due to timeout in the front-end. We need to finish
// committing the transaction though since it was successfully persisted and replicated
// transaction.
cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
if(cohortEntry != null) {
- commitWithNewTransaction(cohortEntry.getModification());
+ try {
+ store.applyForeignCandidate(transactionID, cohortEntry.getCohort().getCandidate());
+ } catch (DataValidationFailedException e) {
+ shardMBean.incrementFailedTransactionsCount();
+ LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
+ }
+
sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
} else {
// This really shouldn't happen - it likely means that persistence or replication
LOG.error(ex.getMessage());
sender.tell(new akka.actor.Status.Failure(ex), getSelf());
}
-
- return;
- }
-
- LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
-
- try {
- // We block on the future here so we don't have to worry about possibly accessing our
- // state on a different thread outside of our dispatcher. Also, the data store
- // currently uses a same thread executor anyway.
- cohortEntry.getCohort().commit().get();
-
- sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
-
- shardMBean.incrementCommittedTransactionCount();
- shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
-
- } catch (Exception e) {
- sender.tell(new akka.actor.Status.Failure(e), getSelf());
-
- LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
- transactionID, e);
- shardMBean.incrementFailedTransactionsCount();
- } finally {
- commitCoordinator.currentTransactionComplete(transactionID, true);
+ } else {
+ finishCommit(sender, transactionID, cohortEntry);
}
}
private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
- commitCoordinator.handleCanCommit(canCommit, getSender(), self());
+ commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
}
private void handleBatchedModifications(BatchedModifications batched) {
//
if(isLeader()) {
try {
- boolean ready = commitCoordinator.handleTransactionModifications(batched);
- if(ready) {
- sender().tell(READY_TRANSACTION_REPLY, self());
- } else {
- sender().tell(new BatchedModificationsReply(batched.getModifications().size()), self());
- }
+ commitCoordinator.handleBatchedModifications(batched, getSender(), this);
} catch (Exception e) {
LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
batched.getTransactionID(), e);
}
}
- private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
- LOG.debug("{}: Readying transaction {}, client version {}", persistenceId(),
- ready.getTransactionID(), ready.getTxnClientVersion());
-
- // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
- // commitCoordinator in preparation for the subsequent three phase commit initiated by
- // the front-end.
- commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
- (MutableCompositeModification) ready.getModification());
-
- // Return our actor path as we'll handle the three phase commit, except if the Tx client
- // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
- // node. In that case, the subsequent 3-phase commit messages won't contain the
- // transactionId so to maintain backwards compatibility, we create a separate cohort actor
- // to provide the compatible behavior.
- if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
- ActorRef replyActorPath = getSelf();
- if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
- LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
- replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
- ready.getTransactionID()));
- }
-
- ReadyTransactionReply readyTransactionReply =
- new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
- ready.getTxnClientVersion());
- getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
- readyTransactionReply, getSelf());
- } else {
- getSender().tell(READY_TRANSACTION_REPLY, getSelf());
- }
- }
-
private void handleAbortTransaction(final AbortTransaction abort) {
doAbortTransaction(abort.getTransactionID(), getSender());
}
}
private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
- domTransactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
+ store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
}
private ActorRef createTypedTransactionActor(int transactionType,
}
private void commitWithNewTransaction(final Modification modification) {
- DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
- modification.apply(tx);
+ ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
+ modification.apply(tx.getSnapshot());
try {
snapshotCohort.syncCommitTransaction(tx);
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
- } catch (InterruptedException | ExecutionException e) {
+ } catch (Exception e) {
shardMBean.incrementFailedTransactionsCount();
LOG.error("{}: Failed to commit", persistenceId(), e);
}
@VisibleForTesting
void updateSchemaContext(final SchemaContext schemaContext) {
- store.onGlobalContextUpdated(schemaContext);
+ store.updateSchemaContext(schemaContext);
}
private boolean isMetricsCaptureEnabled() {
@Override
protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
-
- if(data instanceof ModificationPayload) {
+ if (data instanceof DataTreeCandidatePayload) {
+ if (clientActor == null) {
+ // No clientActor indicates a replica coming from the leader
+ try {
+ store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
+ } catch (DataValidationFailedException | IOException e) {
+ LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
+ }
+ } else {
+ // Replication consensus reached, proceed to commit
+ finishCommit(clientActor, identifier);
+ }
+ } else if (data instanceof ModificationPayload) {
try {
applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
} catch (ClassNotFoundException | IOException e) {
LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
}
- }
- else if (data instanceof CompositeModificationPayload) {
+ } else if (data instanceof CompositeModificationPayload) {
Object modification = ((CompositeModificationPayload) data).getModification();
applyModificationToState(clientActor, identifier, modification);
persistenceId(), getId());
}
- domTransactionFactory.closeAllTransactionChains();
+ store.closeAllTransactionChains();
}
}
}
@VisibleForTesting
- public InMemoryDOMDataStore getDataStore() {
+ public ShardDataTree getDataStore() {
return store;
}
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
/**
// Interface hook for unit tests to replace or decorate the DOMStoreThreePhaseCommitCohorts.
public interface CohortDecorator {
- DOMStoreThreePhaseCommitCohort decorate(String transactionID, DOMStoreThreePhaseCommitCohort actual);
+ ShardDataTreeCohort decorate(String transactionID, ShardDataTreeCohort actual);
}
private final Cache<String, CohortEntry> cohortCache;
private CohortEntry currentCohortEntry;
- private final DOMTransactionFactory transactionFactory;
+ private final ShardDataTree dataTree;
private final Queue<CohortEntry> queuedCohortEntries;
private final String name;
- private final String shardActorPath;
-
private final RemovalListener<String, CohortEntry> cacheRemovalListener =
new RemovalListener<String, CohortEntry>() {
@Override
// This is a hook for unit tests to replace or decorate the DOMStoreThreePhaseCommitCohorts.
private CohortDecorator cohortDecorator;
- public ShardCommitCoordinator(DOMTransactionFactory transactionFactory,
+ private ReadyTransactionReply readyTransactionReply;
+
+ public ShardCommitCoordinator(ShardDataTree dataTree,
long cacheExpiryTimeoutInSec, int queueCapacity, ActorRef shardActor, Logger log, String name) {
this.queueCapacity = queueCapacity;
this.log = log;
this.name = name;
- this.transactionFactory = transactionFactory;
-
- shardActorPath = Serialization.serializedActorPath(shardActor);
+ this.dataTree = Preconditions.checkNotNull(dataTree);
cohortCache = CacheBuilder.newBuilder().expireAfterAccess(cacheExpiryTimeoutInSec, TimeUnit.SECONDS).
removalListener(cacheRemovalListener).build();
this.queueCapacity = queueCapacity;
}
+ /**
+ * Returns a cached ReadyTransactionReply holding this shard's serialized actor path,
+ * creating it on first use (the path is reused for every subsequent ready reply).
+ */
+ private ReadyTransactionReply readyTransactionReply(Shard shard) {
+ if(readyTransactionReply == null) {
+ readyTransactionReply = new ReadyTransactionReply(Serialization.serializedActorPath(shard.self()));
+ }
+
+ return readyTransactionReply;
+ }
+
/**
* This method is called to ready a transaction that was prepared by ShardTransaction actor. It caches
* the prepared cohort entry for the given transactions ID in preparation for the subsequent 3-phase commit.
- *
- * @param transactionID the ID of the transaction
- * @param cohort the cohort to participate in the transaction commit
- * @param modification the modifications made by the transaction
*/
- public void transactionReady(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
- MutableCompositeModification modification) {
+ /**
+ * Handles a ForwardedReadyTransaction message: caches the cohort entry for the subsequent
+ * 3-phase commit and replies (or starts an immediate commit when requested).
+ */
+ public void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard) {
+ log.debug("{}: Readying transaction {}, client version {}", name,
+ ready.getTransactionID(), ready.getTxnClientVersion());
+
+ // Cache the entry up front so later 3-phase commit messages can find it by transaction ID.
+ CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), ready.getCohort(),
+ (MutableCompositeModification) ready.getModification());
+ cohortCache.put(ready.getTransactionID(), cohortEntry);
+
+ if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
+ // Return our actor path as we'll handle the three phase commit except if the Tx client
+ // version < Helium-1 version which means the Tx was initiated by a base Helium version node.
+ // In that case, the subsequent 3-phase commit messages won't contain the transactionId so to
+ // maintain backwards compatibility, we create a separate cohort actor to provide the compatible behavior.
+ ActorRef replyActorPath = shard.self();
+ if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
+ log.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", name);
+ replyActorPath = shard.getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
+ ready.getTransactionID()));
+ }
- cohortCache.put(transactionID, new CohortEntry(transactionID, cohort, modification));
+ ReadyTransactionReply readyTransactionReply =
+ new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
+ ready.getTxnClientVersion());
+ sender.tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
+ readyTransactionReply, shard.self());
+ } else {
+ if(ready.isDoImmediateCommit()) {
+ // Caller asked to commit in one shot - drive canCommit (and on success, commit) now.
+ cohortEntry.setDoImmediateCommit(true);
+ cohortEntry.setReplySender(sender);
+ cohortEntry.setShard(shard);
+ handleCanCommit(cohortEntry);
+ } else {
+ // The caller does not want immediate commit - the 3-phase commit will be coordinated by the
+ // front-end so send back a ReadyTransactionReply with our actor path.
+ sender.tell(readyTransactionReply(shard), shard.self());
+ }
+ }
 }
/**
*
* @throws ExecutionException if an error occurs loading the cache
*/
- public boolean handleTransactionModifications(BatchedModifications batched)
+ boolean handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard)
throws ExecutionException {
CohortEntry cohortEntry = cohortCache.getIfPresent(batched.getTransactionID());
if(cohortEntry == null) {
cohortEntry = new CohortEntry(batched.getTransactionID(),
- transactionFactory.<DOMStoreWriteTransaction>newTransaction(
- TransactionProxy.TransactionType.WRITE_ONLY, batched.getTransactionID(),
+ dataTree.newReadWriteTransaction(batched.getTransactionID(),
batched.getTransactionChainID()));
cohortCache.put(batched.getTransactionID(), cohortEntry);
}
batched.getTransactionID(), batched.getVersion());
}
- cohortEntry.ready(cohortDecorator);
+ cohortEntry.ready(cohortDecorator, batched.isDoCommitOnReady());
+
+ if(batched.isDoCommitOnReady()) {
+ cohortEntry.setReplySender(sender);
+ cohortEntry.setShard(shard);
+ handleCanCommit(cohortEntry);
+ } else {
+ sender.tell(readyTransactionReply(shard), shard.self());
+ }
+ } else {
+ sender.tell(new BatchedModificationsReply(batched.getModifications().size()), shard.self());
}
return batched.isReady();
}
- /**
- * This method handles the canCommit phase for a transaction.
- *
- * @param canCommit the CanCommitTransaction message
- * @param sender the actor that sent the message
- * @param shard the transaction's shard actor
- */
- public void handleCanCommit(CanCommitTransaction canCommit, final ActorRef sender,
- final ActorRef shard) {
- String transactionID = canCommit.getTransactionID();
+ private void handleCanCommit(CohortEntry cohortEntry) {
+ String transactionID = cohortEntry.getTransactionID();
+
if(log.isDebugEnabled()) {
log.debug("{}: Processing canCommit for transaction {} for shard {}",
- name, transactionID, shard.path());
- }
-
- // Lookup the cohort entry that was cached previously (or should have been) by
- // transactionReady (via the ForwardedReadyTransaction message).
- final CohortEntry cohortEntry = cohortCache.getIfPresent(transactionID);
- if(cohortEntry == null) {
- // Either canCommit was invoked before ready(shouldn't happen) or a long time passed
- // between canCommit and ready and the entry was expired from the cache.
- IllegalStateException ex = new IllegalStateException(
- String.format("%s: No cohort entry found for transaction %s", name, transactionID));
- log.error(ex.getMessage());
- sender.tell(new Status.Failure(ex), shard);
- return;
+ name, transactionID, cohortEntry.getShard().self().path());
}
- cohortEntry.setCanCommitSender(sender);
- cohortEntry.setShard(shard);
-
if(currentCohortEntry != null) {
// There's already a Tx commit in progress - attempt to queue this entry to be
// committed after the current Tx completes.
" capacity %d has been reached.",
name, transactionID, queueCapacity));
log.error(ex.getMessage());
- sender.tell(new Status.Failure(ex), shard);
+ cohortEntry.getReplySender().tell(new Status.Failure(ex), cohortEntry.getShard().self());
}
} else {
// No Tx commit currently in progress - make this the current entry and proceed with
}
}
+ /**
+ * This method handles the canCommit phase for a transaction.
+ *
+ * @param transactionID the ID of the transaction to canCommit
+ * @param sender the actor that sent the CanCommitTransaction message
+ * @param shard the transaction's shard actor
+ */
+ public void handleCanCommit(String transactionID, final ActorRef sender, final Shard shard) {
+ // Lookup the cohort entry that was cached previously (or should have been) by
+ // transactionReady (via the ForwardedReadyTransaction message).
+ final CohortEntry cohortEntry = cohortCache.getIfPresent(transactionID);
+ if(cohortEntry == null) {
+ // Either canCommit was invoked before ready(shouldn't happen) or a long time passed
+ // between canCommit and ready and the entry was expired from the cache.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("%s: No cohort entry found for transaction %s", name, transactionID));
+ log.error(ex.getMessage());
+ sender.tell(new Status.Failure(ex), shard.self());
+ return;
+ }
+
+ cohortEntry.setReplySender(sender);
+ cohortEntry.setShard(shard);
+
+ handleCanCommit(cohortEntry);
+ }
+
+ /**
+ * Invokes canCommit on the cohort and replies to the cached reply sender. When the entry was
+ * readied with an immediate commit, a successful canCommit proceeds straight to doCommit.
+ * A negative or failed canCommit completes (aborts) the current transaction.
+ */
 private void doCanCommit(final CohortEntry cohortEntry) {
+ boolean canCommit = false;
 try {
 // We block on the future here so we don't have to worry about possibly accessing our
 // state on a different thread outside of our dispatcher. Also, the data store
 // currently uses a same thread executor anyway.
- Boolean canCommit = cohortEntry.getCohort().canCommit().get();
+ canCommit = cohortEntry.getCohort().canCommit().get();
+
+ if(cohortEntry.isDoImmediateCommit()) {
+ if(canCommit) {
+ doCommit(cohortEntry);
+ } else {
+ cohortEntry.getReplySender().tell(new Status.Failure(new TransactionCommitFailedException(
+ "Can Commit failed, no detailed cause available.")), cohortEntry.getShard().self());
+ }
+ } else {
+ cohortEntry.getReplySender().tell(
+ canCommit ? CanCommitTransactionReply.YES.toSerializable() :
+ CanCommitTransactionReply.NO.toSerializable(), cohortEntry.getShard().self());
+ }
+ } catch (Exception e) {
+ log.debug("{}: An exception occurred during canCommit: {}", name, e);
- cohortEntry.getCanCommitSender().tell(
- canCommit ? CanCommitTransactionReply.YES.toSerializable() :
- CanCommitTransactionReply.NO.toSerializable(), cohortEntry.getShard());
+ Throwable failure = e;
+ if(e instanceof ExecutionException) {
+ failure = e.getCause();
+ }
+ cohortEntry.getReplySender().tell(new Status.Failure(failure), cohortEntry.getShard().self());
+ } finally {
 if(!canCommit) {
- // Remove the entry from the cache now since the Tx will be aborted.
- removeCohortEntry(cohortEntry.getTransactionID());
+ // Remove the entry from the cache now.
+ currentTransactionComplete(cohortEntry.getTransactionID(), true);
 }
- } catch (InterruptedException | ExecutionException e) {
- log.debug("{}: An exception occurred during canCommit: {}", name, e);
+ }
+ }
+
+ /**
+ * Performs preCommit and hands the entry back to the shard to continue the commit phase.
+ * On failure the reply sender is notified with the failure and the current transaction is
+ * completed as aborted.
+ *
+ * @return true if preCommit succeeded and the commit was continued
+ */
+ private boolean doCommit(CohortEntry cohortEntry) {
+ log.debug("{}: Committing transaction {}", name, cohortEntry.getTransactionID());
+
+ boolean success = false;
+
+ // We perform the preCommit phase here atomically with the commit phase. This is an
+ // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
+ // coordination of preCommit across shards in case of failure but preCommit should not
+ // normally fail since we ensure only one concurrent 3-phase commit.
+
+ try {
+ // We block on the future here so we don't have to worry about possibly accessing our
+ // state on a different thread outside of our dispatcher. Also, the data store
+ // currently uses a same thread executor anyway.
+ cohortEntry.getCohort().preCommit().get();
+
+ cohortEntry.getShard().continueCommit(cohortEntry);
+
+ cohortEntry.updateLastAccessTime();
- // Remove the entry from the cache now since the Tx will be aborted.
- removeCohortEntry(cohortEntry.getTransactionID());
- cohortEntry.getCanCommitSender().tell(new Status.Failure(e), cohortEntry.getShard());
+ success = true;
+ } catch (Exception e) {
+ log.error("{} An exception occurred while preCommitting transaction {}",
+ name, cohortEntry.getTransactionID(), e);
+ cohortEntry.getReplySender().tell(new akka.actor.Status.Failure(e), cohortEntry.getShard().self());
+
+ currentTransactionComplete(cohortEntry.getTransactionID(), true);
+ }
+
+ return success;
+ }
+
+ /**
+ * Handles a commit message for the given transaction, which must be the currently
+ * in-progress transaction tracked by this coordinator; otherwise a failure is sent back.
+ *
+ * @return true if the commit was successfully initiated
+ */
+ boolean handleCommit(final String transactionID, final ActorRef sender, final Shard shard) {
+ // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
+ // this transaction.
+ final CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
+ if(cohortEntry == null) {
+ // We're not the current Tx - the Tx was likely expired b/c it took too long in
+ // between the canCommit and commit messages.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("%s: Cannot commit transaction %s - it is not the current transaction",
+ name, transactionID));
+ log.error(ex.getMessage());
+ sender.tell(new akka.actor.Status.Failure(ex), shard.self());
+ return false;
+ }
+
+ return doCommit(cohortEntry);
+ }
/**
static class CohortEntry {
private final String transactionID;
- private DOMStoreThreePhaseCommitCohort cohort;
- private final MutableCompositeModification compositeModification;
- private final DOMStoreWriteTransaction transaction;
- private ActorRef canCommitSender;
- private ActorRef shard;
+ private ShardDataTreeCohort cohort;
+ private final ReadWriteShardDataTreeTransaction transaction;
+ private ActorRef replySender;
+ private Shard shard;
private long lastAccessTime;
+ private boolean doImmediateCommit;
- CohortEntry(String transactionID, DOMStoreWriteTransaction transaction) {
- this.compositeModification = new MutableCompositeModification();
- this.transaction = transaction;
+ CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction) {
+ this.transaction = Preconditions.checkNotNull(transaction);
this.transactionID = transactionID;
}
- CohortEntry(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
+ CohortEntry(String transactionID, ShardDataTreeCohort cohort,
MutableCompositeModification compositeModification) {
this.transactionID = transactionID;
this.cohort = cohort;
- this.compositeModification = compositeModification;
this.transaction = null;
}
return transactionID;
}
- DOMStoreThreePhaseCommitCohort getCohort() {
+ ShardDataTreeCohort getCohort() {
return cohort;
}
- MutableCompositeModification getModification() {
- return compositeModification;
- }
-
void applyModifications(Iterable<Modification> modifications) {
- for(Modification modification: modifications) {
- compositeModification.addModification(modification);
- modification.apply(transaction);
+ for (Modification modification : modifications) {
+ modification.apply(transaction.getSnapshot());
}
}
- void ready(CohortDecorator cohortDecorator) {
+ void ready(CohortDecorator cohortDecorator, boolean doImmediateCommit) {
Preconditions.checkState(cohort == null, "cohort was already set");
+ setDoImmediateCommit(doImmediateCommit);
+
cohort = transaction.ready();
if(cohortDecorator != null) {
}
}
- ActorRef getCanCommitSender() {
- return canCommitSender;
+ boolean isDoImmediateCommit() {
+ return doImmediateCommit;
}
- void setCanCommitSender(ActorRef canCommitSender) {
- this.canCommitSender = canCommitSender;
+ void setDoImmediateCommit(boolean doImmediateCommit) {
+ this.doImmediateCommit = doImmediateCommit;
}
- ActorRef getShard() {
- return shard;
+ ActorRef getReplySender() {
+ return replySender;
}
- void setShard(ActorRef shard) {
- this.shard = shard;
+ void setReplySender(ActorRef replySender) {
+ this.replySender = replySender;
+ }
+
+ Shard getShard() {
+ return shard;
}
- boolean hasModifications(){
- return compositeModification.getModifications().size() > 0;
+ void setShard(Shard shard) {
+ this.shard = shard;
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent;
+import org.opendaylight.controller.md.sal.dom.store.impl.ResolveDataChangeEventsTask;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTree;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Internal shard state, similar to a DOMStore, but optimized for use in the actor system,
+ * e.g. it does not expose public interfaces and assumes it is only ever called from a
+ * single thread.
+ *
+ * This class is not part of the API contract and is subject to change at any time.
+ */
+@NotThreadSafe
+@VisibleForTesting
+public final class ShardDataTree extends ShardDataTreeTransactionParent {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);
+ private static final ShardDataTreeNotificationManager MANAGER = new ShardDataTreeNotificationManager();
+ private final Map<String, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
+ private final ShardDataTreeChangePublisher treeChangePublisher = new ShardDataTreeChangePublisher();
+ private final ListenerTree listenerTree = ListenerTree.create();
+ private final TipProducingDataTree dataTree;
+
+ ShardDataTree(final SchemaContext schemaContext) {
+ dataTree = InMemoryDataTreeFactory.getInstance().create();
+ // Schema context may be absent at construction time; it can be supplied later
+ // via updateSchemaContext().
+ if (schemaContext != null) {
+ dataTree.setSchemaContext(schemaContext);
+ }
+ }
+
+ TipProducingDataTree getDataTree() {
+ return dataTree;
+ }
+
+ void updateSchemaContext(final SchemaContext schemaContext) {
+ dataTree.setSchemaContext(schemaContext);
+ }
+
+ // Returns the chain for the given id, creating and registering it on first use.
+ private ShardDataTreeTransactionChain ensureTransactionChain(final String chainId) {
+ ShardDataTreeTransactionChain chain = transactionChains.get(chainId);
+ if (chain == null) {
+ chain = new ShardDataTreeTransactionChain(chainId, this);
+ transactionChains.put(chainId, chain);
+ }
+
+ return chain;
+ }
+
+ // A null/empty chainId means a free-standing transaction over the current snapshot.
+ ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final String txId, final String chainId) {
+ if (Strings.isNullOrEmpty(chainId)) {
+ return new ReadOnlyShardDataTreeTransaction(txId, dataTree.takeSnapshot());
+ }
+
+ return ensureTransactionChain(chainId).newReadOnlyTransaction(txId);
+ }
+
+ // A null/empty chainId means a free-standing transaction over the current snapshot.
+ ReadWriteShardDataTreeTransaction newReadWriteTransaction(final String txId, final String chainId) {
+ if (Strings.isNullOrEmpty(chainId)) {
+ return new ReadWriteShardDataTreeTransaction(this, txId, dataTree.takeSnapshot().newModification());
+ }
+
+ return ensureTransactionChain(chainId).newReadWriteTransaction(txId);
+ }
+
+ void notifyListeners(final DataTreeCandidate candidate) {
+ LOG.debug("Notifying listeners on candidate {}", candidate);
+
+ // DataTreeChanges first, as they are more light-weight
+ treeChangePublisher.publishChanges(candidate);
+
+ // DataChanges second, as they are heavier
+ ResolveDataChangeEventsTask.create(candidate, listenerTree).resolve(MANAGER);
+ }
+
+ void closeAllTransactionChains() {
+ for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
+ chain.close();
+ }
+
+ transactionChains.clear();
+ }
+
+ void closeTransactionChain(final String transactionChainId) {
+ final ShardDataTreeTransactionChain chain = transactionChains.remove(transactionChainId);
+ if (chain != null) {
+ chain.close();
+ } else {
+ LOG.debug("Closing non-existent transaction chain {}", transactionChainId);
+ }
+ }
+
+ // Registers a data change listener; the returned event is the initial-state notification
+ // for the path, or null if no data currently exists there.
+ Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> registerChangeListener(
+ final YangInstanceIdentifier path,
+ final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener, final DataChangeScope scope) {
+ final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> reg =
+ listenerTree.registerDataChangeListener(path, listener, scope);
+
+ final Optional<NormalizedNode<?, ?>> currentState = dataTree.takeSnapshot().readNode(path);
+ final DOMImmutableDataChangeEvent event;
+ if (currentState.isPresent()) {
+ final NormalizedNode<?, ?> data = currentState.get();
+ event = DOMImmutableDataChangeEvent.builder(DataChangeScope.BASE).setAfter(data).addCreated(path, data).build();
+ } else {
+ event = null;
+ }
+
+ return new SimpleEntry<>(reg, event);
+ }
+
+ // Registers a tree change listener; the returned candidate is the initial-state notification
+ // for the path, or null if no data currently exists there.
+ Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> registerTreeChangeListener(final YangInstanceIdentifier path,
+ final DOMDataTreeChangeListener listener) {
+ final ListenerRegistration<DOMDataTreeChangeListener> reg = treeChangePublisher.registerTreeChangeListener(path, listener);
+
+ final Optional<NormalizedNode<?, ?>> currentState = dataTree.takeSnapshot().readNode(path);
+ final DataTreeCandidate event;
+ if (currentState.isPresent()) {
+ event = DataTreeCandidates.fromNormalizedNode(path, currentState.get());
+ } else {
+ event = null;
+ }
+ return new SimpleEntry<>(reg, event);
+ }
+
+ // Validates, prepares and commits a DataTreeCandidate produced outside this tree,
+ // then notifies listeners.
+ void applyForeignCandidate(final String identifier, final DataTreeCandidate foreign) throws DataValidationFailedException {
+ LOG.debug("Applying foreign transaction {}", identifier);
+
+ final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+ DataTreeCandidates.applyToModification(mod, foreign);
+ mod.ready();
+
+ LOG.trace("Applying foreign modification {}", mod);
+ dataTree.validate(mod);
+ final DataTreeCandidate candidate = dataTree.prepare(mod);
+ dataTree.commit(candidate);
+ notifyListeners(candidate);
+ }
+
+ @Override
+ void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction) {
+ // Intentional no-op
+ }
+
+ @Override
+ ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction) {
+ final DataTreeModification snapshot = transaction.getSnapshot();
+ snapshot.ready();
+ return new SimpleShardDataTreeCohort(this, snapshot);
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Collection;
+import java.util.Collections;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTreeChangePublisher;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.spi.DefaultDataTreeCandidate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@NotThreadSafe
+/**
+ * Publishes {@link DataTreeCandidate} changes to registered DOMDataTreeChangeListeners,
+ * delivering each candidate node synchronously on the calling thread.
+ */
+final class ShardDataTreeChangePublisher extends AbstractDOMStoreTreeChangePublisher {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTreeChangePublisher.class);
+
+ void publishChanges(final DataTreeCandidate candidate) {
+ processCandidateTree(candidate);
+ }
+
+ @Override
+ protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations,
+ final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
+ // Wrap the candidate node as a single-element change set and deliver it to each listener.
+ final Collection<DataTreeCandidate> changes = Collections.<DataTreeCandidate>singleton(new DefaultDataTreeCandidate(path, node));
+
+ for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
+ reg.getInstance().onDataTreeChanged(changes);
+ }
+ }
+
+ @Override
+ protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
+ // No cleanup required beyond what the superclass does; just record the removal.
+ LOG.debug("Registration {} removed", registration);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+
+/**
+ * Three-phase commit cohort for a shard-local transaction. Exposes the standard
+ * canCommit/preCommit/commit/abort steps as futures; instantiation is restricted
+ * to this package.
+ */
+public abstract class ShardDataTreeCohort {
+ ShardDataTreeCohort() {
+ // Prevent foreign instantiation
+ }
+
+ // The candidate produced by this cohort's transaction, available to package-level callers.
+ abstract DataTreeCandidateTip getCandidate();
+
+ @VisibleForTesting
+ public abstract ListenableFuture<Boolean> canCommit();
+ @VisibleForTesting
+ public abstract ListenableFuture<Void> preCommit();
+ @VisibleForTesting
+ public abstract ListenableFuture<Void> abort();
+ @VisibleForTesting
+ public abstract ListenableFuture<Void> commit();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent;
+import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * NotificationManager that delivers data change events synchronously on the calling
+ * thread, rather than dispatching to an executor.
+ */
+final class ShardDataTreeNotificationManager implements NotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTreeNotificationManager.class);
+
+ @Override
+ public void submitNotification(final DataChangeListenerRegistration<?> listener, final DOMImmutableDataChangeEvent notification) {
+ LOG.debug("Notifying listener {} about {}", listener.getInstance(), notification);
+
+ listener.getInstance().onDataChanged(notification);
+ }
+
+ @Override
+ public void submitNotifications(final DataChangeListenerRegistration<?> listener, final Iterable<DOMImmutableDataChangeEvent> notifications) {
+ final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> instance = listener.getInstance();
+ LOG.debug("Notifying listener {} about {}", instance, notifications);
+
+ // Deliver each event in order to the same listener instance.
+ for (DOMImmutableDataChangeEvent n : notifications) {
+ instance.onDataChanged(n);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A transaction chain attached to a Shard.
+ */
+@NotThreadSafe
+final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTreeTransactionChain.class);
+ private final ShardDataTree dataTree;
+ private final String chainId;
+
+ // Most recently finished (readied) transaction; the next transaction snapshots from it.
+ private ReadWriteShardDataTreeTransaction previousTx;
+ // At most one read-write transaction may be open on the chain at a time.
+ private ReadWriteShardDataTreeTransaction openTransaction;
+ private boolean closed;
+
+ ShardDataTreeTransactionChain(final String chainId, final ShardDataTree dataTree) {
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+ this.chainId = Preconditions.checkNotNull(chainId);
+ }
+
+ // Snapshot for the next transaction: the previous transaction's view if one finished,
+ // otherwise the data tree's current tip. Fails if the chain is closed or a tx is open.
+ private DataTreeSnapshot getSnapshot() {
+ Preconditions.checkState(!closed, "TransactionChain %s has been closed", this);
+ Preconditions.checkState(openTransaction == null, "Transaction %s is open", openTransaction);
+
+ if (previousTx == null) {
+ return dataTree.getDataTree().takeSnapshot();
+ } else {
+ return previousTx.getSnapshot();
+ }
+ }
+
+ ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final String txId) {
+ final DataTreeSnapshot snapshot = getSnapshot();
+ LOG.debug("Allocated read-only transaction {} snapshot {}", txId, snapshot);
+
+ return new ReadOnlyShardDataTreeTransaction(txId, snapshot);
+ }
+
+ ReadWriteShardDataTreeTransaction newReadWriteTransaction(final String txId) {
+ final DataTreeSnapshot snapshot = getSnapshot();
+ LOG.debug("Allocated read-write transaction {} snapshot {}", txId, snapshot);
+
+ openTransaction = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
+ return openTransaction;
+ }
+
+ void close() {
+ // NOTE(review): only marks the chain closed; any open transaction is left untouched.
+ closed = true;
+ }
+
+ @Override
+ protected void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction) {
+ // Only read-write transactions occupy the open slot; read-only aborts are no-ops here.
+ if (transaction instanceof ReadWriteShardDataTreeTransaction) {
+ Preconditions.checkState(openTransaction != null, "Attempted to abort transaction %s while none is outstanding", transaction);
+ LOG.debug("Aborted transaction {}", transaction);
+ openTransaction = null;
+ }
+ }
+
+ @Override
+ protected ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction) {
+ Preconditions.checkState(openTransaction != null, "Attempted to finish transaction %s while none is outstanding", transaction);
+
+ // dataTree is finalizing ready the transaction, we just record it for the next
+ // transaction in chain
+ final ShardDataTreeCohort delegate = dataTree.finishTransaction(transaction);
+ openTransaction = null;
+ previousTx = transaction;
+ LOG.debug("Committing transaction {}", transaction);
+
+ return new ChainedCommitCohort(this, transaction, delegate);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("id", chainId).toString();
+ }
+
+ // Clears the recorded previous transaction once it is no longer needed as a snapshot source.
+ void clearTransaction(ReadWriteShardDataTreeTransaction transaction) {
+ if (transaction.equals(previousTx)) {
+ previousTx = null;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+/**
+ * Common parent for objects that allocate shard data tree transactions (the tree itself
+ * or a transaction chain) and receive their abort/finish callbacks.
+ */
+abstract class ShardDataTreeTransactionParent {
+ // Called when the given transaction is aborted by its owner.
+ abstract void abortTransaction(AbstractShardDataTreeTransaction<?> transaction);
+ // Called when the given read-write transaction is readied; returns its commit cohort.
+ abstract ShardDataTreeCohort finishTransaction(ReadWriteShardDataTreeTransaction transaction);
+}
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public class ShardReadTransaction extends ShardTransaction {
private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
- private final DOMStoreReadTransaction transaction;
+ private final AbstractShardDataTreeTransaction<?> transaction;
- public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
+ public ShardReadTransaction(AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
ShardStats shardStats, String transactionID, short clientTxVersion) {
super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
final ActorRef sender = getSender();
final ActorRef self = getSelf();
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> future = transaction.read(DATASTORE_ROOT);
+ final Optional<NormalizedNode<?, ?>> result = transaction.getSnapshot().readNode(DATASTORE_ROOT);
- Futures.addCallback(future, new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
- @Override
- public void onSuccess(Optional<NormalizedNode<?, ?>> result) {
- byte[] serialized = SerializationUtils.serializeNormalizedNode(result.get());
- sender.tell(new CaptureSnapshotReply(serialized), self);
+ byte[] serialized = SerializationUtils.serializeNormalizedNode(result.get());
+ sender.tell(new CaptureSnapshotReply(serialized), self);
- self.tell(PoisonPill.getInstance(), self);
- }
-
- @Override
- public void onFailure(Throwable t) {
- sender.tell(new akka.actor.Status.Failure(t), self);
-
- self.tell(PoisonPill.getInstance(), self);
- }
- });
+ self.tell(PoisonPill.getInstance(), self);
}
@Override
- protected DOMStoreTransaction getDOMStoreTransaction() {
+ protected AbstractShardDataTreeTransaction<?> getDOMStoreTransaction() {
return transaction;
}
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
/**
* @author: syedbahm
* Date: 8/6/14
*/
public class ShardReadWriteTransaction extends ShardWriteTransaction {
- private final DOMStoreReadWriteTransaction transaction;
-
- public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
+ public ShardReadWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
ShardStats shardStats, String transactionID, short clientTxVersion) {
super(transaction, shardActor, shardStats, transactionID, clientTxVersion);
- this.transaction = transaction;
}
@Override
public void handleReceive(Object message) throws Exception {
if (message instanceof ReadData) {
- readData(transaction, (ReadData) message, !SERIALIZED_REPLY);
+ readData((ReadData) message, !SERIALIZED_REPLY);
} else if (message instanceof DataExists) {
- dataExists(transaction, (DataExists) message, !SERIALIZED_REPLY);
+ dataExists((DataExists) message, !SERIALIZED_REPLY);
} else if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction, ReadData.fromSerializable(message), SERIALIZED_REPLY);
+ readData(ReadData.fromSerializable(message), SERIALIZED_REPLY);
} else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
- dataExists(transaction, DataExists.fromSerializable(message), SERIALIZED_REPLY);
-
+ dataExists(DataExists.fromSerializable(message), SERIALIZED_REPLY);
} else {
super.handleReceive(message);
}
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.Lists;
import java.io.IOException;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.slf4j.Logger;
/**
* committed to the data store in the order the corresponding snapshot or log batch are received
* to preserve data store integrity.
*
- * @author Thomas Panetelis
+ * @author Thomas Pantelis
*/
class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
-
- private final InMemoryDOMDataStore store;
- private List<ModificationPayload> currentLogRecoveryBatch;
+ private static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
+ private final DataTree store;
private final String shardName;
private final Logger log;
+ private DataTreeModification transaction;
+ private int size;
- ShardRecoveryCoordinator(InMemoryDOMDataStore store, String shardName, Logger log) {
- this.store = store;
+ ShardRecoveryCoordinator(ShardDataTree store, String shardName, Logger log) {
+ this.store = store.getDataTree();
this.shardName = shardName;
this.log = log;
}
@Override
public void startLogRecoveryBatch(int maxBatchSize) {
- currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
-
log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
+ transaction = store.takeSnapshot().newModification();
+ size = 0;
}
@Override
public void appendRecoveredLogEntry(Payload payload) {
try {
- if(payload instanceof ModificationPayload) {
- currentLogRecoveryBatch.add((ModificationPayload) payload);
+ if (payload instanceof DataTreeCandidatePayload) {
+ DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
+ size++;
+ } else if (payload instanceof ModificationPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((ModificationPayload) payload).getModification()).apply(transaction);
+ size++;
} else if (payload instanceof CompositeModificationPayload) {
- currentLogRecoveryBatch.add(new ModificationPayload(MutableCompositeModification.fromSerializable(
- ((CompositeModificationPayload) payload).getModification())));
+ MutableCompositeModification.fromSerializable(
+ ((CompositeModificationPayload) payload).getModification()).apply(transaction);
+ size++;
} else if (payload instanceof CompositeModificationByteStringPayload) {
- currentLogRecoveryBatch.add(new ModificationPayload(MutableCompositeModification.fromSerializable(
- ((CompositeModificationByteStringPayload) payload).getModification())));
+ MutableCompositeModification.fromSerializable(
+ ((CompositeModificationByteStringPayload) payload).getModification()).apply(transaction);
+ size++;
} else {
log.error("{}: Unknown payload {} received during recovery", shardName, payload);
}
- } catch (IOException e) {
+ } catch (IOException | ClassNotFoundException e) {
log.error("{}: Error extracting ModificationPayload", shardName, e);
}
-
}
- private void commitTransaction(DOMStoreWriteTransaction transaction) {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- try {
- commitCohort.preCommit().get();
- commitCohort.commit().get();
- } catch (Exception e) {
- log.error("{}: Failed to commit Tx on recovery", shardName, e);
- }
+ private void commitTransaction(DataTreeModification tx) throws DataValidationFailedException {
+ tx.ready();
+ store.validate(tx);
+ store.commit(store.prepare(tx));
}
/**
*/
@Override
public void applyCurrentLogRecoveryBatch() {
- log.debug("{}: Applying current log recovery batch with size {}", shardName, currentLogRecoveryBatch.size());
-
- DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
- for(ModificationPayload payload: currentLogRecoveryBatch) {
- try {
- MutableCompositeModification.fromSerializable(payload.getModification()).apply(writeTx);
- } catch (Exception e) {
- log.error("{}: Error extracting ModificationPayload", shardName, e);
- }
+ log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
+ try {
+ commitTransaction(transaction);
+ } catch (DataValidationFailedException e) {
+ log.error("{}: Failed to apply recovery batch", shardName, e);
}
-
- commitTransaction(writeTx);
-
- currentLogRecoveryBatch = null;
+ transaction = null;
}
/**
*/
@Override
public void applyRecoverySnapshot(final byte[] snapshotBytes) {
- log.debug("{}: Applyng recovered sbapshot", shardName);
-
- DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
-
- NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+ log.debug("{}: Applying recovered snapshot", shardName);
- writeTx.write(YangInstanceIdentifier.builder().build(), node);
-
- commitTransaction(writeTx);
+ final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+ final DataTreeModification tx = store.takeSnapshot().newModification();
+ tx.write(ROOT, node);
+ try {
+ commitTransaction(tx);
+ } catch (DataValidationFailedException e) {
+ log.error("{}: Failed to apply recovery snapshot", shardName, e);
+ }
}
}
*/
package org.opendaylight.controller.cluster.datastore;
+import com.google.common.base.Preconditions;
import akka.actor.ActorRef;
import java.util.concurrent.ExecutionException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
private int createSnapshotTransactionCounter;
private final ShardTransactionActorFactory transactionActorFactory;
- private final InMemoryDOMDataStore store;
+ private final ShardDataTree store;
private final Logger log;
private final String logId;
- ShardSnapshotCohort(ShardTransactionActorFactory transactionActorFactory, InMemoryDOMDataStore store,
+ ShardSnapshotCohort(ShardTransactionActorFactory transactionActorFactory, ShardDataTree store,
Logger log, String logId) {
this.transactionActorFactory = transactionActorFactory;
- this.store = store;
+ this.store = Preconditions.checkNotNull(store);
this.log = log;
this.logId = logId;
}
log.info("{}: Applying snapshot", logId);
try {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+ ReadWriteShardDataTreeTransaction transaction = store.newReadWriteTransaction("snapshot-" + logId, null);
NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
// delete everything first
- transaction.delete(DATASTORE_ROOT);
+ transaction.getSnapshot().delete(DATASTORE_ROOT);
// Add everything from the remote node back
- transaction.write(DATASTORE_ROOT, node);
+ transaction.getSnapshot().write(DATASTORE_ROOT, node);
syncCommitTransaction(transaction);
} catch (InterruptedException | ExecutionException e) {
log.error("{}: An exception occurred when applying snapshot", logId, e);
}
- void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
+ void syncCommitTransaction(final ReadWriteShardDataTreeTransaction transaction)
throws ExecutionException, InterruptedException {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
+ ShardDataTreeCohort commitCohort = store.finishTransaction(transaction);
commitCohort.preCommit().get();
commitCohort.commit().get();
}
import akka.actor.ReceiveTimeout;
import akka.japi.Creator;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
this.clientTxVersion = clientTxVersion;
}
- public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
+ public static Props props(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
- return Props.create(new ShardTransactionCreator(transaction, shardActor,
+ return Props.create(new ShardTransactionCreator(type, transaction, shardActor,
datastoreContext, shardStats, transactionID, txnClientVersion));
}
- protected abstract DOMStoreTransaction getDOMStoreTransaction();
+ protected abstract AbstractShardDataTreeTransaction<?> getDOMStoreTransaction();
protected ActorRef getShardActor() {
return shardActor;
}
private void closeTransaction(boolean sendReply) {
- getDOMStoreTransaction().close();
+ getDOMStoreTransaction().abort();
if(sendReply && returnCloseTransactionReply()) {
getSender().tell(CloseTransactionReply.INSTANCE.toSerializable(), getSelf());
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
- protected void readData(DOMStoreReadTransaction transaction, ReadData message,
- final boolean returnSerialized) {
-
- final YangInstanceIdentifier path = message.getPath();
- try {
- final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future = transaction.read(path);
- Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
- ReadDataReply readDataReply = new ReadDataReply(optional.orNull(), clientTxVersion);
+ private boolean checkClosed(AbstractShardDataTreeTransaction<?> transaction) {
+ final boolean ret = transaction.isClosed();
+ if (ret) {
+ shardStats.incrementFailedReadTransactionsCount();
+ getSender().tell(new akka.actor.Status.Failure(new ReadFailedException("Transaction is closed")), getSelf());
+ }
+ return ret;
+ }
- sender().tell((returnSerialized ? readDataReply.toSerializable(): readDataReply), self());
+ protected void readData(AbstractShardDataTreeTransaction<?> transaction, ReadData message,
+ final boolean returnSerialized) {
- } catch (Exception e) {
- LOG.debug(String.format("Unexpected error reading path %s", path), e);
- shardStats.incrementFailedReadTransactionsCount();
- sender().tell(new akka.actor.Status.Failure(e), self());
+ if (checkClosed(transaction)) {
+ return;
}
+
+ final YangInstanceIdentifier path = message.getPath();
+ Optional<NormalizedNode<?, ?>> optional = transaction.getSnapshot().readNode(path);
+ ReadDataReply readDataReply = new ReadDataReply(optional.orNull(), clientTxVersion);
+ sender().tell((returnSerialized ? readDataReply.toSerializable(): readDataReply), self());
}
- protected void dataExists(DOMStoreReadTransaction transaction, DataExists message,
+ protected void dataExists(AbstractShardDataTreeTransaction<?> transaction, DataExists message,
final boolean returnSerialized) {
- final YangInstanceIdentifier path = message.getPath();
- try {
- Boolean exists = transaction.exists(path).checkedGet();
- DataExistsReply dataExistsReply = new DataExistsReply(exists);
- getSender().tell(returnSerialized ? dataExistsReply.toSerializable() :
- dataExistsReply, getSelf());
- } catch (ReadFailedException e) {
- getSender().tell(new akka.actor.Status.Failure(e),getSelf());
+ if (checkClosed(transaction)) {
+ return;
}
+ final YangInstanceIdentifier path = message.getPath();
+ boolean exists = transaction.getSnapshot().readNode(path).isPresent();
+ DataExistsReply dataExistsReply = DataExistsReply.create(exists);
+ getSender().tell(returnSerialized ? dataExistsReply.toSerializable() :
+ dataExistsReply, getSelf());
}
private static class ShardTransactionCreator implements Creator<ShardTransaction> {
private static final long serialVersionUID = 1L;
- final DOMStoreTransaction transaction;
+ final AbstractShardDataTreeTransaction<?> transaction;
final ActorRef shardActor;
final DatastoreContext datastoreContext;
final ShardStats shardStats;
final String transactionID;
final short txnClientVersion;
+ final TransactionType type;
- ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
+ ShardTransactionCreator(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
- this.transaction = transaction;
+ this.transaction = Preconditions.checkNotNull(transaction);
this.shardActor = shardActor;
this.shardStats = shardStats;
this.datastoreContext = datastoreContext;
this.transactionID = transactionID;
this.txnClientVersion = txnClientVersion;
+ this.type = type;
}
@Override
public ShardTransaction create() throws Exception {
- ShardTransaction tx;
- if(transaction instanceof DOMStoreReadWriteTransaction) {
- tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, shardStats, transactionID, txnClientVersion);
- } else if(transaction instanceof DOMStoreReadTransaction) {
- tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- shardStats, transactionID, txnClientVersion);
- } else {
- tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, shardStats, transactionID, txnClientVersion);
+ final ShardTransaction tx;
+ switch (type) {
+ case READ_ONLY:
+ tx = new ShardReadTransaction(transaction, shardActor,
+ shardStats, transactionID, txnClientVersion);
+ break;
+ case READ_WRITE:
+ tx = new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction)transaction,
+ shardActor, shardStats, transactionID, txnClientVersion);
+ break;
+ case WRITE_ONLY:
+ tx = new ShardWriteTransaction((ReadWriteShardDataTreeTransaction)transaction,
+ shardActor, shardStats, transactionID, txnClientVersion);
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled transaction type " + type);
}
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
package org.opendaylight.controller.cluster.datastore;
+import com.google.common.base.Preconditions;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
*/
public class ShardTransactionChain extends AbstractUntypedActor {
- private final DOMStoreTransactionChain chain;
+ private final ShardDataTreeTransactionChain chain;
private final DatastoreContext datastoreContext;
private final ShardStats shardStats;
- public ShardTransactionChain(DOMStoreTransactionChain chain, DatastoreContext datastoreContext,
+ public ShardTransactionChain(ShardDataTreeTransactionChain chain, DatastoreContext datastoreContext,
ShardStats shardStats) {
- this.chain = chain;
+ this.chain = Preconditions.checkNotNull(chain);
this.datastoreContext = datastoreContext;
this.shardStats = shardStats;
}
private ActorRef createTypedTransactionActor(CreateTransaction createTransaction) {
String transactionName = "shard-" + createTransaction.getTransactionId();
- if(createTransaction.getTransactionType() ==
- TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
- return getContext().actorOf(
- ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
- datastoreContext, shardStats, createTransaction.getTransactionId(),
- createTransaction.getVersion()), transactionName);
- } else if (createTransaction.getTransactionType() ==
- TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
- return getContext().actorOf(
- ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
- datastoreContext, shardStats, createTransaction.getTransactionId(),
- createTransaction.getVersion()), transactionName);
- } else if (createTransaction.getTransactionType() ==
- TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
- return getContext().actorOf(
- ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
- datastoreContext, shardStats, createTransaction.getTransactionId(),
- createTransaction.getVersion()), transactionName);
- } else {
- throw new IllegalArgumentException (
- "CreateTransaction message has unidentified transaction type=" +
- createTransaction.getTransactionType());
+
+ final TransactionType type = TransactionType.fromInt(createTransaction.getTransactionType());
+ final AbstractShardDataTreeTransaction<?> transaction;
+ switch (type) {
+ case READ_ONLY:
+ transaction = chain.newReadOnlyTransaction(transactionName);
+ break;
+ case READ_WRITE:
+ case WRITE_ONLY:
+ transaction = chain.newReadWriteTransaction(transactionName);
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled transaction type " + type);
}
+
+ return getContext().actorOf(
+ ShardTransaction.props(type, transaction, getShardActor(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
+ createTransaction.getVersion()), transactionName);
}
private void createTransaction(CreateTransaction createTransaction) {
createTransaction.getTransactionId()).toSerializable(), getSelf());
}
- public static Props props(DOMStoreTransactionChain chain, SchemaContext schemaContext,
+ public static Props props(ShardDataTreeTransactionChain chain, SchemaContext schemaContext,
DatastoreContext datastoreContext, ShardStats shardStats) {
return Props.create(new ShardTransactionChainCreator(chain, datastoreContext, shardStats));
}
private static class ShardTransactionChainCreator implements Creator<ShardTransactionChain> {
private static final long serialVersionUID = 1L;
- final DOMStoreTransactionChain chain;
+ final ShardDataTreeTransactionChain chain;
final DatastoreContext datastoreContext;
final ShardStats shardStats;
-
- ShardTransactionChainCreator(DOMStoreTransactionChain chain, DatastoreContext datastoreContext,
+ ShardTransactionChainCreator(ShardDataTreeTransactionChain chain, DatastoreContext datastoreContext,
ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
*/
package org.opendaylight.controller.cluster.datastore;
+import com.google.common.base.Preconditions;
import akka.actor.ActorRef;
import akka.actor.UntypedActorContext;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
/**
* A factory for creating ShardTransaction actors.
*/
class ShardTransactionActorFactory {
- private final DOMTransactionFactory domTransactionFactory;
+ private final ShardDataTree dataTree;
private final DatastoreContext datastoreContext;
private final String txnDispatcherPath;
private final ShardStats shardMBean;
private final UntypedActorContext actorContext;
private final ActorRef shardActor;
- ShardTransactionActorFactory(DOMTransactionFactory domTransactionFactory, DatastoreContext datastoreContext,
+ ShardTransactionActorFactory(ShardDataTree dataTree, DatastoreContext datastoreContext,
String txnDispatcherPath, ActorRef shardActor, UntypedActorContext actorContext, ShardStats shardMBean) {
- this.domTransactionFactory = domTransactionFactory;
+ this.dataTree = Preconditions.checkNotNull(dataTree);
this.datastoreContext = datastoreContext;
this.txnDispatcherPath = txnDispatcherPath;
this.shardMBean = shardMBean;
ActorRef newShardTransaction(TransactionProxy.TransactionType type, ShardTransactionIdentifier transactionID,
String transactionChainID, short clientVersion) {
+ final AbstractShardDataTreeTransaction<?> transaction;
+ switch (type) {
+ case READ_ONLY:
+ transaction = dataTree.newReadOnlyTransaction(transactionID.toString(), transactionChainID);
+ break;
+ case READ_WRITE:
+ case WRITE_ONLY:
+ transaction = dataTree.newReadWriteTransaction(transactionID.toString(), transactionChainID);
+ break;
+ default:
+ throw new IllegalArgumentException("Unsupported transaction type " + type);
+ }
- DOMStoreTransaction transaction = domTransactionFactory.newTransaction(type, transactionID.toString(),
- transactionChainID);
-
- return actorContext.actorOf(ShardTransaction.props(transaction, shardActor, datastoreContext, shardMBean,
+ return actorContext.actorOf(ShardTransaction.props(type, transaction, shardActor, datastoreContext, shardMBean,
transactionID.getRemoteTransactionId(), clientVersion).withDispatcher(txnDispatcherPath),
transactionID.toString());
}
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
+import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
/**
* @author: syedbahm
private final MutableCompositeModification compositeModification = new MutableCompositeModification();
private int totalBatchedModificationsReceived;
private Exception lastBatchedModificationsException;
- private final DOMStoreWriteTransaction transaction;
+ private final ReadWriteShardDataTreeTransaction transaction;
- public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
+ public ShardWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
ShardStats shardStats, String transactionID, short clientTxVersion) {
super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
@Override
- protected DOMStoreTransaction getDOMStoreTransaction() {
+ protected ReadWriteShardDataTreeTransaction getDOMStoreTransaction() {
return transaction;
}
if (message instanceof BatchedModifications) {
batchedModifications((BatchedModifications)message);
} else if (message instanceof ReadyTransaction) {
- readyTransaction(transaction, !SERIALIZED_REPLY);
+ readyTransaction(!SERIALIZED_REPLY, false);
} else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, SERIALIZED_REPLY);
+ readyTransaction(SERIALIZED_REPLY, false);
} else if(WriteData.isSerializedType(message)) {
- writeData(transaction, WriteData.fromSerializable(message), SERIALIZED_REPLY);
+ writeData(WriteData.fromSerializable(message), SERIALIZED_REPLY);
} else if(MergeData.isSerializedType(message)) {
- mergeData(transaction, MergeData.fromSerializable(message), SERIALIZED_REPLY);
+ mergeData(MergeData.fromSerializable(message), SERIALIZED_REPLY);
} else if(DeleteData.isSerializedType(message)) {
- deleteData(transaction, DeleteData.fromSerializable(message), SERIALIZED_REPLY);
+ deleteData(DeleteData.fromSerializable(message), SERIALIZED_REPLY);
} else if (message instanceof GetCompositedModification) {
// This is here for testing only
}
private void batchedModifications(BatchedModifications batched) {
+ if (checkClosed()) {
+ if (batched.isReady()) {
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
+ return;
+ }
+
try {
for(Modification modification: batched.getModifications()) {
compositeModification.addModification(modification);
- modification.apply(transaction);
+ modification.apply(transaction.getSnapshot());
}
totalBatchedModificationsReceived++;
totalBatchedModificationsReceived, batched.getTotalMessagesSent()));
}
- readyTransaction(transaction, false);
+ readyTransaction(false, batched.isDoCommitOnReady());
} else {
getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
}
}
}
- private void writeData(DOMStoreWriteTransaction transaction, WriteData message,
- boolean returnSerialized) {
+ protected final void dataExists(DataExists message, final boolean returnSerialized) {
+ super.dataExists(transaction, message, returnSerialized);
+ }
+
+ protected final void readData(ReadData message, final boolean returnSerialized) {
+ super.readData(transaction, message, returnSerialized);
+ }
+
+ private boolean checkClosed() {
+ if (transaction.isClosed()) {
+ getSender().tell(new akka.actor.Status.Failure(new IllegalStateException("Transaction is closed, no modifications allowed")), getSelf());
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private void writeData(WriteData message, boolean returnSerialized) {
LOG.debug("writeData at path : {}", message.getPath());
+ if (checkClosed()) {
+ return;
+ }
compositeModification.addModification(
new WriteModification(message.getPath(), message.getData()));
try {
- transaction.write(message.getPath(), message.getData());
+ transaction.getSnapshot().write(message.getPath(), message.getData());
WriteDataReply writeDataReply = WriteDataReply.INSTANCE;
getSender().tell(returnSerialized ? writeDataReply.toSerializable(message.getVersion()) :
writeDataReply, getSelf());
}
}
- private void mergeData(DOMStoreWriteTransaction transaction, MergeData message,
- boolean returnSerialized) {
+ private void mergeData(MergeData message, boolean returnSerialized) {
LOG.debug("mergeData at path : {}", message.getPath());
+ if (checkClosed()) {
+ return;
+ }
compositeModification.addModification(
new MergeModification(message.getPath(), message.getData()));
try {
- transaction.merge(message.getPath(), message.getData());
+ transaction.getSnapshot().merge(message.getPath(), message.getData());
MergeDataReply mergeDataReply = MergeDataReply.INSTANCE;
getSender().tell(returnSerialized ? mergeDataReply.toSerializable(message.getVersion()) :
mergeDataReply, getSelf());
}
}
- private void deleteData(DOMStoreWriteTransaction transaction, DeleteData message,
- boolean returnSerialized) {
+ private void deleteData(DeleteData message, boolean returnSerialized) {
LOG.debug("deleteData at path : {}", message.getPath());
+ if (checkClosed()) {
+ return;
+ }
compositeModification.addModification(new DeleteModification(message.getPath()));
try {
- transaction.delete(message.getPath());
+ transaction.getSnapshot().delete(message.getPath());
DeleteDataReply deleteDataReply = DeleteDataReply.INSTANCE;
getSender().tell(returnSerialized ? deleteDataReply.toSerializable(message.getVersion()) :
deleteDataReply, getSelf());
- }catch(Exception e){
+ } catch(Exception e) {
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
}
- private void readyTransaction(DOMStoreWriteTransaction transaction, boolean returnSerialized) {
+ private void readyTransaction(boolean returnSerialized, boolean doImmediateCommit) {
String transactionID = getTransactionID();
LOG.debug("readyTransaction : {}", transactionID);
- DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
+ ShardDataTreeCohort cohort = transaction.ready();
getShardActor().forward(new ForwardedReadyTransaction(transactionID, getClientTxVersion(),
- cohort, compositeModification, returnSerialized), getContext());
+ cohort, compositeModification, returnSerialized, doImmediateCommit), getContext());
// The shard will handle the commit from here so we're no longer needed - self-destruct.
getSelf().tell(PoisonPill.getInstance(), getSelf());
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
+ private static final Logger LOG = LoggerFactory.getLogger(SimpleShardDataTreeCohort.class);
+ private static final ListenableFuture<Boolean> TRUE_FUTURE = Futures.immediateFuture(Boolean.TRUE);
+ private static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
+ private final DataTreeModification transaction;
+ private final ShardDataTree dataTree;
+ private DataTreeCandidateTip candidate;
+
+ SimpleShardDataTreeCohort(final ShardDataTree dataTree, final DataTreeModification transaction) {
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+ this.transaction = Preconditions.checkNotNull(transaction);
+ }
+
+ @Override
+ DataTreeCandidateTip getCandidate() {
+ return candidate;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ try {
+ dataTree.getDataTree().validate(transaction);
+ LOG.debug("Transaction {} validated", transaction);
+ return TRUE_FUTURE;
+ } catch (Exception e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ try {
+ candidate = dataTree.getDataTree().prepare(transaction);
+ /*
+ * FIXME: this is the place where we should be interacting with persistence, specifically by invoking
+ * persist on the candidate (which gives us a Future).
+ */
+ LOG.debug("Transaction {} prepared candidate {}", transaction, candidate);
+ return VOID_FUTURE;
+ } catch (Exception e) {
+ LOG.debug("Transaction {} failed to prepare", transaction, e);
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ // No-op, really
+ return VOID_FUTURE;
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ try {
+ dataTree.getDataTree().commit(candidate);
+ } catch (Exception e) {
+ LOG.error("Transaction {} failed to commit", transaction, e);
+ return Futures.immediateFailedFuture(e);
+ }
+
+ LOG.debug("Transaction {} committed, proceeding to notify", transaction);
+ dataTree.notifyListeners(candidate);
+ return VOID_FUTURE;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import akka.dispatch.Futures;
+import akka.dispatch.OnComplete;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import java.util.Arrays;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+/**
+ * A cohort proxy implementation for a single-shard transaction commit. If the transaction was a direct commit
+ * to the shard, this implementation elides the CanCommitTransaction and CommitTransaction messages to the
+ * shard as an optimization. Otherwise the 3-phase commit to the shard is delegated to a
+ * ThreePhaseCommitCohortProxy instance (this is for backwards compatibility with pre-Lithium versions).
+ *
+ * @author Thomas Pantelis
+ */
+class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
+ private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class);
+
+ private final ActorContext actorContext;
+ private final Future<Object> cohortFuture;
+ private final String transactionId;
+ private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
+ private final OperationCallback.Reference operationCallbackRef;
+
+ SingleCommitCohortProxy(ActorContext actorContext, Future<Object> cohortFuture, String transactionId,
+ OperationCallback.Reference operationCallbackRef) {
+ this.actorContext = actorContext;
+ this.cohortFuture = cohortFuture;
+ this.transactionId = transactionId;
+ this.operationCallbackRef = operationCallbackRef;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ LOG.debug("Tx {} canCommit", transactionId);
+
+ final SettableFuture<Boolean> returnFuture = SettableFuture.create();
+
+ cohortFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object cohortResponse) {
+ if(failure != null) {
+ operationCallbackRef.get().failure();
+ returnFuture.setException(failure);
+ return;
+ }
+
+ operationCallbackRef.get().success();
+
+ if(cohortResponse instanceof ActorSelection) {
+ handlePreLithiumActorCohort((ActorSelection)cohortResponse, returnFuture);
+ return;
+ }
+
+ LOG.debug("Tx {} successfully completed direct commit", transactionId);
+
+ // The Future was the result of a direct commit to the shard, essentially eliding the
+ // front-end 3PC coordination. We don't really care about the specific Future
+ // response object, only that it completed successfully. At this point the Tx is complete
+ // so return true. The subsequent preCommit and commit phases will be no-ops, ie return
+ // immediate success, to complete the 3PC for the front-end.
+ returnFuture.set(Boolean.TRUE);
+ }
+ }, actorContext.getClientDispatcher());
+
+ return returnFuture;
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegateCohort.preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegateCohort.abort();
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ return delegateCohort.commit();
+ }
+
+ @Override
+ List<Future<Object>> getCohortFutures() {
+ return Arrays.asList(cohortFuture);
+ }
+
+ private void handlePreLithiumActorCohort(ActorSelection actorSelection, final SettableFuture<Boolean> returnFuture) {
+ // Handle backwards compatibility. An ActorSelection response would be returned from a
+ // pre-Lithium version. In this case delegate to a ThreePhaseCommitCohortProxy.
+ delegateCohort = new ThreePhaseCommitCohortProxy(actorContext,
+ Arrays.asList(Futures.successful(actorSelection)), transactionId);
+ com.google.common.util.concurrent.Futures.addCallback(delegateCohort.canCommit(), new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean canCommit) {
+ returnFuture.set(canCommit);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ returnFuture.setException(t);
+ }
+ });
+ }
+}
/**
* ThreePhaseCommitCohortProxy represents a set of remote cohort proxies
*/
-public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort {
+public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<ActorSelection> {
private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
- private static final ListenableFuture<Void> IMMEDIATE_SUCCESS =
- com.google.common.util.concurrent.Futures.immediateFuture(null);
-
private final ActorContext actorContext;
private final List<Future<ActorSelection>> cohortFutures;
private volatile List<ActorSelection> cohorts;
private final String transactionId;
- private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
- @Override
- public void run() {
- }
-
- @Override
- public void success() {
- }
-
- @Override
- public void failure() {
- }
- };
public ThreePhaseCommitCohortProxy(ActorContext actorContext,
List<Future<ActorSelection>> cohortFutures, String transactionId) {
public ListenableFuture<Void> preCommit() {
// We don't need to do anything here - preCommit is done atomically with the commit phase
// by the shard.
- return IMMEDIATE_SUCCESS;
+ return IMMEDIATE_VOID_SUCCESS;
}
@Override
@Override
public ListenableFuture<Void> commit() {
- OperationCallback operationCallback = cohortFutures.isEmpty() ? NO_OP_CALLBACK :
+ OperationCallback operationCallback = cohortFutures.isEmpty() ? OperationCallback.NO_OP_CALLBACK :
new TransactionRateLimitingCallback(actorContext);
return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
final Class<?> expectedResponseClass, final boolean propagateException) {
- return voidOperation(operationName, message, expectedResponseClass, propagateException, NO_OP_CALLBACK);
+ return voidOperation(operationName, message, expectedResponseClass, propagateException,
+ OperationCallback.NO_OP_CALLBACK);
}
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorSelection;
import com.google.common.base.Preconditions;
import java.util.Collections;
import java.util.List;
private interface State {
boolean isReady();
- List<Future<ActorSelection>> getPreviousReadyFutures();
+ List<Future<Object>> getPreviousReadyFutures();
}
private static class Allocated implements State {
}
@Override
- public List<Future<ActorSelection>> getPreviousReadyFutures() {
+ public List<Future<Object>> getPreviousReadyFutures() {
return transaction.getReadyFutures();
}
}
private static abstract class AbstractDefaultState implements State {
@Override
- public List<Future<ActorSelection>> getPreviousReadyFutures() {
+ public List<Future<Object>> getPreviousReadyFutures() {
return Collections.emptyList();
}
}
currentState = CLOSED_STATE;
// Send a close transaction chain request to each and every shard
- actorContext.broadcast(new CloseTransactionChain(transactionChainId));
+ actorContext.broadcast(new CloseTransactionChain(transactionChainId).toSerializable());
}
private ChainedTransactionProxy allocateWriteTransaction(TransactionProxy.TransactionType type) {
void readData(final YangInstanceIdentifier path, SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture);
void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture);
+
+ boolean supportsDirectCommit();
+
+ Future<Object> directCommit();
}
actorContext.sendOperationAsync(getActor(), CloseTransaction.INSTANCE.toSerializable());
}
+ @Override
+ public boolean supportsDirectCommit() {
+ return true;
+ }
+
+ @Override
+ public Future<Object> directCommit() {
+ LOG.debug("Tx {} directCommit called", getIdentifier());
+
+ // Send the remaining batched modifications, if any, with the ready flag set.
+
+ return sendBatchedModifications(true, true);
+ }
+
@Override
public Future<ActorSelection> readyTransaction() {
LOG.debug("Tx {} readyTransaction called", getIdentifier());
// Send the remaining batched modifications, if any, with the ready flag set.
- Future<Object> lastModificationsFuture = sendBatchedModifications(true);
+ Future<Object> lastModificationsFuture = sendBatchedModifications(true, false);
return transformReadyReply(lastModificationsFuture);
}
}
protected Future<Object> sendBatchedModifications() {
- return sendBatchedModifications(false);
+ return sendBatchedModifications(false, false);
}
- protected Future<Object> sendBatchedModifications(boolean ready) {
+ protected Future<Object> sendBatchedModifications(boolean ready, boolean doCommitOnReady) {
Future<Object> sent = null;
if(ready || (batchedModifications != null && !batchedModifications.getModifications().isEmpty())) {
if(batchedModifications == null) {
}
batchedModifications.setReady(ready);
+ batchedModifications.setDoCommitOnReady(doCommitOnReady);
batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
sent = executeOperationAsync(batchedModifications);
import java.util.concurrent.atomic.AtomicLong;
import org.opendaylight.controller.cluster.datastore.compat.PreLithiumTransactionContextImpl;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
return false;
}
- private boolean isRootPath(YangInstanceIdentifier path){
+ private static boolean isRootPath(YangInstanceIdentifier path) {
return !path.getPathArguments().iterator().hasNext();
}
}
@Override
- public AbstractThreePhaseCommitCohort ready() {
+ public AbstractThreePhaseCommitCohort<?> ready() {
Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
"Read-only transactions cannot be readied");
throttleOperation(txFutureCallbackMap.size());
+ final boolean isSingleShard = txFutureCallbackMap.size() == 1;
+ return isSingleShard ? createSingleCommitCohort() : createMultiCommitCohort();
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ private AbstractThreePhaseCommitCohort<Object> createSingleCommitCohort() {
+ TransactionFutureCallback txFutureCallback = txFutureCallbackMap.values().iterator().next();
+
+ LOG.debug("Tx {} Readying transaction for shard {} on chain {}", getIdentifier(),
+ txFutureCallback.getShardName(), transactionChainId);
+
+ final OperationCallback.Reference operationCallbackRef =
+ new OperationCallback.Reference(OperationCallback.NO_OP_CALLBACK);
+ final TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ final Future future;
+ if (transactionContext != null) {
+ // avoid the creation of a promise and a TransactionOperation
+ future = getReadyOrDirectCommitFuture(transactionContext, operationCallbackRef);
+ } else {
+ final Promise promise = akka.dispatch.Futures.promise();
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ promise.completeWith(getReadyOrDirectCommitFuture(transactionContext, operationCallbackRef));
+ }
+ });
+ future = promise.future();
+ }
+
+ return new SingleCommitCohortProxy(actorContext, future, getIdentifier().toString(), operationCallbackRef);
+ }
+
+ private Future<?> getReadyOrDirectCommitFuture(TransactionContext transactionContext,
+ OperationCallback.Reference operationCallbackRef) {
+ if(transactionContext.supportsDirectCommit()) {
+ TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback(actorContext);
+ operationCallbackRef.set(rateLimitingCallback);
+ rateLimitingCallback.run();
+ return transactionContext.directCommit();
+ } else {
+ return transactionContext.readyTransaction();
+ }
+ }
+
+ private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort() {
List<Future<ActorSelection>> cohortFutures = new ArrayList<>(txFutureCallbackMap.size());
for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- LOG.debug("Tx {} Readying transaction for shard {} chain {}", getIdentifier(),
+ LOG.debug("Tx {} Readying transaction for shard {} on chain {}", getIdentifier(),
txFutureCallback.getShardName(), transactionChainId);
final TransactionContext transactionContext = txFutureCallback.getTransactionContext();
cohortFutures.add(future);
}
- return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures,
- getIdentifier().toString());
+ return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures, getIdentifier().toString());
}
@Override
return ShardStrategyFactory.getStrategy(path).findShard(path);
}
- protected Future<ActorSelection> sendFindPrimaryShardAsync(String shardName) {
+ protected Future<PrimaryShardInfo> sendFindPrimaryShardAsync(String shardName) {
return actorContext.findPrimaryShardAsync(shardName);
}
private TransactionFutureCallback getOrCreateTxFutureCallback(String shardName) {
TransactionFutureCallback txFutureCallback = txFutureCallbackMap.get(shardName);
if(txFutureCallback == null) {
- Future<ActorSelection> findPrimaryFuture = sendFindPrimaryShardAsync(shardName);
+ Future<PrimaryShardInfo> findPrimaryFuture = sendFindPrimaryShardAsync(shardName);
final TransactionFutureCallback newTxFutureCallback = new TransactionFutureCallback(this, shardName);
txFutureCallback = newTxFutureCallback;
txFutureCallbackMap.put(shardName, txFutureCallback);
- findPrimaryFuture.onComplete(new OnComplete<ActorSelection>() {
+ findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
@Override
- public void onComplete(Throwable failure, ActorSelection primaryShard) {
+ public void onComplete(Throwable failure, PrimaryShardInfo primaryShardInfo) {
if(failure != null) {
newTxFutureCallback.createTransactionContext(failure, null);
} else {
- newTxFutureCallback.setPrimaryShard(primaryShard);
+ newTxFutureCallback.setPrimaryShard(primaryShardInfo.getPrimaryShardActor());
}
}
}, actorContext.getClientDispatcher());
return readyTxReply.getCohortPath();
}
+
+ @Override
+ public boolean supportsDirectCommit() {
+ return false;
+ }
+
+ @Override
+ public Future<Object> directCommit() {
+ throw new UnsupportedOperationException("directCommit is not supported for " + getClass());
+ }
}
private static final long serialVersionUID = 1L;
private boolean ready;
+ private boolean doCommitOnReady;
private int totalMessagesSent;
private String transactionID;
private String transactionChainID;
this.ready = ready;
}
+ public boolean isDoCommitOnReady() {
+ return doCommitOnReady;
+ }
+
+ public void setDoCommitOnReady(boolean doCommitOnReady) {
+ this.doCommitOnReady = doCommitOnReady;
+ }
+
public int getTotalMessagesSent() {
return totalMessagesSent;
}
transactionChainID = in.readUTF();
ready = in.readBoolean();
totalMessagesSent = in.readInt();
+ doCommitOnReady = in.readBoolean();
}
@Override
out.writeUTF(transactionChainID);
out.writeBoolean(ready);
out.writeInt(totalMessagesSent);
+ out.writeBoolean(doCommitOnReady);
}
@Override
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-public class DataExistsReply implements SerializableMessage{
+public class DataExistsReply implements SerializableMessage {
public static final Class<ShardTransactionMessages.DataExistsReply> SERIALIZABLE_CLASS =
ShardTransactionMessages.DataExistsReply.class;
+ private static final DataExistsReply TRUE = new DataExistsReply(true, null);
+ private static final DataExistsReply FALSE = new DataExistsReply(false, null);
+ private static final ShardTransactionMessages.DataExistsReply SERIALIZABLE_TRUE =
+ ShardTransactionMessages.DataExistsReply.newBuilder().setExists(true).build();
+ private static final ShardTransactionMessages.DataExistsReply SERIALIZABLE_FALSE =
+ ShardTransactionMessages.DataExistsReply.newBuilder().setExists(false).build();
+
private final boolean exists;
- public DataExistsReply(final boolean exists) {
+ private DataExistsReply(final boolean exists, final Void dummy) {
this.exists = exists;
}
+ /**
+     * @param exists whether the data exists at the queried path
+     * @deprecated Use {@link #create(boolean)} instead.
+ */
+ @Deprecated
+ public DataExistsReply(final boolean exists) {
+ this(exists, null);
+ }
+
+ public static DataExistsReply create(final boolean exists) {
+ return exists ? TRUE : FALSE;
+ }
+
public boolean exists() {
return exists;
}
- @Override public Object toSerializable() {
- return ShardTransactionMessages.DataExistsReply.newBuilder()
- .setExists(exists).build();
+ @Override
+ public Object toSerializable() {
+ return exists ? SERIALIZABLE_TRUE : SERIALIZABLE_FALSE;
}
- public static DataExistsReply fromSerializable(final Object serializable){
+ public static DataExistsReply fromSerializable(final Object serializable) {
ShardTransactionMessages.DataExistsReply o = (ShardTransactionMessages.DataExistsReply) serializable;
- return new DataExistsReply(o.getExists());
+ return create(o.getExists());
}
-
}
*/
package org.opendaylight.controller.cluster.datastore.messages;
+import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
/**
* Transaction ReadyTransaction message that is forwarded to the local Shard from the ShardTransaction.
*/
public class ForwardedReadyTransaction {
private final String transactionID;
- private final DOMStoreThreePhaseCommitCohort cohort;
+ private final ShardDataTreeCohort cohort;
private final Modification modification;
private final boolean returnSerialized;
+ private final boolean doImmediateCommit;
private final short txnClientVersion;
public ForwardedReadyTransaction(String transactionID, short txnClientVersion,
- DOMStoreThreePhaseCommitCohort cohort, Modification modification,
- boolean returnSerialized) {
+ ShardDataTreeCohort cohort, Modification modification,
+ boolean returnSerialized, boolean doImmediateCommit) {
this.transactionID = transactionID;
this.cohort = cohort;
this.modification = modification;
this.returnSerialized = returnSerialized;
this.txnClientVersion = txnClientVersion;
+ this.doImmediateCommit = doImmediateCommit;
}
public String getTransactionID() {
return transactionID;
}
- public DOMStoreThreePhaseCommitCohort getCohort() {
+ public ShardDataTreeCohort getCohort() {
return cohort;
}
public short getTxnClientVersion() {
return txnClientVersion;
}
+
+ public boolean isDoImmediateCommit() {
+ return doImmediateCommit;
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorSelection;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+
+/**
+ * Local message DTO that contains information about the primary shard.
+ *
+ * @author Thomas Pantelis
+ */
+public class PrimaryShardInfo {
+ private final ActorSelection primaryShardActor;
+ private final Optional<DataTree> localShardDataTree;
+
+ public PrimaryShardInfo(@Nonnull ActorSelection primaryShardActor, @Nonnull Optional<DataTree> localShardDataTree) {
+ this.primaryShardActor = Preconditions.checkNotNull(primaryShardActor);
+ this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+ }
+
+ /**
+ * Returns an ActorSelection representing the primary shard actor.
+ */
+ public @Nonnull ActorSelection getPrimaryShardActor() {
+ return primaryShardActor;
+ }
+
+ /**
+ * Returns an Optional whose value contains the primary shard's DataTree if the primary shard is local
+ * to the caller. Otherwise the Optional value is absent.
+ */
+ public @Nonnull Optional<DataTree> getLocalShardDataTree() {
+ return localShardDataTree;
+ }
+}
*/
package org.opendaylight.controller.cluster.datastore.messages;
+import akka.actor.ActorPath;
import akka.actor.ActorRef;
import com.google.common.base.Preconditions;
+
import java.io.Serializable;
/**
this.listenerRegistrationPath = Preconditions.checkNotNull(listenerRegistrationPath);
}
- public ActorRef getListenerRegistrationPath() {
- return listenerRegistrationPath;
+ public ActorPath getListenerRegistrationPath() {
+ return listenerRegistrationPath.path();
}
}
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* DeleteModification store all the parameters required to delete a path from the data tree
transaction.delete(getPath());
}
+ @Override
+ public void apply(DataTreeModification transaction) {
+ transaction.delete(getPath());
+ }
+
@Override
public byte getType() {
return DELETE;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* MergeModification stores all the parameters required to merge data into the specified path
transaction.merge(getPath(), getData());
}
+ @Override
+ public void apply(final DataTreeModification transaction) {
+ transaction.merge(getPath(), getData());
+ }
+
@Override
public byte getType() {
return MERGE;
import java.io.Externalizable;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* Represents a modification to the data store.
*/
void apply(DOMStoreWriteTransaction transaction);
+ /**
+     * Apply the modification to the specified transaction.
+     *
+     * @param transaction the DataTreeModification to which this modification is applied
+ */
+ void apply(DataTreeModification transaction);
+
byte getType();
@Deprecated
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* MutableCompositeModification is just a mutable version of a
}
}
+ @Override
+ public void apply(DataTreeModification transaction) {
+ for (Modification modification : modifications) {
+ modification.apply(transaction);
+ }
+ }
+
@Override
public byte getType() {
return COMPOSITE;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* WriteModification stores all the parameters required to write data to the specified path
transaction.write(getPath(), data);
}
+ @Override
+ public void apply(final DataTreeModification transaction) {
+ transaction.write(getPath(), data);
+ }
+
public NormalizedNode<?, ?> getData() {
return data;
}
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private Timeout transactionCommitOperationTimeout;
private Timeout shardInitializationTimeout;
private final Dispatchers dispatchers;
- private Cache<String, Future<ActorSelection>> primaryShardActorSelectionCache;
+ private Cache<String, Future<PrimaryShardInfo>> primaryShardInfoCache;
private volatile SchemaContext schemaContext;
private volatile boolean updated;
shardInitializationTimeout = new Timeout(datastoreContext.getShardInitializationTimeout().duration().$times(2));
- primaryShardActorSelectionCache = CacheBuilder.newBuilder()
+ primaryShardInfoCache = CacheBuilder.newBuilder()
.expireAfterWrite(datastoreContext.getShardLeaderElectionTimeout().duration().toMillis(), TimeUnit.MILLISECONDS)
.build();
}
return schemaContext;
}
- public Future<ActorSelection> findPrimaryShardAsync(final String shardName) {
- Future<ActorSelection> ret = primaryShardActorSelectionCache.getIfPresent(shardName);
+ public Future<PrimaryShardInfo> findPrimaryShardAsync(final String shardName) {
+ Future<PrimaryShardInfo> ret = primaryShardInfoCache.getIfPresent(shardName);
if(ret != null){
return ret;
}
Future<Object> future = executeOperationAsync(shardManager,
new FindPrimary(shardName, true), shardInitializationTimeout);
- return future.transform(new Mapper<Object, ActorSelection>() {
+ return future.transform(new Mapper<Object, PrimaryShardInfo>() {
@Override
- public ActorSelection checkedApply(Object response) throws Exception {
+ public PrimaryShardInfo checkedApply(Object response) throws Exception {
if(response instanceof PrimaryFound) {
PrimaryFound found = (PrimaryFound)response;
LOG.debug("Primary found {}", found.getPrimaryPath());
ActorSelection actorSelection = actorSystem.actorSelection(found.getPrimaryPath());
- primaryShardActorSelectionCache.put(shardName, Futures.successful(actorSelection));
- return actorSelection;
+ PrimaryShardInfo info = new PrimaryShardInfo(actorSelection, Optional.<DataTree>absent());
+ primaryShardInfoCache.put(shardName, Futures.successful(info));
+ return info;
} else if(response instanceof NotInitializedException) {
throw (NotInitializedException)response;
} else if(response instanceof PrimaryNotFoundException) {
public void broadcast(final Object message){
for(final String shardName : configuration.getAllShardNames()){
- Future<ActorSelection> primaryFuture = findPrimaryShardAsync(shardName);
- primaryFuture.onComplete(new OnComplete<ActorSelection>() {
+ Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
+ primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
@Override
- public void onComplete(Throwable failure, ActorSelection primaryShard) {
+ public void onComplete(Throwable failure, PrimaryShardInfo primaryShardInfo) {
if(failure != null) {
LOG.warn("broadcast failed to send message {} to shard {}: {}",
message.getClass().getSimpleName(), shardName, failure);
} else {
- primaryShard.tell(message, ActorRef.noSender());
+ primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
}
}
}, getClientDispatcher());
}
@VisibleForTesting
- Cache<String, Future<ActorSelection>> getPrimaryShardActorSelectionCache() {
- return primaryShardActorSelectionCache;
+ Cache<String, Future<PrimaryShardInfo>> getPrimaryShardInfoCache() {
+ return primaryShardInfoCache;
}
}
import akka.testkit.TestActorRef;
import com.google.common.base.Function;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.Collections;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
Assert.fail(String.format("Expected last applied: %d, Actual: %d", expectedValue, lastApplied));
}
- protected NormalizedNode<?, ?> readStore(final InMemoryDOMDataStore store) throws ReadFailedException {
- DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
- transaction.read(YangInstanceIdentifier.builder().build());
-
- Optional<NormalizedNode<?, ?>> optional = read.checkedGet();
-
- NormalizedNode<?, ?> normalizedNode = optional.get();
-
- transaction.close();
-
- return normalizedNode;
- }
-
- protected DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(final String cohortName,
- final InMemoryDOMDataStore dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ protected ShardDataTreeCohort setupMockWriteTransaction(final String cohortName,
+ final ShardDataTree dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
final MutableCompositeModification modification) {
return setupMockWriteTransaction(cohortName, dataStore, path, data, modification, null);
}
- protected DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(final String cohortName,
- final InMemoryDOMDataStore dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ protected ShardDataTreeCohort setupMockWriteTransaction(final String cohortName,
+ final ShardDataTree dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
final MutableCompositeModification modification,
- final Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit) {
+ final Function<ShardDataTreeCohort, ListenableFuture<Void>> preCommit) {
- DOMStoreWriteTransaction tx = dataStore.newWriteOnlyTransaction();
- tx.write(path, data);
- DOMStoreThreePhaseCommitCohort cohort = createDelegatingMockCohort(cohortName, tx.ready(), preCommit);
+ ReadWriteShardDataTreeTransaction tx = dataStore.newReadWriteTransaction("setup-mock-" + cohortName, null);
+ tx.getSnapshot().write(path, data);
+ ShardDataTreeCohort cohort = createDelegatingMockCohort(cohortName, dataStore.finishTransaction(tx), preCommit);
modification.addModification(new WriteModification(path, data));
return cohort;
}
- protected DOMStoreThreePhaseCommitCohort createDelegatingMockCohort(final String cohortName,
- final DOMStoreThreePhaseCommitCohort actual) {
+ protected ShardDataTreeCohort createDelegatingMockCohort(final String cohortName,
+ final ShardDataTreeCohort actual) {
return createDelegatingMockCohort(cohortName, actual, null);
}
- protected DOMStoreThreePhaseCommitCohort createDelegatingMockCohort(final String cohortName,
- final DOMStoreThreePhaseCommitCohort actual,
- final Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit) {
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, cohortName);
+ protected ShardDataTreeCohort createDelegatingMockCohort(final String cohortName,
+ final ShardDataTreeCohort actual,
+ final Function<ShardDataTreeCohort, ListenableFuture<Void>> preCommit) {
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, cohortName);
doAnswer(new Answer<ListenableFuture<Boolean>>() {
@Override
doAnswer(new Answer<ListenableFuture<Void>>() {
@Override
public ListenableFuture<Void> answer(final InvocationOnMock invocation) throws Throwable {
- return actual.preCommit();
+ if(preCommit != null) {
+ return preCommit.apply(actual);
+ } else {
+ return actual.preCommit();
+ }
}
}).when(cohort).preCommit();
}
}).when(cohort).abort();
+ doAnswer(new Answer<DataTreeCandidateTip>() {
+ @Override
+ public DataTreeCandidateTip answer(final InvocationOnMock invocation) {
+ return actual.getCandidate();
+ }
+ }).when(cohort).getCandidate();
+
return cohort;
}
public static NormalizedNode<?,?> readStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id)
throws ExecutionException, InterruptedException {
- return readStore(shard.underlyingActor().getDataStore(), id);
+ return readStore(shard.underlyingActor().getDataStore().getDataTree(), id);
}
- public static NormalizedNode<?,?> readStore(final InMemoryDOMDataStore store, final YangInstanceIdentifier id)
- throws ExecutionException, InterruptedException {
- DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
-
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
- transaction.read(id);
+ public static NormalizedNode<?,?> readStore(final DataTree store, final YangInstanceIdentifier id) {
+ DataTreeSnapshot transaction = store.takeSnapshot();
- Optional<NormalizedNode<?, ?>> optional = future.get();
+ Optional<NormalizedNode<?, ?>> optional = transaction.readNode(id);
NormalizedNode<?, ?> node = optional.isPresent()? optional.get() : null;
- transaction.close();
-
return node;
}
public static void writeToStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws ExecutionException, InterruptedException {
+ final NormalizedNode<?,?> node) throws InterruptedException, ExecutionException {
writeToStore(shard.underlyingActor().getDataStore(), id, node);
}
- public static void writeToStore(final InMemoryDOMDataStore store, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws ExecutionException, InterruptedException {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+ public static void writeToStore(final ShardDataTree store, final YangInstanceIdentifier id,
+ final NormalizedNode<?,?> node) throws InterruptedException, ExecutionException {
+ ReadWriteShardDataTreeTransaction transaction = store.newReadWriteTransaction("writeToStore", null);
- transaction.write(id, node);
+ transaction.getSnapshot().write(id, node);
+ ShardDataTreeCohort cohort = transaction.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ // NOTE(review): unlike the removed code (commitCohort.commit().get()), the
+ // commit result is not awaited here - confirm commit() is effectively
+ // synchronous for ShardDataTreeCohort, otherwise readers may race the write.
+ cohort.commit();
+ }
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- commitCohort.preCommit().get();
- commitCohort.commit().get();
+ /**
+ * Writes {@code node} at {@code id} directly into the {@link DataTree} using
+ * the full validate/prepare/commit sequence.
+ */
+ public static void writeToStore(final DataTree store, final YangInstanceIdentifier id,
+ final NormalizedNode<?,?> node) throws DataValidationFailedException {
+ DataTreeModification transaction = store.takeSnapshot().newModification();
+
+ transaction.write(id, node);
+ transaction.ready();
+ store.validate(transaction);
+ final DataTreeCandidate candidate = store.prepare(transaction);
+ store.commit(candidate);
}
@SuppressWarnings("serial")
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import akka.actor.Props;
import akka.dispatch.Futures;
import akka.testkit.JavaTestKit;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.CheckedFuture;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.Before;
import org.opendaylight.controller.cluster.datastore.TransactionProxyTest.TestException;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static ActorSystem system;
- private final Configuration configuration = new MockConfiguration();
+ private final Configuration configuration = new MockConfiguration() {
+ @Override
+ public Map<String, ShardStrategy> getModuleNameToShardStrategyMap() {
+ return ImmutableMap.<String, ShardStrategy>builder().put(
+ "junk", new ShardStrategy() {
+ @Override
+ public String findShard(YangInstanceIdentifier path) {
+ return "junk";
+ }
+ }).build();
+ }
+
+ @Override
+ public Optional<String> getModuleNameFromNameSpace(String nameSpace) {
+ return TestModel.JUNK_QNAME.getNamespace().toASCIIString().equals(nameSpace) ?
+ Optional.of("junk") : Optional.<String>absent();
+ }
+ };
@Mock
protected ActorContext mockActorContext;
doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
+ Timer timer = new MetricRegistry().timer("test");
+ doReturn(timer).when(mockActorContext).getOperationTimer(any(String.class));
+
ShardStrategyFactory.setConfiguration(configuration);
}
}
protected Future<Object> dataExistsSerializedReply(boolean exists) {
- return Futures.successful(new DataExistsReply(exists).toSerializable());
+ return Futures.successful(DataExistsReply.create(exists).toSerializable());
}
protected Future<DataExistsReply> dataExistsReply(boolean exists) {
- return Futures.successful(new DataExistsReply(exists));
+ return Futures.successful(DataExistsReply.create(exists));
}
protected Future<BatchedModificationsReply> batchedModificationsReply(int count) {
return Futures.successful(new BatchedModificationsReply(count));
}
- protected Future<Object> incompleteFuture(){
+ protected Future<Object> incompleteFuture() {
return mock(Future.class);
}
}
protected void expectBatchedModificationsReady(ActorRef actorRef) {
- doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+ expectBatchedModificationsReady(actorRef, false);
+ }
+
+ protected void expectBatchedModificationsReady(ActorRef actorRef, boolean doCommitOnReady) {
+ doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
+ readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
protected void expectBatchedModifications(int count) {
}
protected ActorRef setupActorContextWithoutInitialCreateTransaction(ActorSystem actorSystem) {
+ return setupActorContextWithoutInitialCreateTransaction(actorSystem, DefaultShardStrategy.DEFAULT_SHARD);
+ }
+
+ // Builds a successful findPrimaryShardAsync reply that wraps the given actor
+ // and carries no local DataTree (Optional.absent()).
+ protected Future<PrimaryShardInfo> primaryShardInfoReply(ActorSystem actorSystem, ActorRef actorRef) {
+ return Futures.successful(new PrimaryShardInfo(actorSystem.actorSelection(actorRef.path()),
+ Optional.<DataTree>absent()));
+ }
+
+ protected ActorRef setupActorContextWithoutInitialCreateTransaction(ActorSystem actorSystem, String shardName) {
ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
log.info("Created mock shard actor {}", actorRef);
doReturn(actorSystem.actorSelection(actorRef.path())).
when(mockActorContext).actorSelection(actorRef.path().toString());
- doReturn(Futures.successful(actorSystem.actorSelection(actorRef.path()))).
- when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+ doReturn(primaryShardInfoReply(actorSystem, actorRef)).
+ when(mockActorContext).findPrimaryShardAsync(eq(shardName));
doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
}
protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem,
- TransactionType type, int transactionVersion) {
- ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem);
+ TransactionType type, int transactionVersion, String shardName) {
+ ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName);
return setupActorContextWithInitialCreateTransaction(actorSystem, type, transactionVersion,
memberName, shardActorRef);
}
protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type) {
- return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION);
+ return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
+ DefaultShardStrategy.DEFAULT_SHARD);
}
+ protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type,
+ String shardName) {
+ return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
+ shardName);
+ }
protected void propagateReadFailedExceptionCause(CheckedFuture<?, ReadFailedException> future)
throws Throwable {
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), expIsReady, expected);
+ verifyBatchedModifications(batchedModifications.get(0), expIsReady, expIsReady, expected);
}
protected void verifyBatchedModifications(Object message, boolean expIsReady, Modification... expected) {
+ verifyBatchedModifications(message, expIsReady, false, expected);
+ }
+
+ protected void verifyBatchedModifications(Object message, boolean expIsReady, boolean expIsDoCommitOnReady,
+ Modification... expected) {
assertEquals("Message type", BatchedModifications.class, message.getClass());
BatchedModifications batchedModifications = (BatchedModifications)message;
assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
assertEquals("isReady", expIsReady, batchedModifications.isReady());
+ assertEquals("isDoCommitOnReady", expIsDoCommitOnReady, batchedModifications.isDoCommitOnReady());
for(int i = 0; i < batchedModifications.getModifications().size(); i++) {
Modification actual = batchedModifications.getModifications().get(i);
assertEquals("Modification type", expected[i].getClass(), actual.getClass());
}
}
- protected void verifyCohortFutures(ThreePhaseCommitCohortProxy proxy,
+ protected void verifyCohortFutures(AbstractThreePhaseCommitCohort<?> proxy,
Object... expReplies) throws Exception {
assertEquals("getReadyOperationFutures size", expReplies.length,
proxy.getCohortFutures().size());
- int i = 0;
- for( Future<ActorSelection> future: proxy.getCohortFutures()) {
+ List<Object> futureResults = new ArrayList<>();
+ for( Future<?> future: proxy.getCohortFutures()) {
assertNotNull("Ready operation Future is null", future);
+ try {
+ futureResults.add(Await.result(future, Duration.create(5, TimeUnit.SECONDS)));
+ } catch(Exception e) {
+ futureResults.add(e);
+ }
+ }
+
+ for(int i = 0; i < expReplies.length; i++) {
+ Object expReply = expReplies[i];
+ boolean found = false;
+ Iterator<?> iter = futureResults.iterator();
+ while(iter.hasNext()) {
+ Object actual = iter.next();
+ if(CommitTransactionReply.SERIALIZABLE_CLASS.isInstance(expReply) &&
+ CommitTransactionReply.SERIALIZABLE_CLASS.isInstance(actual)) {
+ found = true;
+ } else if(expReply instanceof ActorSelection && Objects.equal(expReply, actual)) {
+ found = true;
+ } else if(expReply instanceof Class && ((Class<?>)expReply).isInstance(actual)) {
+ found = true;
+ }
- Object expReply = expReplies[i++];
- if(expReply instanceof ActorSelection) {
- ActorSelection actual = Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- assertEquals("Cohort actor path", expReply, actual);
- } else {
- try {
- Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- fail("Expected exception from ready operation Future");
- } catch(Exception e) {
- assertTrue(String.format("Expected exception type %s. Actual %s",
- expReply, e.getClass()), ((Class<?>)expReply).isInstance(e));
+ if(found) {
+ iter.remove();
+ break;
}
}
+
+ if(!found) {
+ fail(String.format("No cohort Future response found for %s. Actual: %s", expReply, futureResults));
+ }
}
}
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
* Unit tests for DOMConcurrentDataCommitCoordinator.
assertFailure(future, cause, mockCohort1, mockCohort2);
}
+
+ // Verifies lazy creation: merely obtaining a broker read-write transaction
+ // must not create a backing store transaction.
+ @Test
+ public void testCreateReadWriteTransaction(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+ dataBroker.newReadWriteTransaction();
+
+ verify(domStore, never()).newReadWriteTransaction();
+ }
+
+
+ // Verifies lazy creation: merely obtaining a broker write-only transaction
+ // must not create a backing store transaction.
+ @Test
+ public void testCreateWriteOnlyTransaction(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+ dataBroker.newWriteOnlyTransaction();
+
+ verify(domStore, never()).newWriteOnlyTransaction();
+ }
+
+ // Verifies lazy creation: merely obtaining a broker read-only transaction
+ // must not create a backing store transaction.
+ @Test
+ public void testCreateReadOnlyTransaction(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+ dataBroker.newReadOnlyTransaction();
+
+ verify(domStore, never()).newReadOnlyTransaction();
+ }
+
+ // A per-store sub-transaction is created only on first access to that store,
+ // and exactly once per store regardless of how many operations touch it.
+ @Test
+ public void testLazySubTransactionCreationForReadWriteTransactions(){
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadWriteTransaction storeTxn = mock(DOMStoreReadWriteTransaction.class);
+
+ doReturn(storeTxn).when(operationalDomStore).newReadWriteTransaction();
+ doReturn(storeTxn).when(configDomStore).newReadWriteTransaction();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor);
+ DOMDataReadWriteTransaction dataTxn = dataBroker.newReadWriteTransaction();
+
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+
+ // Only the operational store has been touched so far.
+ verify(configDomStore, never()).newReadWriteTransaction();
+ verify(operationalDomStore, times(1)).newReadWriteTransaction();
+
+ dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ verify(configDomStore, times(1)).newReadWriteTransaction();
+ verify(operationalDomStore, times(1)).newReadWriteTransaction();
+
+ }
+
+ // Same lazy-creation contract as above, for write-only transactions.
+ @Test
+ public void testLazySubTransactionCreationForWriteOnlyTransactions(){
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreWriteTransaction storeTxn = mock(DOMStoreWriteTransaction.class);
+
+ doReturn(storeTxn).when(operationalDomStore).newWriteOnlyTransaction();
+ doReturn(storeTxn).when(configDomStore).newWriteOnlyTransaction();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor);
+ DOMDataWriteTransaction dataTxn = dataBroker.newWriteOnlyTransaction();
+
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ // Only the operational store has been touched so far.
+ verify(configDomStore, never()).newWriteOnlyTransaction();
+ verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
+
+ dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ verify(configDomStore, times(1)).newWriteOnlyTransaction();
+ verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
+
+ }
+
+
+ // Same lazy-creation contract as above, for read-only transactions.
+ @Test
+ public void testLazySubTransactionCreationForReadOnlyTransactions(){
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadTransaction storeTxn = mock(DOMStoreReadTransaction.class);
+
+ doReturn(storeTxn).when(operationalDomStore).newReadOnlyTransaction();
+ doReturn(storeTxn).when(configDomStore).newReadOnlyTransaction();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor);
+ DOMDataReadOnlyTransaction dataTxn = dataBroker.newReadOnlyTransaction();
+
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+
+ // Only the operational store has been touched so far.
+ verify(configDomStore, never()).newReadOnlyTransaction();
+ verify(operationalDomStore, times(1)).newReadOnlyTransaction();
+
+ dataTxn.read(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build());
+
+ verify(configDomStore, times(1)).newReadOnlyTransaction();
+ verify(operationalDomStore, times(1)).newReadOnlyTransaction();
+
+ }
+
+ // Submitting a transaction that touched only one store must hand exactly one
+ // cohort to the commit coordinator.
+ @Test
+ public void testSubmitWithOnlyOneSubTransaction() throws InterruptedException {
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadWriteTransaction mockStoreReadWriteTransaction = mock(DOMStoreReadWriteTransaction.class);
+ DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
+
+ doReturn(mockStoreReadWriteTransaction).when(operationalDomStore).newReadWriteTransaction();
+ doReturn(mockCohort).when(mockStoreReadWriteTransaction).ready();
+ doReturn(Futures.immediateFuture(false)).when(mockCohort).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort).abort();
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ // Use the diamond form to avoid a raw-type ArrayList.
+ final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList<>();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor) {
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction, Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ commitCohorts.addAll(cohorts);
+ latch.countDown();
+ return super.submit(transaction, cohorts);
+ }
+ };
+ DOMDataReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
+
+ domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+
+ domDataReadWriteTransaction.submit();
+
+ // Fail fast with a clear message if submit() is never invoked - the previous
+ // code ignored await()'s boolean result, masking a timeout.
+ assertTrue(latch.await(10, TimeUnit.SECONDS));
+
+ assertEquals(1, commitCohorts.size());
+ }
+
+    @Test
+    public void testSubmitWithOnlyTwoSubTransactions() throws InterruptedException {
+        // Verifies that a read-write transaction touching both logical
+        // datastores produces exactly two commit cohorts on submit.
+        DOMStore configDomStore = mock(DOMStore.class);
+        DOMStore operationalDomStore = mock(DOMStore.class);
+        DOMStoreReadWriteTransaction operationalTransaction = mock(DOMStoreReadWriteTransaction.class);
+        DOMStoreReadWriteTransaction configTransaction = mock(DOMStoreReadWriteTransaction.class);
+        DOMStoreThreePhaseCommitCohort mockCohortOperational = mock(DOMStoreThreePhaseCommitCohort.class);
+        DOMStoreThreePhaseCommitCohort mockCohortConfig = mock(DOMStoreThreePhaseCommitCohort.class);
+
+        doReturn(operationalTransaction).when(operationalDomStore).newReadWriteTransaction();
+        doReturn(configTransaction).when(configDomStore).newReadWriteTransaction();
+
+        // canCommit returns false so the 3PC aborts - this test only cares
+        // about how many cohorts are handed to submit().
+        doReturn(mockCohortOperational).when(operationalTransaction).ready();
+        doReturn(Futures.immediateFuture(false)).when(mockCohortOperational).canCommit();
+        doReturn(Futures.immediateFuture(null)).when(mockCohortOperational).abort();
+
+        doReturn(mockCohortConfig).when(configTransaction).ready();
+        doReturn(Futures.immediateFuture(false)).when(mockCohortConfig).canCommit();
+        doReturn(Futures.immediateFuture(null)).when(mockCohortConfig).abort();
+
+        final CountDownLatch latch = new CountDownLatch(1);
+        // Fixed: use the diamond operator instead of a raw ArrayList.
+        final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList<>();
+
+        ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+                operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor) {
+            @Override
+            public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction, Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+                // Capture the cohorts passed to submit so the test can inspect them.
+                commitCohorts.addAll(cohorts);
+                latch.countDown();
+                return super.submit(transaction, cohorts);
+            }
+        };
+        DOMDataReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
+
+        // Touch both datastores so a cohort is created for each.
+        domDataReadWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+        domDataReadWriteTransaction.merge(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+        domDataReadWriteTransaction.submit();
+
+        // Fixed: check the await result so a timeout fails with a clear message
+        // instead of falling through and asserting on an empty list.
+        assertTrue("Submit was not invoked within the timeout", latch.await(10, TimeUnit.SECONDS));
+
+        // Fixed: include the actual size in the failure message.
+        assertTrue("Expected 2 cohorts but got " + commitCohorts.size(), commitCohorts.size() == 2);
+    }
+
+    @Test
+    public void testCreateTransactionChain(){
+        // Creating a transaction chain on the broker must delegate to every
+        // backing store - here a single mock backs both logical datastores.
+        DOMStore mockStore = mock(DOMStore.class);
+        ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+                mockStore, LogicalDatastoreType.CONFIGURATION, mockStore), futureExecutor);
+
+        broker.createTransactionChain(mock(TransactionChainListener.class));
+
+        // One store-level chain per datastore type, i.e. two calls on the shared mock.
+        verify(mockStore, times(2)).createTransactionChain();
+    }
+
+    @Test
+    public void testCreateTransactionOnChain(){
+        // Verifies that a chained write transaction is created lazily: the
+        // backing store transaction must not exist until the first operation.
+        DOMStore mockStore = mock(DOMStore.class);
+        ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+                mockStore, LogicalDatastoreType.CONFIGURATION, mockStore), futureExecutor);
+
+        DOMStoreReadWriteTransaction mockStoreTransaction = mock(DOMStoreReadWriteTransaction.class);
+        DOMStoreTransactionChain mockStoreChain = mock(DOMStoreTransactionChain.class);
+
+        doReturn(mockStoreChain).when(mockStore).createTransactionChain();
+        doReturn(mockStoreTransaction).when(mockStoreChain).newWriteOnlyTransaction();
+
+        DOMTransactionChain chain = broker.createTransactionChain(mock(TransactionChainListener.class));
+        DOMDataWriteTransaction writeTx = chain.newWriteOnlyTransaction();
+
+        // No operation has been issued yet, so the backing transaction must
+        // not have been requested from the store-level chain.
+        verify(mockStoreChain, never()).newWriteOnlyTransaction();
+
+        // The first put triggers creation of the backing transaction.
+        writeTx.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+    }
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+import java.io.IOException;
+import java.util.Collection;
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+
+public class DataTreeCandidatePayloadTest {
+    private DataTreeCandidate candidate;
+
+    /**
+     * Returns the node in {@code nodes} whose identifier equals {@code arg},
+     * or {@code null} if there is no such node.
+     */
+    private static DataTreeCandidateNode findNode(final Collection<DataTreeCandidateNode> nodes, final PathArgument arg) {
+        for (DataTreeCandidateNode node : nodes) {
+            if (arg.equals(node.getIdentifier())) {
+                return node;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Asserts that the two child collections contain equal candidate nodes,
+     * matched by identifier, in both directions.
+     */
+    private static void assertChildrenEquals(final Collection<DataTreeCandidateNode> expected,
+            final Collection<DataTreeCandidateNode> actual) {
+        // Make sure all expected nodes are there
+        for (DataTreeCandidateNode exp : expected) {
+            final DataTreeCandidateNode act = findNode(actual, exp.getIdentifier());
+            assertNotNull("missing expected child", act);
+            assertCandidateNodeEquals(exp, act);
+        }
+        // Make sure no nodes are present which are not in the expected set.
+        // Fixed: an actual node is only acceptable if it IS found in the
+        // expected set - the previous assertNull inverted the check and would
+        // fail as soon as a legitimate matching child existed.
+        for (DataTreeCandidateNode act : actual) {
+            final DataTreeCandidateNode exp = findNode(expected, act.getIdentifier());
+            assertNotNull("unexpected child " + act.getIdentifier(), exp);
+        }
+    }
+
+    /**
+     * Asserts that two candidates have the same root path and structurally
+     * equal root nodes.
+     */
+    private static void assertCandidateEquals(final DataTreeCandidate expected, final DataTreeCandidate actual) {
+        assertEquals("root path", expected.getRootPath(), actual.getRootPath());
+
+        final DataTreeCandidateNode expRoot = expected.getRootNode();
+        // Fixed: take the root from "actual" - previously both roots came from
+        // "expected", so the root-type and root-data comparisons below could
+        // never detect a mismatch.
+        final DataTreeCandidateNode actRoot = actual.getRootNode();
+        assertEquals("root type", expRoot.getModificationType(), actRoot.getModificationType());
+
+        switch (actRoot.getModificationType()) {
+        case DELETE:
+        case WRITE:
+            // NOTE(review): for DELETE the after-image is absent on both sides,
+            // making this comparison trivially true - confirm whether deletes
+            // should compare getDataBefore() instead.
+            assertEquals("root data", expRoot.getDataAfter(), actRoot.getDataAfter());
+            break;
+        case SUBTREE_MODIFIED:
+            assertChildrenEquals(expRoot.getChildNodes(), actRoot.getChildNodes());
+            break;
+        default:
+            fail("Unexpected root type " + actRoot.getModificationType());
+            break;
+        }
+
+        assertCandidateNodeEquals(expected.getRootNode(), actual.getRootNode());
+    }
+
+    /**
+     * Recursively asserts that two candidate nodes are equal: same type, same
+     * identifier, and equal data or children depending on the modification.
+     */
+    private static void assertCandidateNodeEquals(final DataTreeCandidateNode expected, final DataTreeCandidateNode actual) {
+        assertEquals("child type", expected.getModificationType(), actual.getModificationType());
+        assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
+
+        switch (actual.getModificationType()) {
+        case DELETE:
+        case WRITE:
+            assertEquals("child data", expected.getDataAfter(), actual.getDataAfter());
+            break;
+        case SUBTREE_MODIFIED:
+            assertChildrenEquals(expected.getChildNodes(), actual.getChildNodes());
+            break;
+        default:
+            fail("Unexpected child type " + actual.getModificationType());
+            break;
+        }
+    }
+
+    @Before
+    public void setUp() {
+        // A simple WRITE candidate: /test containing a single "desc" leaf.
+        final YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+        final NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+            new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+            withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        candidate = DataTreeCandidates.fromNormalizedNode(writePath, writeData);
+    }
+
+    @Test
+    public void testCandidateSerialization() throws IOException {
+        final DataTreeCandidatePayload payload = DataTreeCandidatePayload.create(candidate);
+        // NOTE(review): this pins the exact encoded size and will break on any
+        // serialization-format change - confirm that is intentional.
+        assertEquals("payload size", 141, payload.size());
+    }
+
+    @Test
+    public void testCandidateSerDes() throws IOException {
+        // Round-trip through the payload's candidate encoding.
+        final DataTreeCandidatePayload payload = DataTreeCandidatePayload.create(candidate);
+        assertCandidateEquals(candidate, payload.getCandidate());
+    }
+
+    @Test
+    public void testPayloadSerDes() throws IOException {
+        // Round-trip through Java serialization of the payload object itself.
+        final DataTreeCandidatePayload payload = DataTreeCandidatePayload.create(candidate);
+        assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate());
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.DeadLetter;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import com.google.common.collect.ImmutableList;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+public class DataTreeChangeListenerActorTest extends AbstractActorTest {
+
+    /**
+     * After an EnableNotification(true), a DataTreeChanged message must be
+     * delivered to the wrapped listener and acknowledged with a reply.
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    @Test
+    public void testDataChangedWhenNotificationsAreEnabled(){
+        new JavaTestKit(getSystem()) {{
+            final DataTreeCandidate mockTreeCandidate = Mockito.mock(DataTreeCandidate.class);
+            final ImmutableList<DataTreeCandidate> mockCandidates = ImmutableList.of(mockTreeCandidate);
+            final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+            final Props props = DataTreeChangeListenerActor.props(mockListener);
+            final ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedNotificationsEnabled");
+
+            // Let the DataChangeListener know that notifications should be enabled
+            subject.tell(new EnableNotification(true), getRef());
+
+            subject.tell(new DataTreeChanged(mockCandidates),
+                getRef());
+
+            expectMsgClass(DataTreeChangedReply.class);
+
+            // The change set must have been forwarded to the wrapped listener.
+            Mockito.verify(mockListener).onDataTreeChanged(mockCandidates);
+        }};
+    }
+
+    /**
+     * With notifications never enabled, a DataTreeChanged message must neither
+     * reach the wrapped listener nor be acknowledged.
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    @Test
+    public void testDataChangedWhenNotificationsAreDisabled(){
+        new JavaTestKit(getSystem()) {{
+            final DataTreeCandidate mockTreeCandidate = Mockito.mock(DataTreeCandidate.class);
+            final ImmutableList<DataTreeCandidate> mockCandidates = ImmutableList.of(mockTreeCandidate);
+            final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+            final Props props = DataTreeChangeListenerActor.props(mockListener);
+            final ActorRef subject =
+                    getSystem().actorOf(props, "testDataTreeChangedNotificationsDisabled");
+
+            subject.tell(new DataTreeChanged(mockCandidates),
+                getRef());
+
+            // Give the actor a bounded window in which no reply may arrive.
+            new Within(duration("1 seconds")) {
+                @Override
+                protected void run() {
+                    expectNoMsg();
+
+                    Mockito.verify(mockListener, Mockito.never()).onDataTreeChanged(
+                            Matchers.anyCollectionOf(DataTreeCandidate.class));
+                }
+            };
+        }};
+    }
+
+    /**
+     * A DataTreeChanged sent with no sender must not produce a reply that ends
+     * up in dead letters.
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    @Test
+    public void testDataChangedWithNoSender(){
+        new JavaTestKit(getSystem()) {{
+            final DataTreeCandidate mockTreeCandidate = Mockito.mock(DataTreeCandidate.class);
+            final ImmutableList<DataTreeCandidate> mockCandidates = ImmutableList.of(mockTreeCandidate);
+            final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+            final Props props = DataTreeChangeListenerActor.props(mockListener);
+            final ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedWithNoSender");
+
+            getSystem().eventStream().subscribe(getRef(), DeadLetter.class);
+
+            subject.tell(new DataTreeChanged(mockCandidates), ActorRef.noSender());
+
+            // Make sure no DataChangedReply is sent to DeadLetters.
+            while(true) {
+                DeadLetter deadLetter;
+                try {
+                    deadLetter = expectMsgClass(duration("1 seconds"), DeadLetter.class);
+                } catch (AssertionError e) {
+                    // Timed out - got no DeadLetter - this is good
+                    break;
+                }
+
+                // We may get DeadLetters for other messages we don't care about.
+                Assert.assertFalse("Unexpected DataTreeChangedReply",
+                        deadLetter.message() instanceof DataTreeChangedReply);
+            }
+        }};
+    }
+
+    /**
+     * A RuntimeException thrown by the listener must not kill the actor:
+     * subsequent notifications are still delivered and acknowledged.
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    @Test
+    public void testDataChangedWithListenerRuntimeEx(){
+        new JavaTestKit(getSystem()) {{
+            final DataTreeCandidate mockTreeCandidate1 = Mockito.mock(DataTreeCandidate.class);
+            final ImmutableList<DataTreeCandidate> mockCandidates1 = ImmutableList.of(mockTreeCandidate1);
+            final DataTreeCandidate mockTreeCandidate2 = Mockito.mock(DataTreeCandidate.class);
+            final ImmutableList<DataTreeCandidate> mockCandidates2 = ImmutableList.of(mockTreeCandidate2);
+            final DataTreeCandidate mockTreeCandidate3 = Mockito.mock(DataTreeCandidate.class);
+            final ImmutableList<DataTreeCandidate> mockCandidates3 = ImmutableList.of(mockTreeCandidate3);
+
+            final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+            // The second notification makes the listener throw.
+            Mockito.doThrow(new RuntimeException("mock")).when(mockListener).onDataTreeChanged(mockCandidates2);
+
+            Props props = DataTreeChangeListenerActor.props(mockListener);
+            ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedWithListenerRuntimeEx");
+
+            // Let the DataChangeListener know that notifications should be enabled
+            subject.tell(new EnableNotification(true), getRef());
+
+            subject.tell(new DataTreeChanged(mockCandidates1),getRef());
+            expectMsgClass(DataTreeChangedReply.class);
+
+            subject.tell(new DataTreeChanged(mockCandidates2),getRef());
+            expectMsgClass(DataTreeChangedReply.class);
+
+            subject.tell(new DataTreeChanged(mockCandidates3),getRef());
+            expectMsgClass(DataTreeChangedReply.class);
+
+            // All three change sets reached the listener despite the exception.
+            Mockito.verify(mockListener).onDataTreeChanged(mockCandidates1);
+            Mockito.verify(mockListener).onDataTreeChanged(mockCandidates2);
+            Mockito.verify(mockListener).onDataTreeChanged(mockCandidates3);
+        }};
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.actor.Terminated;
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.Futures;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
+    @SuppressWarnings("unchecked")
+    private final DOMDataTreeChangeListener mockListener = mock(DOMDataTreeChangeListener.class);
+
+    /**
+     * Happy path: the proxy finds the local shard, registers the listener,
+     * records the registration actor, and close() tears everything down.
+     */
+    @Test(timeout=10000)
+    public void testSuccessfulRegistration() {
+        new JavaTestKit(getSystem()) {{
+            ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+                    mock(ClusterWrapper.class), mock(Configuration.class));
+
+            final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+                    new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+            final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+            // init() blocks on replies, so run it off-thread while this test
+            // kit plays the shard manager/shard roles.
+            new Thread() {
+                @Override
+                public void run() {
+                    proxy.init("shard-1", path);
+                }
+
+            }.start();
+
+            FiniteDuration timeout = duration("5 seconds");
+            FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+            Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+            reply(new LocalShardFound(getRef()));
+
+            RegisterDataTreeChangeListener registerMsg = expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+            Assert.assertEquals("getPath", path, registerMsg.getPath());
+
+            reply(new RegisterDataTreeChangeListenerReply(getRef()));
+
+            // Poll up to ~5s for the asynchronous registration to be recorded.
+            for(int i = 0; (i < 20 * 5) && proxy.getListenerRegistrationActor() == null; i++) {
+                Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+            }
+
+            Assert.assertEquals("getListenerRegistrationActor", getSystem().actorSelection(getRef().path()),
+                    proxy.getListenerRegistrationActor());
+
+            watch(proxy.getDataChangeListenerActor());
+
+            proxy.close();
+
+            // The listener registration actor should get a Close message
+            expectMsgClass(timeout, CloseDataTreeChangeListenerRegistration.class);
+
+            // The DataChangeListener actor should be terminated
+            expectMsgClass(timeout, Terminated.class);
+
+            // Closing a second time must be a no-op.
+            proxy.close();
+
+            expectNoMsg();
+        }};
+    }
+
+    /**
+     * If the shard does not exist locally, no registration message is sent.
+     */
+    @Test(timeout=10000)
+    public void testLocalShardNotFound() {
+        new JavaTestKit(getSystem()) {{
+            ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+                    mock(ClusterWrapper.class), mock(Configuration.class));
+
+            final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+                    new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+            final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+            new Thread() {
+                @Override
+                public void run() {
+                    proxy.init("shard-1", path);
+                }
+
+            }.start();
+
+            FiniteDuration timeout = duration("5 seconds");
+            FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+            Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+            reply(new LocalShardNotFound("shard-1"));
+
+            // No RegisterDataTreeChangeListener should follow.
+            expectNoMsg(duration("1 seconds"));
+        }};
+    }
+
+    /**
+     * If the shard lookup fails with NotInitializedException, no registration
+     * message is sent.
+     */
+    @Test(timeout=10000)
+    public void testLocalShardNotInitialized() {
+        new JavaTestKit(getSystem()) {{
+            ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+                    mock(ClusterWrapper.class), mock(Configuration.class));
+
+            final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+                    new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+            final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+            new Thread() {
+                @Override
+                public void run() {
+                    proxy.init("shard-1", path);
+                }
+
+            }.start();
+
+            FiniteDuration timeout = duration("5 seconds");
+            FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+            Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+            reply(new NotInitializedException("not initialized"));
+
+            new Within(duration("1 seconds")) {
+                @Override
+                protected void run() {
+                    expectNoMsg();
+                }
+            };
+        }};
+    }
+
+    /**
+     * If the registration request fails, no registration actor is recorded on
+     * the proxy.
+     */
+    @Test
+    public void testFailedRegistration() {
+        new JavaTestKit(getSystem()) {{
+            ActorSystem mockActorSystem = mock(ActorSystem.class);
+
+            ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class),
+                    "testFailedRegistration");
+            doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
+            // NOTE(review): sameThreadExecutor() is deprecated in newer Guava -
+            // consider directExecutor() when the dependency version allows.
+            ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(
+                    MoreExecutors.sameThreadExecutor());
+
+            ActorContext actorContext = mock(ActorContext.class);
+            final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+
+            doReturn(executor).when(actorContext).getClientDispatcher();
+            doReturn(mockActorSystem).when(actorContext).getActorSystem();
+
+            String shardName = "shard-1";
+            final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+                    new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+            doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+            doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+            doReturn(Futures.failed(new RuntimeException("mock"))).
+                    when(actorContext).executeOperationAsync(any(ActorRef.class),
+                            any(Object.class), any(Timeout.class));
+            doReturn(mock(DatastoreContext.class)).when(actorContext).getDatastoreContext();
+
+            // Fixed: use the shardName variable instead of repeating the
+            // literal, keeping the stubbed findLocalShardAsync(shardName) in sync.
+            proxy.init(shardName, path);
+
+            Assert.assertEquals("getListenerRegistrationActor", null,
+                    proxy.getListenerRegistrationActor());
+        }};
+    }
+
+    /**
+     * If the proxy is closed while registration is in flight, the registration
+     * is immediately closed and never recorded.
+     */
+    @Test
+    public void testCloseBeforeRegistration() {
+        new JavaTestKit(getSystem()) {{
+            ActorContext actorContext = mock(ActorContext.class);
+
+            String shardName = "shard-1";
+
+            doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+            doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
+            doReturn(getSystem()).when(actorContext).getActorSystem();
+            doReturn(Dispatchers.DEFAULT_DISPATCHER_PATH).when(actorContext).getNotificationDispatcherPath();
+            doReturn(getSystem().actorSelection(getRef().path())).
+                    when(actorContext).actorSelection(getRef().path());
+            doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+            doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+
+            final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+                    new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+            // Close the proxy from within the registration call itself so the
+            // successful reply arrives only after close() has run.
+            Answer<Future<Object>> answer = new Answer<Future<Object>>() {
+                @Override
+                public Future<Object> answer(InvocationOnMock invocation) {
+                    proxy.close();
+                    return Futures.successful((Object)new RegisterDataTreeChangeListenerReply(getRef()));
+                }
+            };
+
+            doAnswer(answer).when(actorContext).executeOperationAsync(any(ActorRef.class),
+                    any(Object.class), any(Timeout.class));
+
+            proxy.init(shardName, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
+
+            expectMsgClass(duration("5 seconds"), CloseDataTreeChangeListenerRegistration.class);
+
+            Assert.assertEquals("getListenerRegistrationActor", null,
+                    proxy.getListenerRegistrationActor());
+        }};
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+public class DataTreeChangeListenerRegistrationActorTest extends AbstractActorTest {
+    // NOTE(review): this store appears unused by the test below - confirm it
+    // is needed (e.g. by planned tests) or remove it together with its imports.
+    private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+
+    static {
+        store.onGlobalContextUpdated(TestModel.createTestContext());
+    }
+
+    /**
+     * Sending a close message must close the wrapped listener registration and
+     * produce a reply to the sender.
+     */
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnReceiveCloseListenerRegistration() throws Exception {
+        new JavaTestKit(getSystem()) {{
+            final ListenerRegistration mockListenerReg = Mockito.mock(ListenerRegistration.class);
+            final Props props = DataTreeChangeListenerRegistrationActor.props(mockListenerReg);
+            final ActorRef subject = getSystem().actorOf(props, "testCloseListenerRegistration");
+
+            subject.tell(CloseDataTreeChangeListenerRegistration.getInstance(), getRef());
+
+            expectMsgClass(duration("1 second"), CloseDataTreeChangeListenerRegistrationReply.class);
+
+            // The close must propagate to the underlying registration.
+            Mockito.verify(mockListenerReg).close();
+        }};
+    }
+}
}};
}
- private void testTransactionWritesWithShardNotInitiallyReady(final boolean writeOnly) throws Exception {
+ private void testTransactionWritesWithShardNotInitiallyReady(final String testName,
+ final boolean writeOnly) throws Exception {
new IntegrationTestKit(getSystem()) {{
- String testName = "testTransactionWritesWithShardNotInitiallyReady";
String shardName = "test-1";
// Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
@Test
public void testWriteOnlyTransactionWithShardNotInitiallyReady() throws Exception {
datastoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
- testTransactionWritesWithShardNotInitiallyReady(true);
+ testTransactionWritesWithShardNotInitiallyReady("testWriteOnlyTransactionWithShardNotInitiallyReady", true);
}
@Test
public void testReadWriteTransactionWithShardNotInitiallyReady() throws Exception {
- testTransactionWritesWithShardNotInitiallyReady(false);
+ testTransactionWritesWithShardNotInitiallyReady("testReadWriteTransactionWithShardNotInitiallyReady", false);
}
@Test
}
void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
- Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
assertEquals("canCommit", true, canCommit);
cohort.preCommit().get(5, TimeUnit.SECONDS);
cohort.commit().get(5, TimeUnit.SECONDS);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import java.util.Arrays;
+import java.util.Collection;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
+
+    /**
+     * Verifies that the forwarding listener wraps the change set in a
+     * DataTreeChanged message and sends it to its target actor selection.
+     */
+    @Test
+    public void testOnDataChanged() throws Exception {
+        final ActorRef collector = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+        ForwardingDataTreeChangeListener listener = new ForwardingDataTreeChangeListener(
+                getSystem().actorSelection(collector.path()));
+
+        Collection<DataTreeCandidate> changes = Arrays.asList(Mockito.mock(DataTreeCandidate.class));
+        listener.onDataTreeChanged(changes);
+
+        // The very collection instance we passed in must arrive at the actor.
+        DataTreeChanged message = MessageCollectorActor.expectFirstMatching(collector, DataTreeChanged.class);
+        Assert.assertSame(changes, message.getChanges());
+    }
+}
OperationCompleter completer = new OperationCompleter(operationLimiter );
- completer.onComplete(null, new DataExistsReply(true));
+ completer.onComplete(null, DataExistsReply.create(true));
assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
- completer.onComplete(null, new DataExistsReply(true));
+ completer.onComplete(null, DataExistsReply.create(true));
assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
completer.onComplete(null, new IllegalArgumentException());
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
+import org.opendaylight.controller.cluster.datastore.utils.MockDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListenerReply;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
public class ShardTest extends AbstractShardTest {
+ private static final QName CARS_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test:cars", "2014-03-13", "cars");
@Test
public void testRegisterChangeListener() throws Exception {
}};
}
+ @Test
+ public void testRegisterDataTreeChangeListener() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ // Start a shard and wait until it is leader so the registration is served locally.
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps(), "testRegisterDataTreeChangeListener");
+
+ waitUntilLeader(shard);
+
+ shard.tell(new UpdateSchemaContext(SchemaContextHelper.full()), ActorRef.noSender());
+
+ // Listener expects exactly 1 change event; it is fronted by an actor the shard can message.
+ MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataTreeChangeListenerActor.props(listener),
+ "testRegisterDataTreeChangeListener-DataTreeChangeListener");
+
+ shard.tell(new RegisterDataTreeChangeListener(TestModel.TEST_PATH, dclActor), getRef());
+
+ // The registration path in the reply must point at a child actor of the shard.
+ RegisterDataTreeChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
+ RegisterDataTreeChangeListenerReply.class);
+ String replyPath = reply.getListenerRegistrationPath().toString();
+ assertTrue("Incorrect reply path: " + replyPath, replyPath.matches(
+ "akka:\\/\\/test\\/user\\/testRegisterDataTreeChangeListener\\/\\$.*"));
+
+ // Writing under the registered path should produce a data tree change notification.
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ listener.waitForChangeEvents();
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @SuppressWarnings("serial")
+ @Test
+ public void testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ // Latches coordinating the test with election handling: the first ElectionTimeout
+ // is held back until the listener registration completes, which keeps the shard
+ // in a non-leader state while the registration is made.
+ final CountDownLatch onFirstElectionTimeout = new CountDownLatch(1);
+ final CountDownLatch onChangeListenerRegistered = new CountDownLatch(1);
+ Creator<Shard> creator = new Creator<Shard>() {
+ boolean firstElectionTimeout = true;
+
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<String,String>emptyMap(),
+ dataStoreContextBuilder.persistent(false).build(), SCHEMA_CONTEXT) {
+ @Override
+ public void onReceiveCommand(final Object message) throws Exception {
+ if(message instanceof ElectionTimeout && firstElectionTimeout) {
+ // Defer the first ElectionTimeout until registration is done,
+ // then re-deliver it so the shard can proceed to become leader.
+ firstElectionTimeout = false;
+ final ActorRef self = getSelf();
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.awaitUninterruptibly(
+ onChangeListenerRegistered, 5, TimeUnit.SECONDS);
+ self.tell(message, self);
+ }
+ }.start();
+
+ onFirstElectionTimeout.countDown();
+ } else {
+ super.onReceiveCommand(message);
+ }
+ }
+ };
+ }
+ };
+
+ MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataTreeChangeListenerActor.props(listener),
+ "testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration-DataChangeListener");
+
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)),
+ "testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration");
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ assertEquals("Got first ElectionTimeout", true,
+ onFirstElectionTimeout.await(5, TimeUnit.SECONDS));
+
+ // Registration must succeed even though the shard is not (yet) the leader.
+ shard.tell(new RegisterDataTreeChangeListener(path, dclActor), getRef());
+ RegisterDataTreeChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+ RegisterDataTreeChangeListenerReply.class);
+ assertNotNull("getListenerRegistrationPath", reply.getListenerRegistrationPath());
+
+ // Sanity check: the shard must still be leaderless at this point.
+ shard.tell(new FindLeader(), getRef());
+ FindLeaderReply findLeaderReply =
+ expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+ assertNull("Expected the shard not to be the leader", findLeaderReply.getLeaderActor());
+
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ // Release the deferred ElectionTimeout so the shard can become leader.
+ onChangeListenerRegistered.countDown();
+
+ // TODO: investigate why we do not receive data change events
+ listener.waitForChangeEvents();
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
@Test
public void testCreateTransaction(){
new ShardTestKit(getSystem()) {{
TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(),
"testApplySnapshot");
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- store.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree store = InMemoryDataTreeFactory.getInstance().create();
+ store.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(store, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
}
@Test
- public void testRecovery() throws Exception {
+ public void testApplyStateWithCandidatePayload() throws Exception {
- // Set up the InMemorySnapshotStore.
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testApplyState");
+
+ // Build a candidate for a write of the TEST container and wrap it in an ApplyState
+ // message, as the Raft layer would when applying a replicated log entry.
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ DataTreeCandidate candidate = DataTreeCandidates.fromNormalizedNode(TestModel.TEST_PATH, node);
+
+ ApplyState applyState = new ApplyState(null, "test", new ReplicatedLogImplEntry(1, 2,
+ DataTreeCandidatePayload.create(candidate)));
+
+ shard.underlyingActor().onReceiveCommand(applyState);
+
+ // Applying the state should have written the node into the shard's store.
+ NormalizedNode<?,?> actual = readStore(shard, TestModel.TEST_PATH);
+ assertEquals("Applied state", node, actual);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
- InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
- testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree setupInMemorySnapshotStore() throws DataValidationFailedException {
+ DataTree testStore = InMemoryDataTreeFactory.getInstance().create();
+ testStore.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
SerializationUtils.serializeNormalizedNode(root),
Collections.<ReplicatedLogEntry>emptyList(), 0, 1, -1, -1));
+ return testStore;
+ }
+
+ // Runs the modification through the full validate/prepare/commit cycle on the source
+ // tree, then serializes the resulting candidate as a replication payload.
+ private static DataTreeCandidatePayload payloadForModification(DataTree source, DataTreeModification mod) throws DataValidationFailedException {
+ source.validate(mod);
+ final DataTreeCandidate prepared = source.prepare(mod);
+ source.commit(prepared);
+ return DataTreeCandidatePayload.create(prepared);
+ }
+
+ @Test
+ public void testDataTreeCandidateRecovery() throws Exception {
+ // Set up the InMemorySnapshotStore.
+ final DataTree source = setupInMemorySnapshotStore();
+
+ // Journal entry 0: a candidate payload that writes an empty outer list.
+ final DataTreeModification writeMod = source.takeSnapshot().newModification();
+ writeMod.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
+
+ // Set up the InMemoryJournal.
+ InMemoryJournal.addEntry(shardID.toString(), 0, new ReplicatedLogImplEntry(0, 1, payloadForModification(source, writeMod)));
+
+ int nListEntries = 16;
+ Set<Integer> listEntryKeys = new HashSet<>();
+
+ // Add some ModificationPayload entries
+ // Entries 1..nListEntries each merge one outer-list element keyed by its index.
+ for (int i = 1; i <= nListEntries; i++) {
+ listEntryKeys.add(Integer.valueOf(i));
+
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
+
+ final DataTreeModification mod = source.takeSnapshot().newModification();
+ mod.merge(path, ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i));
+
+ InMemoryJournal.addEntry(shardID.toString(), i, new ReplicatedLogImplEntry(i, 1,
+ payloadForModification(source, mod)));
+ }
+
+ // Final journal entry marks all of the above as applied.
+ InMemoryJournal.addEntry(shardID.toString(), nListEntries + 1,
+ new ApplyJournalEntries(nListEntries));
+
+ // Recover the shard and verify every list entry was restored.
+ testRecovery(listEntryKeys);
+ }
+
+ @Test
+ public void testModicationRecovery() throws Exception {
+
+ // Set up the InMemorySnapshotStore.
+ setupInMemorySnapshotStore();
// Set up the InMemoryJournal.
testRecovery(listEntryKeys);
}
- private ModificationPayload newModificationPayload(final Modification... mods) throws IOException {
+ private static ModificationPayload newModificationPayload(final Modification... mods) throws IOException {
MutableCompositeModification compMod = new MutableCompositeModification();
for(Modification mod: mods) {
compMod.addModification(mod);
return new ModificationPayload(compMod);
}
- @SuppressWarnings({ "unchecked" })
@Test
public void testConcurrentThreePhaseCommits() throws Throwable {
new ShardTestKit(getSystem()) {{
// Setup 3 simulated transactions with mock cohorts backed by real cohorts.
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
modification2);
String transactionID3 = "tx3";
MutableCompositeModification modification3 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
expectMsgClass(duration, ReadyTransactionReply.class));
assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the ForwardedReadyTransaction for the next 2 Tx's.
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
- cohort3, modification3, true), getRef());
+ cohort3, modification3, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
// Verify data in the data store.
- NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
- assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
- assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
- outerList.getValue() instanceof Iterable);
- Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
- assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
- entry instanceof MapEntryNode);
- MapEntryNode mapEntry = (MapEntryNode)entry;
- Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
- mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
- assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
- assertEquals(TestModel.ID_QNAME.getLocalName() + " value", 1, idLeaf.get().getValue());
+ verifyOuterListEntry(shard, 1);
verifyLastApplied(shard, 2);
}};
}
- private BatchedModifications newBatchedModifications(String transactionID, YangInstanceIdentifier path,
- NormalizedNode<?, ?> data, boolean ready) {
- return newBatchedModifications(transactionID, null, path, data, ready);
+ // Convenience overload that delegates with a null transaction chain ID.
+ private static BatchedModifications newBatchedModifications(String transactionID, YangInstanceIdentifier path,
+ NormalizedNode<?, ?> data, boolean ready, boolean doCommitOnReady) {
+ return newBatchedModifications(transactionID, null, path, data, ready, doCommitOnReady);
}
- private BatchedModifications newBatchedModifications(String transactionID, String transactionChainID,
- YangInstanceIdentifier path, NormalizedNode<?, ?> data, boolean ready) {
+ // Builds a BatchedModifications message containing a single write of data at path,
+ // with the ready and doCommitOnReady flags set as requested.
+ private static BatchedModifications newBatchedModifications(String transactionID, String transactionChainID,
+ YangInstanceIdentifier path, NormalizedNode<?, ?> data, boolean ready, boolean doCommitOnReady) {
BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION, transactionChainID);
batched.addModification(new WriteModification(path, data));
batched.setReady(ready);
+ batched.setDoCommitOnReady(doCommitOnReady);
return batched;
}
- @SuppressWarnings("unchecked")
@Test
- public void testMultipleBatchedModifications() throws Throwable {
+ public void testBatchedModificationsWithNoCommitOnReady() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
- "testMultipleBatchedModifications");
+ "testBatchedModificationsWithNoCommitOnReady");
waitUntilLeader(shard);
final String transactionID = "tx";
FiniteDuration duration = duration("5 seconds");
- final AtomicReference<DOMStoreThreePhaseCommitCohort> mockCohort = new AtomicReference<>();
+ final AtomicReference<ShardDataTreeCohort> mockCohort = new AtomicReference<>();
ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
@Override
- public DOMStoreThreePhaseCommitCohort decorate(String txID, DOMStoreThreePhaseCommitCohort actual) {
+ public ShardDataTreeCohort decorate(String txID, ShardDataTreeCohort actual) {
if(mockCohort.get() == null) {
mockCohort.set(createDelegatingMockCohort("cohort", actual));
}
// Send a BatchedModifications to start a transaction.
shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false, false), getRef());
expectMsgClass(duration, BatchedModificationsReply.class);
// Send a couple more BatchedModifications.
shard.tell(newBatchedModifications(transactionID, TestModel.OUTER_LIST_PATH,
- ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), false), getRef());
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), false, false), getRef());
expectMsgClass(duration, BatchedModificationsReply.class);
shard.tell(newBatchedModifications(transactionID, YangInstanceIdentifier.builder(
TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true), getRef());
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
// Verify data in the data store.
- NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
- assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
- assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
- outerList.getValue() instanceof Iterable);
- Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
- assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
- entry instanceof MapEntryNode);
- MapEntryNode mapEntry = (MapEntryNode)entry;
- Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
- mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
- assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
- assertEquals(TestModel.ID_QNAME.getLocalName() + " value", 1, idLeaf.get().getValue());
+ verifyOuterListEntry(shard, 1);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testBatchedModificationsWithCommitOnReady() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testBatchedModificationsWithCommitOnReady");
+
+ waitUntilLeader(shard);
+
+ final String transactionID = "tx";
+ FiniteDuration duration = duration("5 seconds");
+
+ // Wrap the real cohort in a delegating mock so the 3-phase call order can be verified.
+ final AtomicReference<ShardDataTreeCohort> mockCohort = new AtomicReference<>();
+ ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
+ @Override
+ public ShardDataTreeCohort decorate(String txID, ShardDataTreeCohort actual) {
+ if(mockCohort.get() == null) {
+ mockCohort.set(createDelegatingMockCohort("cohort", actual));
+ }
+
+ return mockCohort.get();
+ }
+ };
+
+ shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
+
+ // Send a BatchedModifications to start a transaction.
+
+ shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false, false), getRef());
+ expectMsgClass(duration, BatchedModificationsReply.class);
+
+ // Send a couple more BatchedModifications.
+
+ shard.tell(newBatchedModifications(transactionID, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), false, false), getRef());
+ expectMsgClass(duration, BatchedModificationsReply.class);
+
+ // Final batch with ready=true and doCommitOnReady=true: the shard should run the
+ // full commit itself and reply with CommitTransactionReply, with no separate
+ // CanCommit/Commit messages from the frontend.
+ shard.tell(newBatchedModifications(transactionID, YangInstanceIdentifier.builder(
+ TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true, true), getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // The 3 commit phases must have run in order on the cohort.
+ InOrder inOrder = inOrder(mockCohort.get());
+ inOrder.verify(mockCohort.get()).canCommit();
+ inOrder.verify(mockCohort.get()).preCommit();
+ inOrder.verify(mockCohort.get()).commit();
+
+ // Verify data in the data store.
+
+ verifyOuterListEntry(shard, 1);
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
+ // Asserts that the outer list in the shard's store contains a map entry whose
+ // id leaf equals expIDValue. Fails with a descriptive message at each step.
+ @SuppressWarnings("unchecked")
+ private static void verifyOuterListEntry(final TestActorRef<Shard> shard, Object expIDValue) throws Exception {
+ NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+ assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
+ outerList.getValue() instanceof Iterable);
+ Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
+ entry instanceof MapEntryNode);
+ MapEntryNode mapEntry = (MapEntryNode)entry;
+ Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
+ mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
+ assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.get().getValue());
+ }
+
@Test
public void testBatchedModificationsOnTransactionChain() throws Throwable {
new ShardTestKit(getSystem()) {{
ContainerNode containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
YangInstanceIdentifier path = TestModel.TEST_PATH;
shard.tell(newBatchedModifications(transactionID1, transactionChainID, path,
- containerNode, true), getRef());
+ containerNode, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Create a read Tx on the same chain.
final AtomicBoolean overrideLeaderCalls = new AtomicBoolean();
new ShardTestKit(getSystem()) {{
Creator<Shard> creator = new Creator<Shard>() {
+ private static final long serialVersionUID = 1L;
+
@Override
public Shard create() throws Exception {
return new Shard(shardID, Collections.<String,String>emptyMap(),
}};
}
+ @Test
+ public void testForwardedReadyTransactionWithImmediateCommit() throws Exception{
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testForwardedReadyTransactionWithImmediateCommit");
+
+ waitUntilLeader(shard);
+
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ ShardDataTreeCohort cohort = setupMockWriteTransaction("cohort", dataStore,
+ TestModel.TEST_PATH, containerNode, modification);
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ // doCommitOnReady=true: the shard should commit immediately and reply with
+ // CommitTransactionReply rather than ReadyTransactionReply.
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, ThreePhaseCommitCohortMessages.CommitTransactionReply.class);
+
+ // All 3 commit phases must have run in order on the cohort.
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
+
+ NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+ assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
@Test
public void testCommitWithPersistenceDisabled() throws Throwable {
dataStoreContextBuilder.persistent(false);
waitUntilLeader(shard);
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
// Setup a simulated transactions with a mock cohort.
String transactionID = "tx";
MutableCompositeModification modification = new MutableCompositeModification();
NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort", dataStore,
+ ShardDataTreeCohort cohort = setupMockWriteTransaction("cohort", dataStore,
TestModel.TEST_PATH, containerNode, modification);
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
+ cohort, modification, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
}};
}
+ // Creates a mock DataTreeCandidateTip whose root node reports a WRITE of a
+ // cars container at the root path.
+ private static DataTreeCandidateTip mockCandidate(final String name) {
+ DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
+ DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
+ doReturn(ModificationType.WRITE).when(mockCandidateNode).getModificationType();
+ doReturn(Optional.of(ImmutableNodes.containerNode(CARS_QNAME))).when(mockCandidateNode).getDataAfter();
+ doReturn(YangInstanceIdentifier.builder().build()).when(mockCandidate).getRootPath();
+ doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
+ return mockCandidate;
+ }
+
+ // Creates a mock DataTreeCandidateTip whose root node reports UNMODIFIED, i.e. a
+ // candidate representing a commit with no actual data changes.
+ private static DataTreeCandidateTip mockUnmodifiedCandidate(final String name) {
+ DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
+ DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
+ doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).getModificationType();
+ doReturn(YangInstanceIdentifier.builder().build()).when(mockCandidate).getRootPath();
+ doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
+ return mockCandidate;
+ }
+
@Test
public void testCommitWhenTransactionHasNoModifications(){
// Note that persistence is enabled which would normally result in the entry getting written to the journal
String transactionID = "tx1";
MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).preCommit();
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).commit();
+ doReturn(mockUnmodifiedCandidate("cohort1-candidate")).when(cohort).getCandidate();
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
+ cohort, modification, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
String transactionID = "tx1";
MutableCompositeModification modification = new MutableCompositeModification();
modification.addModification(new DeleteModification(YangInstanceIdentifier.builder().build()));
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).preCommit();
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).commit();
+ doReturn(mockCandidate("cohort1-candidate")).when(cohort).getCandidate();
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
+ cohort, modification, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
waitUntilLeader(shard);
- // Setup 2 simulated transactions with mock cohorts. The first one fails in the
+ // Setup 2 simulated transactions with mock cohorts. The first one fails in the
// commit phase.
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort1 = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).preCommit();
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).commit();
+ doReturn(mockCandidate("cohort1-candidate")).when(cohort1).getCandidate();
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ ShardDataTreeCohort cohort2 = mock(ShardDataTreeCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
waitUntilLeader(shard);
- String transactionID = "tx1";
- MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
- doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).preCommit();
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ ShardDataTreeCohort cohort1 = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).preCommit();
+
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ ShardDataTreeCohort cohort2 = mock(ShardDataTreeCohort.class, "cohort2");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
- // Send the CanCommitTransaction message.
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Can commit", true, canCommitReply.getCanCommit());
- // Send the CommitTransaction message. This should send back an error
- // for preCommit failure.
+ // Send the CanCommitTransaction message for the 2nd Tx. This should get queued and
+ // processed after the first Tx completes.
- shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
+ Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2).toSerializable(), timeout);
+
+ // Send the CommitTransaction message for the first Tx. This should send back an error
+ // and trigger the 2nd Tx to proceed.
+
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
expectMsgClass(duration, akka.actor.Status.Failure.class);
- InOrder inOrder = inOrder(cohort);
- inOrder.verify(cohort).canCommit();
- inOrder.verify(cohort).preCommit();
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ canCommitFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable t, final Object resp) {
+ latch.countDown();
+ }
+ }, getSystem().dispatcher());
+
+ assertEquals("2nd CanCommit complete", true, latch.await(5, TimeUnit.SECONDS));
+
+ InOrder inOrder = inOrder(cohort1, cohort2);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort2).canCommit();
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
final FiniteDuration duration = duration("5 seconds");
- String transactionID = "tx1";
+ String transactionID1 = "tx1";
MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
- shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Send another can commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
+ CanCommitTransactionReply reply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("getCanCommit", true, reply.getCanCommit());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testCanCommitPhaseFalseResponse() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitPhaseFalseResponse");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.FALSE)).when(cohort).canCommit();
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ CanCommitTransactionReply reply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("getCanCommit", false, reply.getCanCommit());
+
+ // Send another can commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
+ reply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("getCanCommit", true, reply.getCanCommit());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testImmediateCommitWithCanCommitPhaseFailure() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testImmediateCommitWithCanCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
expectMsgClass(duration, akka.actor.Status.Failure.class);
+            // Send another ready transaction with immediate commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).commit();
+ DataTreeCandidateTip candidate = mock(DataTreeCandidateTip.class);
+ DataTreeCandidateNode candidateRoot = mock(DataTreeCandidateNode.class);
+ doReturn(ModificationType.UNMODIFIED).when(candidateRoot).getModificationType();
+ doReturn(candidateRoot).when(candidate).getRootNode();
+ doReturn(candidate).when(cohort).getCandidate();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testImmediateCommitWithCanCommitPhaseFalseResponse() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testImmediateCommitWithCanCommitPhaseFalseResponse");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.FALSE)).when(cohort).canCommit();
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+            // Send another ready transaction with immediate commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).commit();
+ DataTreeCandidateTip candidate = mock(DataTreeCandidateTip.class);
+ DataTreeCandidateNode candidateRoot = mock(DataTreeCandidateNode.class);
+ doReturn(ModificationType.UNMODIFIED).when(candidateRoot).getModificationType();
+ doReturn(candidateRoot).when(candidate).getRootNode();
+ doReturn(candidate).when(cohort).getCandidate();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
waitUntilLeader(shard);
final FiniteDuration duration = duration("5 seconds");
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
final String transactionID = "tx1";
- Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit =
- new Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>>() {
+ Function<ShardDataTreeCohort, ListenableFuture<Void>> preCommit =
+ new Function<ShardDataTreeCohort, ListenableFuture<Void>>() {
@Override
- public ListenableFuture<Void> apply(final DOMStoreThreePhaseCommitCohort cohort) {
+ public ListenableFuture<Void> apply(final ShardDataTreeCohort cohort) {
ListenableFuture<Void> preCommitFuture = cohort.preCommit();
// Simulate an AbortTransaction message occurring during replication, after
};
MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME),
modification, preCommit);
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
+ cohort, modification, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
final FiniteDuration duration = duration("5 seconds");
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
writeToStore(shard, TestModel.OUTER_LIST_PATH,
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
MutableCompositeModification modification2 = new MutableCompositeModification();
YangInstanceIdentifier listNodePath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
listNodePath,
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2),
modification2);
// Ready the Tx's
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx. We don't send the commit so it should timeout.
shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+ // Try to commit the 1st Tx - should fail as it's not the current Tx.
+
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
// Commit the 2nd Tx.
shard.tell(new CommitTransaction(transactionID2).toSerializable(), getRef());
final FiniteDuration duration = duration("5 seconds");
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
modification2);
String transactionID3 = "tx3";
MutableCompositeModification modification3 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification3);
// Ready the Tx's
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
- cohort3, modification3, true), getRef());
+ cohort3, modification3, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx.
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort1 = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).abort();
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ ShardDataTreeCohort cohort2 = mock(ShardDataTreeCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
/**
* This test simply verifies that the applySnapShot logic will work
* @throws ReadFailedException
+ * @throws DataValidationFailedException
*/
@Test
- public void testInMemoryDataStoreRestore() throws ReadFailedException {
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", MoreExecutors.sameThreadExecutor());
-
- store.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ public void testInMemoryDataTreeRestore() throws ReadFailedException, DataValidationFailedException {
+ DataTree store = InMemoryDataTreeFactory.getInstance().create();
+ store.setSchemaContext(SCHEMA_CONTEXT);
- DOMStoreWriteTransaction putTransaction = store.newWriteOnlyTransaction();
+ DataTreeModification putTransaction = store.takeSnapshot().newModification();
putTransaction.write(TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- commitTransaction(putTransaction);
+ commitTransaction(store, putTransaction);
- NormalizedNode<?, ?> expected = readStore(store);
+ NormalizedNode<?, ?> expected = readStore(store, YangInstanceIdentifier.builder().build());
- DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
+ DataTreeModification writeTransaction = store.takeSnapshot().newModification();
writeTransaction.delete(YangInstanceIdentifier.builder().build());
writeTransaction.write(YangInstanceIdentifier.builder().build(), expected);
- commitTransaction(writeTransaction);
+ commitTransaction(store, writeTransaction);
- NormalizedNode<?, ?> actual = readStore(store);
+ NormalizedNode<?, ?> actual = readStore(store, YangInstanceIdentifier.builder().build());
assertEquals(expected, actual);
}
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
- private void commitTransaction(final DOMStoreWriteTransaction transaction) {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- ListenableFuture<Void> future =
- commitCohort.preCommit();
- try {
- future.get();
- future = commitCohort.commit();
- future.get();
- } catch (InterruptedException | ExecutionException e) {
- }
+ private static void commitTransaction(DataTree store, final DataTreeModification modification) throws DataValidationFailedException {
+ modification.ready();
+ store.validate(modification);
+ store.commit(store.prepare(modification));
}
}
import akka.actor.Props;
import akka.pattern.AskTimeoutException;
import akka.testkit.TestActorRef;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* @author Basheeruddin Ahmed <syedbahm@cisco.com>
*/
public class ShardTransactionFailureTest extends AbstractActorTest {
- private static final InMemoryDOMDataStore store =
- new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
-
private static final SchemaContext testSchemaContext =
- TestModel.createTestContext();
+ TestModel.createTestContext();
+ private static final TransactionType RO = TransactionType.READ_ONLY;
+ private static final TransactionType RW = TransactionType.READ_WRITE;
+ private static final TransactionType WO = TransactionType.WRITE_ONLY;
+
+ private static final ShardDataTree store = new ShardDataTree(testSchemaContext);
private static final ShardIdentifier SHARD_IDENTIFIER =
ShardIdentifier.builder().memberName("member-1")
private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
- @BeforeClass
- public static void staticSetup() {
- store.onGlobalContextUpdated(testSchemaContext);
- }
-
private ActorRef createShard(){
return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.<String, String>emptyMap(), datastoreContext,
TestModel.createTestContext()));
throws Throwable {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ final Props props = ShardTransaction.props(RO, store.newReadOnlyTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- subject.underlyingActor().getDOMStoreTransaction().close();
+ subject.underlyingActor().getDOMStoreTransaction().abort();
future = akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
throws Throwable {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- subject.underlyingActor().getDOMStoreTransaction().close();
+ subject.underlyingActor().getDOMStoreTransaction().abort();
future = akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
throws Throwable {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
akka.pattern.Patterns.ask(subject, dataExists, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- subject.underlyingActor().getDOMStoreTransaction().close();
+ subject.underlyingActor().getDOMStoreTransaction().abort();
future = akka.pattern.Patterns.ask(subject, dataExists, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
+ final Props props = ShardTransaction.props(WO, store.newReadWriteTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
import akka.actor.Terminated;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
-import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.ShardWriteTransaction.GetCompositeModificationReply;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public class ShardTransactionTest extends AbstractActorTest {
private static final SchemaContext testSchemaContext = TestModel.createTestContext();
+ private static final TransactionType RO = TransactionType.READ_ONLY;
+ private static final TransactionType RW = TransactionType.READ_WRITE;
+ private static final TransactionType WO = TransactionType.WRITE_ONLY;
private static final ShardIdentifier SHARD_IDENTIFIER =
ShardIdentifier.builder().memberName("member-1")
private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
- private final InMemoryDOMDataStore store =
- new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+ private final ShardDataTree store = new ShardDataTree(testSchemaContext);
- @Before
- public void setup() {
- store.onGlobalContextUpdated(testSchemaContext);
- }
+ private int txCounter = 0;
- private ActorRef createShard(){
+ private ActorRef createShard() {
return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
Collections.<String, String>emptyMap(), datastoreContext, TestModel.createTestContext()));
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, String name) {
- return newTransactionActor(transaction, name, DataStoreVersions.CURRENT_VERSION);
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, String name) {
+ return newTransactionActor(type, transaction, name, DataStoreVersions.CURRENT_VERSION);
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, String name, short version) {
- return newTransactionActor(transaction, null, name, version);
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, String name, short version) {
+ return newTransactionActor(type, transaction, null, name, version);
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, ActorRef shard, String name) {
- return newTransactionActor(transaction, null, name, DataStoreVersions.CURRENT_VERSION);
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shard, String name) {
+ return newTransactionActor(type, transaction, null, name, DataStoreVersions.CURRENT_VERSION);
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, ActorRef shard, String name,
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shard, String name,
short version) {
- Props props = ShardTransaction.props(transaction, shard != null ? shard : createShard(),
+ Props props = ShardTransaction.props(type, transaction, shard != null ? shard : createShard(),
datastoreContext, shardStats, "txn", version);
return getSystem().actorOf(props, name);
}
+ private ReadOnlyShardDataTreeTransaction readOnlyTransaction() {
+ return store.newReadOnlyTransaction("test-ro-" + String.valueOf(txCounter++), null);
+ }
+
+ private ReadWriteShardDataTreeTransaction readWriteTransaction() {
+ return store.newReadWriteTransaction("test-rw-" + String.valueOf(txCounter++), null);
+ }
+
@Test
public void testOnReceiveReadData() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- testOnReceiveReadData(newTransactionActor(store.newReadOnlyTransaction(), shard, "testReadDataRO"));
+ testOnReceiveReadData(newTransactionActor(RO, readOnlyTransaction(), shard, "testReadDataRO"));
- testOnReceiveReadData(newTransactionActor(store.newReadWriteTransaction(), shard, "testReadDataRW"));
+ testOnReceiveReadData(newTransactionActor(RW, readWriteTransaction(), shard, "testReadDataRW"));
}
private void testOnReceiveReadData(final ActorRef transaction) {
final ActorRef shard = createShard();
testOnReceiveReadDataWhenDataNotFound(newTransactionActor(
- store.newReadOnlyTransaction(), shard, "testReadDataWhenDataNotFoundRO"));
+ RO, readOnlyTransaction(), shard, "testReadDataWhenDataNotFoundRO"));
testOnReceiveReadDataWhenDataNotFound(newTransactionActor(
- store.newReadWriteTransaction(), shard, "testReadDataWhenDataNotFoundRW"));
+ RW, readWriteTransaction(), shard, "testReadDataWhenDataNotFoundRW"));
}
private void testOnReceiveReadDataWhenDataNotFound(final ActorRef transaction) {
@Test
public void testOnReceiveReadDataHeliumR1() throws Exception {
new JavaTestKit(getSystem()) {{
- ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ ActorRef transaction = newTransactionActor(RO, readOnlyTransaction(),
"testOnReceiveReadDataHeliumR1", DataStoreVersions.HELIUM_1_VERSION);
transaction.tell(new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(),
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- testOnReceiveDataExistsPositive(newTransactionActor(store.newReadOnlyTransaction(), shard,
+ testOnReceiveDataExistsPositive(newTransactionActor(RO, readOnlyTransaction(), shard,
"testDataExistsPositiveRO"));
- testOnReceiveDataExistsPositive(newTransactionActor(store.newReadWriteTransaction(), shard,
+ testOnReceiveDataExistsPositive(newTransactionActor(RW, readWriteTransaction(), shard,
"testDataExistsPositiveRW"));
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- testOnReceiveDataExistsNegative(newTransactionActor(store.newReadOnlyTransaction(), shard,
+ testOnReceiveDataExistsNegative(newTransactionActor(RO, readOnlyTransaction(), shard,
"testDataExistsNegativeRO"));
- testOnReceiveDataExistsNegative(newTransactionActor(store.newReadWriteTransaction(), shard,
+ testOnReceiveDataExistsNegative(newTransactionActor(RW, readWriteTransaction(), shard,
"testDataExistsNegativeRW"));
}
}
@Test
- public void testOnReceiveWriteData() throws Exception {
+ public void testOnReceiveWriteData() {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveWriteData");
transaction.tell(new WriteData(TestModel.TEST_PATH,
}
@Test
- public void testOnReceiveHeliumR1WriteData() throws Exception {
+ public void testOnReceiveHeliumR1WriteData() {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveHeliumR1WriteData", DataStoreVersions.HELIUM_1_VERSION);
Encoded encoded = new NormalizedNodeToNodeCodec(null).encode(TestModel.TEST_PATH,
}
@Test
- public void testOnReceiveMergeData() throws Exception {
+ public void testOnReceiveMergeData() {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testMergeData");
transaction.tell(new MergeData(TestModel.TEST_PATH,
@Test
public void testOnReceiveHeliumR1MergeData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveHeliumR1MergeData", DataStoreVersions.HELIUM_1_VERSION);
Encoded encoded = new NormalizedNodeToNodeCodec(null).encode(TestModel.TEST_PATH,
@Test
public void testOnReceiveDeleteData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testDeleteData");
transaction.tell(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.HELIUM_2_VERSION).
public void testOnReceiveBatchedModifications() throws Exception {
new JavaTestKit(getSystem()) {{
- DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
- final ActorRef transaction = newTransactionActor(mockWriteTx, "testOnReceiveBatchedModifications");
+ ShardDataTreeTransactionParent parent = Mockito.mock(ShardDataTreeTransactionParent.class);
+ DataTreeModification mockModification = Mockito.mock(DataTreeModification.class);
+ ReadWriteShardDataTreeTransaction mockWriteTx = new ReadWriteShardDataTreeTransaction(parent, "id", mockModification);
+ final ActorRef transaction = newTransactionActor(RW, mockWriteTx, "testOnReceiveBatchedModifications");
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
DeleteModification delete = (DeleteModification)compositeModification.getModifications().get(2);
assertEquals("getPath", deletePath, delete.getPath());
- InOrder inOrder = Mockito.inOrder(mockWriteTx);
- inOrder.verify(mockWriteTx).write(writePath, writeData);
- inOrder.verify(mockWriteTx).merge(mergePath, mergeData);
- inOrder.verify(mockWriteTx).delete(deletePath);
+ InOrder inOrder = Mockito.inOrder(mockModification);
+ inOrder.verify(mockModification).write(writePath, writeData);
+ inOrder.verify(mockModification).merge(mergePath, mergeData);
+ inOrder.verify(mockModification).delete(deletePath);
}};
}
@Test
- public void testOnReceiveBatchedModificationsReady() throws Exception {
+ public void testOnReceiveBatchedModificationsReadyWithoutImmediateCommit() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
- "testOnReceiveBatchedModificationsReady");
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithoutImmediateCommit");
JavaTestKit watcher = new JavaTestKit(getSystem());
watcher.watch(transaction);
}};
}
+ @Test
+ public void testOnReceiveBatchedModificationsReadyWithImmediateCommit() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithImmediateCommit");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(writePath, writeData));
+ batched.setReady(true);
+ batched.setDoCommitOnReady(true);
+ batched.setTotalMessagesSent(1);
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), CommitTransactionReply.SERIALIZABLE_CLASS);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+ }};
+ }
+
@Test(expected=TestException.class)
public void testOnReceiveBatchedModificationsFailure() throws Throwable {
new JavaTestKit(getSystem()) {{
- DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
- final ActorRef transaction = newTransactionActor(mockWriteTx,
+ ShardDataTreeTransactionParent parent = Mockito.mock(ShardDataTreeTransactionParent.class);
+ DataTreeModification mockModification = Mockito.mock(DataTreeModification.class);
+ ReadWriteShardDataTreeTransaction mockWriteTx = new ReadWriteShardDataTreeTransaction(parent, "id", mockModification);
+ final ActorRef transaction = newTransactionActor(RW, mockWriteTx,
"testOnReceiveBatchedModificationsFailure");
JavaTestKit watcher = new JavaTestKit(getSystem());
YangInstanceIdentifier path = TestModel.TEST_PATH;
ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doThrow(new TestException()).when(mockWriteTx).write(path, node);
+ doThrow(new TestException()).when(mockModification).write(path, node);
BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
batched.addModification(new WriteModification(path, node));
public void testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Throwable {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount");
JavaTestKit watcher = new JavaTestKit(getSystem());
@Test
public void testOnReceivePreLithiumReadyTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testReadyTransaction", DataStoreVersions.HELIUM_2_VERSION);
JavaTestKit watcher = new JavaTestKit(getSystem());
// test
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testReadyTransaction2", DataStoreVersions.HELIUM_2_VERSION);
JavaTestKit watcher = new JavaTestKit(getSystem());
@Test
public void testOnReceiveCreateSnapshot() throws Exception {
new JavaTestKit(getSystem()) {{
- ShardTest.writeToStore(store, TestModel.TEST_PATH,
+ ShardTest.writeToStore(store.getDataTree(), TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- NormalizedNode<?,?> expectedRoot = ShardTest.readStore(store,
+ NormalizedNode<?,?> expectedRoot = ShardTest.readStore(store.getDataTree(),
YangInstanceIdentifier.builder().build());
- final ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(TransactionType.READ_ONLY, readOnlyTransaction(),
"testOnReceiveCreateSnapshot");
watch(transaction);
@Test
public void testReadWriteTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testReadWriteTxOnReceiveCloseTransaction");
watch(transaction);
@Test
public void testWriteOnlyTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testWriteTxOnReceiveCloseTransaction");
watch(transaction);
@Test
public void testReadOnlyTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(TransactionType.READ_ONLY, readOnlyTransaction(),
"testReadOnlyTxOnReceiveCloseTransaction");
watch(transaction);
@Test(expected=UnknownMessageException.class)
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ final Props props = ShardTransaction.props(TransactionType.READ_ONLY, readOnlyTransaction(), shard,
datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
500, TimeUnit.MILLISECONDS).build();
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testShardTransactionInactivity");
watch(transaction);
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
if (exToThrow instanceof PrimaryNotFoundException) {
doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
} else {
- doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ doReturn(primaryShardInfoReply(getSystem(), actorRef)).
when(mockActorContext).findPrimaryShardAsync(anyString());
}
doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext).
actorSelection(actorRef.path().toString());
- doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ doReturn(primaryShardInfoReply(getSystem(), actorRef)).
when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
@Test
public void testWriteAfterAsyncRead() throws Throwable {
- ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem());
+ ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem(), DefaultShardStrategy.DEFAULT_SHARD);
Promise<Object> createTxPromise = akka.dispatch.Futures.promise();
doReturn(createTxPromise).when(mockActorContext).executeOperationAsync(
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModificationsReady(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), true,
+ verifyBatchedModifications(batchedModifications.get(0), true, true,
new WriteModification(TestModel.TEST_PATH, nodeToWrite));
assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModificationsReady(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), true);
+ verifyBatchedModifications(batchedModifications.get(0), true, true);
+ }
+
+ @Test
+ public void testReadyWithMultipleShardWrites() throws Exception {
+ ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+
+ ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, "junk");
+
+ expectBatchedModificationsReady(actorRef1);
+ expectBatchedModificationsReady(actorRef2);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+
+ transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+
+ verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef1),
+ actorSelection(actorRef2));
}
@Test
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectBatchedModificationsReady(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), true,
+ verifyBatchedModifications(batchedModifications.get(0), true, true,
new WriteModification(TestModel.TEST_PATH, nodeToWrite));
verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectBatchedModificationsReady(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
verifyBatchedModifications(batchedModifications.get(0), false,
new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- verifyBatchedModifications(batchedModifications.get(1), true);
+ verifyBatchedModifications(batchedModifications.get(1), true, true);
verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures(proxy, TestException.class);
+ verifyCohortFutures((SingleCommitCohortProxy)ready, TestException.class);
}
private void testWriteOnlyTxWithFindPrimaryShardFailure(Exception toThrow) throws Exception {
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
-
- verifyCohortFutures(proxy, toThrow.getClass());
+ verifyCohortFutures((SingleCommitCohortProxy)ready, toThrow.getClass());
}
@Test
@Test
public void testReadyWithInvalidReplyMessageType() throws Exception {
dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+ ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- //expectBatchedModifications(actorRef, 1);
+ ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, "junk");
doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)),
- isA(BatchedModifications.class));
+ executeOperationAsync(eq(actorSelection(actorRef1)), isA(BatchedModifications.class));
+
+ expectBatchedModificationsReady(actorRef2);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+ transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
-
- verifyCohortFutures(proxy, IllegalArgumentException.class);
+ verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef2),
+ IllegalArgumentException.class);
}
@Test
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
doReturn(true).when(mockActorContext).isPathLocal(anyString());
- expectBatchedModificationsReady(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
-
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
}
private static interface TransactionProxyOperation {
when(mockActorContext).actorSelection(shardActorRef.path().toString());
if(shardFound) {
- doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
+ doReturn(primaryShardInfoReply(actorSystem, shardActorRef)).
when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
} else {
doReturn(Futures.failed(new PrimaryNotFoundException("test")))
verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
- verifyBatchedModifications(batchedModifications.get(2), true, new MergeModification(mergePath3, mergeNode3),
- new DeleteModification(deletePath2));
+ verifyBatchedModifications(batchedModifications.get(2), true, true,
+ new MergeModification(mergePath3, mergeNode3), new DeleteModification(deletePath2));
assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
}
doReturn(getSystem().actorSelection(shardActorRef.path())).
when(mockActorContext).actorSelection(shardActorRef.path().toString());
- doReturn(Futures.successful(getSystem().actorSelection(shardActorRef.path()))).
+ doReturn(primaryShardInfoReply(getSystem(), shardActorRef)).
when(mockActorContext).findPrimaryShardAsync(eq(shardName));
doReturn(true).when(mockActorContext).isPathLocal(shardActorRef.path().toString());
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import org.mockito.InOrder;
import org.opendaylight.controller.cluster.datastore.AbstractShardTest;
import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.ShardDataTree;
+import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort;
import org.opendaylight.controller.cluster.datastore.ShardTestKit;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
NormalizedNodeToNodeCodec codec = new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT);
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- store.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree store = InMemoryDataTreeFactory.getInstance().create();
+ store.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(store, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
@Test
public void testHelium2VersionRecovery() throws Exception {
- // Set up the InMemorySnapshotStore.
-
- InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
- testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree testStore = InMemoryDataTreeFactory.getInstance().create();
+ testStore.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
// Setup 3 simulated transactions with mock cohorts backed by real cohorts.
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
modification2);
String transactionID3 = "tx3";
MutableCompositeModification modification3 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, HELIUM_2_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the ForwardedReadyTransaction for the next 2 Tx's.
shard.tell(new ForwardedReadyTransaction(transactionID2, HELIUM_2_VERSION,
- cohort2, modification2, true), getRef());
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
shard.tell(new ForwardedReadyTransaction(transactionID3, HELIUM_2_VERSION,
- cohort3, modification3, true), getRef());
+ cohort3, modification3, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.WRITE_ONLY;
import akka.actor.ActorRef;
import akka.dispatch.Futures;
+import akka.util.Timeout;
import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.AbstractThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.AbstractTransactionProxyTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.ThreePhaseCommitCohortProxy;
import org.opendaylight.controller.cluster.datastore.TransactionProxy;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
return argThat(matcher);
}
+ // Mockito argument matcher: accepts only the protobuf-serialized form of
+ // CanCommitTransaction whose transaction ID equals the expected one.
+ private CanCommitTransaction eqCanCommitTransaction(final String transactionID) {
+ ArgumentMatcher<CanCommitTransaction> matcher = new ArgumentMatcher<CanCommitTransaction>() {
+ @Override
+ public boolean matches(Object argument) {
+ // Exact class check: the mocked actor receives the serialized message type.
+ return ThreePhaseCommitCohortMessages.CanCommitTransaction.class.equals(argument.getClass()) &&
+ CanCommitTransaction.fromSerializable(argument).getTransactionID().equals(transactionID);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ // Mockito argument matcher: accepts only the protobuf-serialized form of
+ // CommitTransaction whose transaction ID equals the expected one.
+ private CommitTransaction eqCommitTransaction(final String transactionID) {
+ ArgumentMatcher<CommitTransaction> matcher = new ArgumentMatcher<CommitTransaction>() {
+ @Override
+ public boolean matches(Object argument) {
+ // Exact class check: the mocked actor receives the serialized message type.
+ return ThreePhaseCommitCohortMessages.CommitTransaction.class.equals(argument.getClass()) &&
+ CommitTransaction.fromSerializable(argument).getTransactionID().equals(transactionID);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
// Builds an already-completed future holding a serialized ReadyTransactionReply
// for the given cohort actor path and protocol version (used to stub ready()).
private Future<Object> readySerializedTxReply(String path, short version) {
return Futures.successful(new ReadyTransactionReply(path, version).toSerializable());
}
// Drives a READ_WRITE transaction against a shard actor speaking the given
// Helium-era protocol version: delete + ready(), then the full three-phase
// commit (canCommit / preCommit / commit) against stubbed cohort replies.
// Returns the shard transaction ActorRef for further verification by callers.
private ActorRef testCompatibilityWithHeliumVersion(short version) throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, version);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, version,
+ DefaultShardStrategy.DEFAULT_SHARD);
NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
transactionProxy.delete(TestModel.TEST_PATH);
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+ AbstractThreePhaseCommitCohort<?> proxy = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ // Stub the shard actor's 3PC responses so the cohort proxy can complete:
+ // CanCommit -> YES, Commit -> success.
+ doReturn(Futures.successful(CanCommitTransactionReply.YES.toSerializable())).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)),
+ eqCanCommitTransaction(transactionProxy.getIdentifier().toString()), any(Timeout.class));
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ doReturn(Futures.successful(new CommitTransactionReply().toSerializable())).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)),
+ eqCommitTransaction(transactionProxy.getIdentifier().toString()), any(Timeout.class));
+
+ // Bounded get()s so a missing stubbed reply fails the test quickly.
+ Boolean canCommit = proxy.canCommit().get(3, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit.booleanValue());
+
+ proxy.preCommit().get(3, TimeUnit.SECONDS);
+
+ proxy.commit().get(3, TimeUnit.SECONDS);
return actorRef;
}
// creating transaction actors for write-only Tx's.
public void testWriteOnlyCompatibilityWithHeliumR2Version() throws Exception {
short version = DataStoreVersions.HELIUM_2_VERSION;
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, version);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, version,
+ DefaultShardStrategy.DEFAULT_SHARD);
NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
package org.opendaylight.controller.cluster.datastore.modification;
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
assertEquals("getPath", writePath, write.getPath());
assertEquals("getData", writeData, write.getData());
- ModificationPayload cloned =
- (ModificationPayload) SerializationUtils.clone(payload);
+ ModificationPayload cloned = SerializationUtils.clone(payload);
deserialized = (MutableCompositeModification) payload.getModification();
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Await;
DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
+ final String expPrimaryPath = "akka://test-system/find-primary-shard";
ActorContext actorContext =
new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
mock(Configuration.class), dataStoreContext) {
@Override
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
- return Futures.successful((Object) new PrimaryFound("akka://test-system/test"));
+ return Futures.successful((Object) new PrimaryFound(expPrimaryPath));
}
};
- Future<ActorSelection> foobar = actorContext.findPrimaryShardAsync("foobar");
- ActorSelection actual = Await.result(foobar, Duration.apply(5000, TimeUnit.MILLISECONDS));
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
+ PrimaryShardInfo actual = Await.result(foobar, Duration.apply(5000, TimeUnit.MILLISECONDS));
assertNotNull(actual);
+ assertEquals("LocalShardDataTree present", false, actual.getLocalShardDataTree().isPresent());
+ assertTrue("Unexpected PrimaryShardActor path " + actual.getPrimaryShardActor().path(),
+ expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
- Future<ActorSelection> cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
- ActorSelection cachedSelection = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
+ PrimaryShardInfo cachedInfo = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
- assertEquals(cachedSelection, actual);
+ assertEquals(cachedInfo, actual);
// Wait for 200 Milliseconds. The cached entry should have been removed.
Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
- cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
assertNull(cached);
};
- Future<ActorSelection> foobar = actorContext.findPrimaryShardAsync("foobar");
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
try {
Await.result(foobar, Duration.apply(100, TimeUnit.MILLISECONDS));
}
- Future<ActorSelection> cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
assertNull(cached);
}
};
- Future<ActorSelection> foobar = actorContext.findPrimaryShardAsync("foobar");
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
try {
Await.result(foobar, Duration.apply(100, TimeUnit.MILLISECONDS));
}
- Future<ActorSelection> cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
assertNull(cached);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * Test DOMDataTreeChangeListener that records every change notification and
+ * lets a test thread await an expected number of change events.
+ */
+public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
+
+ // All received change collections, in arrival order; synchronized because
+ // the notifying thread and the asserting test thread both touch it.
+ private final List<Collection<DataTreeCandidate>> changeList =
+ Collections.synchronizedList(Lists.<Collection<DataTreeCandidate>>newArrayList());
+
+ // volatile: reset() replaces the latch while notifications may be in flight.
+ private volatile CountDownLatch changeLatch;
+ private int expChangeEventCount;
+
+ public MockDataTreeChangeListener(int expChangeEventCount) {
+ reset(expChangeEventCount);
+ }
+
+ // Re-arms the listener for a new expectation and clears recorded changes.
+ public void reset(int expChangeEventCount) {
+ changeLatch = new CountDownLatch(expChangeEventCount);
+ this.expChangeEventCount = expChangeEventCount;
+ changeList.clear();
+ }
+
+ @Override
+ public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
+ changeList.add(changes);
+ changeLatch.countDown();
+ }
+
+ // Blocks up to 5s for the expected event count; fails with the number of
+ // events actually seen (expected minus the latch's remaining count).
+ public void waitForChangeEvents() {
+ boolean done = Uninterruptibles.awaitUninterruptibly(changeLatch, 5, TimeUnit.SECONDS);
+ if(!done) {
+ fail(String.format("Missing change notifications. Expected: %d. Actual: %d",
+ expChangeEventCount, (expChangeEventCount - changeLatch.getCount())));
+ }
+ }
+
+ // Sleeps 1s, then asserts no events arrived beyond the expected count.
+ public void expectNoMoreChanges(String assertMsg) {
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+ assertEquals(assertMsg, expChangeEventCount, changeList.size());
+ }
+}
*/
package org.opendaylight.controller.md.cluster.datastore.model;
+import com.google.common.io.Resources;
+import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
-import java.util.Set;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.parser.api.YangSyntaxErrorException;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
public class TestModel {
- public static final QName TEST_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13",
- "test");
-
- public static final QName JUNK_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:junk", "2014-03-13",
- "junk");
-
-
- public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
- public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
- public static final QName OUTER_CHOICE_QNAME = QName.create(TEST_QNAME, "outer-choice");
- public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
- public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
- public static final QName DESC_QNAME = QName.create(TEST_QNAME, "desc");
- public static final QName VALUE_QNAME = QName.create(TEST_QNAME, "value");
- private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
-
- public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
- public static final YangInstanceIdentifier JUNK_PATH = YangInstanceIdentifier.of(JUNK_QNAME);
- public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
- node(OUTER_LIST_QNAME).build();
- public static final YangInstanceIdentifier INNER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
- node(OUTER_LIST_QNAME).node(INNER_LIST_QNAME).build();
- public static final QName TWO_QNAME = QName.create(TEST_QNAME,"two");
- public static final QName THREE_QNAME = QName.create(TEST_QNAME,"three");
-
-
- public static final InputStream getDatastoreTestInputStream() {
- return getInputStream(DATASTORE_TEST_YANG);
- }
-
- private static InputStream getInputStream(final String resourceName) {
- return TestModel.class.getResourceAsStream(DATASTORE_TEST_YANG);
- }
-
- public static SchemaContext createTestContext() {
- YangParserImpl parser = new YangParserImpl();
- Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(getDatastoreTestInputStream()));
- return parser.resolveSchemaContext(modules);
- }
+ // QNames and instance identifiers for the odl-datastore-test model used
+ // throughout the datastore unit tests.
+ public static final QName TEST_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13",
+ "test");
+
+ public static final QName JUNK_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:junk", "2014-03-13",
+ "junk");
+
+
+ public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+ public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+ public static final QName OUTER_CHOICE_QNAME = QName.create(TEST_QNAME, "outer-choice");
+ public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+ public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+ public static final QName DESC_QNAME = QName.create(TEST_QNAME, "desc");
+ public static final QName VALUE_QNAME = QName.create(TEST_QNAME, "value");
+ private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+ public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+ public static final YangInstanceIdentifier JUNK_PATH = YangInstanceIdentifier.of(JUNK_QNAME);
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).build();
+ public static final YangInstanceIdentifier INNER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).node(INNER_LIST_QNAME).build();
+ public static final QName TWO_QNAME = QName.create(TEST_QNAME,"two");
+ public static final QName THREE_QNAME = QName.create(TEST_QNAME,"three");
+
+
+ // Opens the test YANG model as a classpath resource stream.
+ public static final InputStream getDatastoreTestInputStream() {
+ return TestModel.class.getResourceAsStream(DATASTORE_TEST_YANG);
+ }
+
+ // Parses the test YANG model into a SchemaContext. Parse failures are
+ // wrapped in ExceptionInInitializerError — presumably because callers
+ // invoke this from static initializers; TODO confirm with call sites.
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ try {
+ return parser.parseSources(Collections.singleton(Resources.asByteSource(TestModel.class.getResource(DATASTORE_TEST_YANG))));
+ } catch (IOException | YangSyntaxErrorException e) {
+ throw new ExceptionInInitializerError(e);
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertTrue;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Tests for the DOMDataTreeChangeService extension of AbstractDOMDataBroker.
+ * Each test registers a TestDataTreeListener on a subtree, issues writes via
+ * write-only transactions and verifies the delivered DataTreeCandidate
+ * notifications (before/after data and ModificationType).
+ */
+public class DOMDataTreeListenerTest {
+
+ private SchemaContext schemaContext;
+ private AbstractDOMDataBroker domBroker;
+ private ListeningExecutorService executor;
+ private ExecutorService futureExecutor;
+ private CommitExecutorService commitExecutor;
+
+ // Outer-list payloads with a single entry (id=1 and id=2 respectively).
+ private static final DataContainerChild<?, ?> OUTER_LIST = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1))
+ .build();
+
+ private static final DataContainerChild<?, ?> OUTER_LIST_2 = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2))
+ .build();
+
+ // Top-level test containers wrapping the two outer lists.
+ private static final NormalizedNode<?, ?> TEST_CONTAINER = Builders.containerBuilder()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(OUTER_LIST)
+ .build();
+
+ private static final NormalizedNode<?, ?> TEST_CONTAINER_2 = Builders.containerBuilder()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(OUTER_LIST_2)
+ .build();
+
+ // Registration scopes; final — these constants are never reassigned.
+ private static final DOMDataTreeIdentifier ROOT_DATA_TREE_ID = new DOMDataTreeIdentifier(
+ LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
+
+ private static final DOMDataTreeIdentifier OUTER_LIST_DATA_TREE_ID = new DOMDataTreeIdentifier(
+ LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH);
+
+ @Before
+ public void setupStore() {
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.newDirectExecutorService());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.newDirectExecutorService());
+ schemaContext = TestModel.createTestContext();
+
+ operStore.onGlobalContextUpdated(schemaContext);
+ configStore.onGlobalContextUpdated(schemaContext);
+
+ ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder() //
+ .put(CONFIGURATION, configStore) //
+ .put(OPERATIONAL, operStore) //
+ .build();
+
+ commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
+ futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
+ executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
+ domBroker = new SerializedDOMDataBroker(stores, executor);
+ }
+
+ @After
+ public void tearDown() {
+ if (executor != null) {
+ executor.shutdownNow();
+ }
+
+ if (futureExecutor != null) {
+ futureExecutor.shutdownNow();
+ }
+ }
+
+ // Writing a container into an empty tree yields a single WRITE notification.
+ @Test
+ public void writeContainerEmptyTreeTest() throws InterruptedException {
+ CountDownLatch latch = new CountDownLatch(1);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+
+ final DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit();
+
+ // Fail fast on timeout instead of falling through to a misleading
+ // assertion failure below.
+ assertTrue("Expected change notification was not received",
+ latch.await(5, TimeUnit.SECONDS));
+
+ assertEquals(1, listener.getReceivedChanges().size());
+ final Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+ listenerReg.close();
+ }
+
+ // Registration delivers the initial state, then the replacing WRITE.
+ @Test
+ public void replaceContainerContainerInTreeTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER_2);
+ writeTx.submit();
+
+ assertTrue("Expected change notifications were not received",
+ latch.await(5, TimeUnit.SECONDS));
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(TEST_CONTAINER, TEST_CONTAINER_2, ModificationType.WRITE, candidateRoot);
+ listenerReg.close();
+ }
+
+ // Registration delivers the initial state, then a DELETE of the subtree.
+ @Test
+ public void deleteContainerContainerInTreeTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
+ writeTx.submit();
+
+ assertTrue("Expected change notifications were not received",
+ latch.await(5, TimeUnit.SECONDS));
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(TEST_CONTAINER, null, ModificationType.DELETE, candidateRoot);
+ listenerReg.close();
+ }
+
+ // Replacing a child list surfaces as SUBTREE_MODIFIED at the registration
+ // root with a WRITE on the modified child.
+ @Test
+ public void replaceChildListContainerInTreeTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH, OUTER_LIST_2);
+ writeTx.submit();
+
+ assertTrue("Expected change notifications were not received",
+ latch.await(5, TimeUnit.SECONDS));
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(TEST_CONTAINER, TEST_CONTAINER_2, ModificationType.SUBTREE_MODIFIED, candidateRoot);
+ final DataTreeCandidateNode modifiedChild = candidateRoot.getModifiedChild(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME));
+ assertNotNull(modifiedChild);
+ checkChange(OUTER_LIST, OUTER_LIST_2, ModificationType.WRITE, modifiedChild);
+ listenerReg.close();
+ }
+
+ // A write above the registered subtree is scoped down to the child listener.
+ @Test
+ public void rootModificationChildListenerTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(OUTER_LIST_DATA_TREE_ID, listener);
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER_2);
+ writeTx.submit().checkedGet();
+
+ // Use the same 5 second bound as the other tests (was 1s) and fail
+ // fast on timeout.
+ assertTrue("Expected change notifications were not received",
+ latch.await(5, TimeUnit.SECONDS));
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, OUTER_LIST, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(OUTER_LIST, OUTER_LIST_2, ModificationType.WRITE, candidateRoot);
+ listenerReg.close();
+ }
+
+ // Mixed delete/write of list entries under a non-root registration yields
+ // SUBTREE_MODIFIED with per-entry child candidates.
+ @Test
+ public void listEntryChangeNonRootRegistrationTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(OUTER_LIST_DATA_TREE_ID, listener);
+
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId1 =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId2 =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2);
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId3 =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 3);
+
+ final MapEntryNode outerListEntry1 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
+ final MapEntryNode outerListEntry2 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2);
+ final MapEntryNode outerListEntry3 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 3);
+
+ final MapNode listAfter = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .withChild(outerListEntry2)
+ .withChild(outerListEntry3)
+ .build();
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId1));
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId2),
+ outerListEntry2);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId3),
+ outerListEntry3);
+ writeTx.submit();
+
+ assertTrue("Expected change notifications were not received",
+ latch.await(5, TimeUnit.SECONDS));
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, OUTER_LIST, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(OUTER_LIST, listAfter, ModificationType.SUBTREE_MODIFIED, candidateRoot);
+ final DataTreeCandidateNode entry1Canditate = candidateRoot.getModifiedChild(outerListEntryId1);
+ checkChange(outerListEntry1, null, ModificationType.DELETE, entry1Canditate);
+ final DataTreeCandidateNode entry2Canditate = candidateRoot.getModifiedChild(outerListEntryId2);
+ checkChange(null, outerListEntry2, ModificationType.WRITE, entry2Canditate);
+ final DataTreeCandidateNode entry3Canditate = candidateRoot.getModifiedChild(outerListEntryId3);
+ checkChange(null, outerListEntry3, ModificationType.WRITE, entry3Canditate);
+ listenerReg.close();
+ }
+
+ // Asserts a candidate node's before/after data presence/value and its
+ // modification type; a null expected value means "must be absent".
+ private static void checkChange(NormalizedNode<?, ?> expectedBefore,
+ NormalizedNode<?, ?> expectedAfter,
+ ModificationType expectedMod,
+ DataTreeCandidateNode candidateNode) {
+ if (expectedBefore != null) {
+ assertTrue(candidateNode.getDataBefore().isPresent());
+ assertEquals(expectedBefore, candidateNode.getDataBefore().get());
+ } else {
+ assertFalse(candidateNode.getDataBefore().isPresent());
+ }
+
+ if (expectedAfter != null) {
+ assertTrue(candidateNode.getDataAfter().isPresent());
+ assertEquals(expectedAfter, candidateNode.getDataAfter().get());
+ } else {
+ assertFalse(candidateNode.getDataAfter().isPresent());
+ }
+
+ assertEquals(expectedMod, candidateNode.getModificationType());
+ }
+
+ // Returns the broker's DOMDataTreeChangeService extension, or null if the
+ + // broker does not advertise one.
+ private DOMDataTreeChangeService getDOMDataTreeChangeService() {
+ final DOMDataBrokerExtension extension = domBroker.getSupportedExtensions()
+ .get(DOMDataTreeChangeService.class);
+ if (extension == null) {
+ return null;
+ }
+ DOMDataTreeChangeService dataTreeChangeService = null;
+ if (extension instanceof DOMDataTreeChangeService) {
+ dataTreeChangeService = (DOMDataTreeChangeService) extension;
+ }
+ return dataTreeChangeService;
+ }
+
+
+ // ExecutorService wrapper used as the broker's commit executor.
+ static class CommitExecutorService extends ForwardingExecutorService {
+
+ final ExecutorService delegate;
+
+ public CommitExecutorService(final ExecutorService delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ protected ExecutorService delegate() {
+ return delegate;
+ }
+ }
+
+ // Listener that records change collections and counts down a latch per event.
+ static class TestDataTreeListener implements DOMDataTreeChangeListener {
+
+ private final List<Collection<DataTreeCandidate>> receivedChanges = new ArrayList<>();
+ private final CountDownLatch latch;
+
+ public TestDataTreeListener(final CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
+ receivedChanges.add(changes);
+ latch.countDown();
+ }
+
+ public List<Collection<DataTreeCandidate>> getReceivedChanges() {
+ return receivedChanges;
+ }
+ }
+}
* @return The context in which this transaction was allocated, or null
* if the context was not recorded.
*/
- @Nullable public final Throwable getDebugContext() {
+ @Nullable
+ public final Throwable getDebugContext() {
return debugContext;
}
import org.opendaylight.controller.md.sal.dom.spi.AbstractRegistrationTree;
import org.opendaylight.controller.md.sal.dom.spi.RegistrationTreeNode;
import org.opendaylight.controller.md.sal.dom.spi.RegistrationTreeSnapshot;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
/**
* Callback notifying the subclass that the specified registration is being closed and it's user no longer
- * wishes to receive notifications. This notification is invoked while the {@link ListenerRegistration#close()}
+ * wishes to receive notifications. This notification is invoked while the {@link org.opendaylight.yangtools.concepts.ListenerRegistration#close()}
* method is executing. Subclasses can use this callback to properly remove any delayed notifications pending
* towards the registration.
*
}
@Override
- public final <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
+ public final <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
// Take the write lock
takeLock();
try {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import com.google.common.base.Preconditions;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract implementation of the {@link DOMStoreTransactionChain} interface relying on {@link DataTreeSnapshot} supplier
+ * and backend commit coordinator.
+ *
+ * @param <T> transaction identifier type
+ */
+@Beta
+public abstract class AbstractSnapshotBackedTransactionChain<T> extends TransactionReadyPrototype<T> implements DOMStoreTransactionChain {
+    /**
+     * State of the chain. Exactly one instance is installed in {@link #state}
+     * at any time; transitions are performed via atomic compare-and-set.
+     */
+    private abstract static class State {
+        /**
+         * Allocate a new snapshot.
+         *
+         * @return A new snapshot
+         */
+        protected abstract DataTreeSnapshot getSnapshot();
+    }
+
+    /**
+     * No transaction outstanding: snapshots come straight from the backend.
+     */
+    private static final class Idle extends State {
+        private final AbstractSnapshotBackedTransactionChain<?> chain;
+
+        Idle(final AbstractSnapshotBackedTransactionChain<?> chain) {
+            this.chain = Preconditions.checkNotNull(chain);
+        }
+
+        @Override
+        protected DataTreeSnapshot getSnapshot() {
+            return chain.takeSnapshot();
+        }
+    }
+
+    /**
+     * We have a transaction out there.
+     */
+    private static final class Allocated extends State {
+        private static final AtomicReferenceFieldUpdater<Allocated, DataTreeSnapshot> SNAPSHOT_UPDATER =
+                AtomicReferenceFieldUpdater.newUpdater(Allocated.class, DataTreeSnapshot.class, "snapshot");
+        private final DOMStoreWriteTransaction transaction;
+        // Set exactly once, when the outstanding transaction becomes ready.
+        private volatile DataTreeSnapshot snapshot;
+
+        Allocated(final DOMStoreWriteTransaction transaction) {
+            this.transaction = Preconditions.checkNotNull(transaction);
+        }
+
+        public DOMStoreWriteTransaction getTransaction() {
+            return transaction;
+        }
+
+        @Override
+        protected DataTreeSnapshot getSnapshot() {
+            final DataTreeSnapshot ret = snapshot;
+            Preconditions.checkState(ret != null, "Previous transaction %s is not ready yet", transaction.getIdentifier());
+            return ret;
+        }
+
+        void setSnapshot(final DataTreeSnapshot snapshot) {
+            final boolean success = SNAPSHOT_UPDATER.compareAndSet(this, null, snapshot);
+            Preconditions.checkState(success, "Transaction %s has already been marked as ready", transaction.getIdentifier());
+        }
+    }
+
+    /**
+     * Chain is logically shut down, no further allocation allowed.
+     */
+    private static final class Shutdown extends State {
+        private final String message;
+
+        Shutdown(final String message) {
+            this.message = Preconditions.checkNotNull(message);
+        }
+
+        @Override
+        protected DataTreeSnapshot getSnapshot() {
+            throw new IllegalStateException(message);
+        }
+    }
+
+    @SuppressWarnings("rawtypes")
+    private static final AtomicReferenceFieldUpdater<AbstractSnapshotBackedTransactionChain, State> STATE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(AbstractSnapshotBackedTransactionChain.class, State.class, "state");
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractSnapshotBackedTransactionChain.class);
+    private static final Shutdown CLOSED = new Shutdown("Transaction chain is closed");
+    private static final Shutdown FAILED = new Shutdown("Transaction chain has failed");
+    private final Idle idleState;
+    private volatile State state;
+
+    protected AbstractSnapshotBackedTransactionChain() {
+        idleState = new Idle(this);
+        state = idleState;
+    }
+
+    /**
+     * Atomically captures the current state together with a snapshot taken
+     * from it, so callers can CAS against the exact state they sampled.
+     */
+    private Entry<State, DataTreeSnapshot> getSnapshot() {
+        final State localState = state;
+        return new SimpleEntry<>(localState, localState.getSnapshot());
+    }
+
+    private boolean recordTransaction(final State expected, final DOMStoreWriteTransaction transaction) {
+        final State allocated = new Allocated(transaction);
+        return STATE_UPDATER.compareAndSet(this, expected, allocated);
+    }
+
+    @Override
+    public final DOMStoreReadTransaction newReadOnlyTransaction() {
+        final Entry<State, DataTreeSnapshot> entry = getSnapshot();
+        return SnapshotBackedTransactions.newReadTransaction(nextTransactionIdentifier(), getDebugTransactions(), entry.getValue());
+    }
+
+    @Override
+    public final DOMStoreReadWriteTransaction newReadWriteTransaction() {
+        Entry<State, DataTreeSnapshot> entry;
+        DOMStoreReadWriteTransaction ret;
+
+        // Retry until we transition from the sampled state to Allocated.
+        do {
+            entry = getSnapshot();
+            ret = new SnapshotBackedReadWriteTransaction<T>(nextTransactionIdentifier(),
+                    getDebugTransactions(), entry.getValue(), this);
+        } while (!recordTransaction(entry.getKey(), ret));
+
+        return ret;
+    }
+
+    @Override
+    public final DOMStoreWriteTransaction newWriteOnlyTransaction() {
+        Entry<State, DataTreeSnapshot> entry;
+        DOMStoreWriteTransaction ret;
+
+        // Retry until we transition from the sampled state to Allocated.
+        do {
+            entry = getSnapshot();
+            ret = new SnapshotBackedWriteTransaction<T>(nextTransactionIdentifier(),
+                    getDebugTransactions(), entry.getValue(), this);
+        } while (!recordTransaction(entry.getKey(), ret));
+
+        return ret;
+    }
+
+    @Override
+    protected final void transactionAborted(final SnapshotBackedWriteTransaction<T> tx) {
+        final State localState = state;
+        if (localState instanceof Allocated) {
+            final Allocated allocated = (Allocated)localState;
+            if (allocated.getTransaction().equals(tx)) {
+                final boolean success = STATE_UPDATER.compareAndSet(this, localState, idleState);
+                if (!success) {
+                    LOG.warn("Transaction {} aborted, but chain {} state already transitioned from {} to {}, very strange",
+                        tx, this, localState, state);
+                }
+            }
+        }
+    }
+
+    @Override
+    protected final DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction<T> tx, final DataTreeModification tree) {
+        final State localState = state;
+
+        if (localState instanceof Allocated) {
+            final Allocated allocated = (Allocated)localState;
+            final DOMStoreWriteTransaction transaction = allocated.getTransaction();
+            Preconditions.checkState(tx.equals(transaction), "Mis-ordered ready transaction %s last allocated was %s", tx, transaction);
+            allocated.setSnapshot(tree);
+        } else {
+            LOG.debug("Ignoring transaction {} readiness due to state {}", tx, localState);
+        }
+
+        return createCohort(tx, tree);
+    }
+
+    @Override
+    public final void close() {
+        State localState;
+
+        do {
+            // Re-read the state on every iteration: a failed CAS means it has
+            // changed, and retrying with a stale expected value would loop forever.
+            localState = state;
+
+            // Guava Preconditions uses %s placeholders, not SLF4J-style {}.
+            Preconditions.checkState(!CLOSED.equals(localState), "Transaction chain %s has been closed", this);
+
+            if (FAILED.equals(localState)) {
+                LOG.debug("Ignoring user close in failed state");
+                return;
+            }
+        } while (!STATE_UPDATER.compareAndSet(this, localState, CLOSED));
+    }
+
+    /**
+     * Notify the base logic that a previously-submitted transaction has been committed successfully.
+     *
+     * @param transaction Transaction which completed successfully.
+     */
+    protected final void onTransactionCommited(final SnapshotBackedWriteTransaction<T> transaction) {
+        // If the committed transaction was the one we allocated last,
+        // we clear it and the ready snapshot, so the next transaction
+        // allocated refers to the data tree directly.
+        final State localState = state;
+
+        if (!(localState instanceof Allocated)) {
+            // This can legally happen if the chain is shut down before the transaction was committed
+            // by the backend.
+            LOG.debug("Ignoring successful transaction {} in state {}", transaction, localState);
+            return;
+        }
+
+        final Allocated allocated = (Allocated)localState;
+        final DOMStoreWriteTransaction tx = allocated.getTransaction();
+        if (!tx.equals(transaction)) {
+            LOG.debug("Ignoring non-latest successful transaction {} in state {}", transaction, allocated);
+            return;
+        }
+
+        if (!STATE_UPDATER.compareAndSet(this, localState, idleState)) {
+            LOG.debug("Transaction chain {} has already transitioned from {} to {}, not making it idle", this, localState, state);
+        }
+    }
+
+    /**
+     * Notify the base logic that a previously-submitted transaction has failed.
+     *
+     * @param transaction Transaction which failed.
+     * @param cause Failure cause
+     */
+    protected final void onTransactionFailed(final SnapshotBackedWriteTransaction<T> transaction, final Throwable cause) {
+        LOG.debug("Transaction chain {} failed on transaction {}", this, transaction, cause);
+        state = FAILED;
+    }
+
+    /**
+     * Return the next transaction identifier.
+     *
+     * @return transaction identifier.
+     */
+    protected abstract T nextTransactionIdentifier();
+
+    /**
+     * Inquire as to whether transactions should record their allocation context.
+     *
+     * @return True if allocation context should be recorded.
+     */
+    protected abstract boolean getDebugTransactions();
+
+    /**
+     * Take a fresh {@link DataTreeSnapshot} from the backend.
+     *
+     * @return A new snapshot.
+     */
+    protected abstract DataTreeSnapshot takeSnapshot();
+
+    /**
+     * Create a cohort for driving the transaction through the commit process.
+     *
+     * @param transaction Transaction handle
+     * @param modification {@link DataTreeModification} which needs to be applied to the backend
+     * @return A {@link DOMStoreThreePhaseCommitCohort} cohort.
+     */
+    protected abstract DOMStoreThreePhaseCommitCohort createCohort(final SnapshotBackedWriteTransaction<T> transaction, final DataTreeModification modification);
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.annotations.Beta;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
* Implementation of read-only transaction backed by {@link DataTreeSnapshot}
* which delegates most of its calls to similar methods provided by underlying snapshot.
*
+ * <T> identifier type
*/
-final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction<Object>
- implements DOMStoreReadTransaction {
-
+@Beta
+public final class SnapshotBackedReadTransaction<T> extends AbstractDOMStoreTransaction<T> implements DOMStoreReadTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadTransaction.class);
private volatile DataTreeSnapshot stableSnapshot;
- public SnapshotBackedReadTransaction(final Object identifier, final boolean debug, final DataTreeSnapshot snapshot) {
+ /**
+ * Creates a new read-only transaction.
+ *
+ * @param identifier Transaction Identifier
+ * @param debug Enable transaction debugging
+ * @param snapshot Snapshot which will be modified.
+ */
+ SnapshotBackedReadTransaction(final T identifier, final boolean debug, final DataTreeSnapshot snapshot) {
super(identifier, debug);
this.stableSnapshot = Preconditions.checkNotNull(snapshot);
LOG.debug("ReadOnly Tx: {} allocated with snapshot {}", identifier, snapshot);
checkNotNull(path, "Path must not be null.");
try {
- return Futures.immediateCheckedFuture(
- read(path).checkedGet().isPresent());
+ return Futures.immediateCheckedFuture(read(path).checkedGet().isPresent());
} catch (ReadFailedException e) {
return Futures.immediateFailedCheckedFuture(e);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.annotations.Beta;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
* Implementation of Read-Write transaction which is backed by {@link DataTreeSnapshot}
* and executed according to {@link TransactionReadyPrototype}.
*
+ * @param <T> identifier type
*/
-final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
+@Beta
+public final class SnapshotBackedReadWriteTransaction<T> extends SnapshotBackedWriteTransaction<T> implements DOMStoreReadWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
- /**
- * Creates new read-write transaction.
- *
- * @param identifier transaction Identifier
- * @param snapshot Snapshot which will be modified.
- * @param readyImpl Implementation of ready method.
- */
- protected SnapshotBackedReadWriteTransaction(final Object identifier, final boolean debug,
- final DataTreeSnapshot snapshot, final TransactionReadyPrototype store) {
- super(identifier, debug, snapshot, store);
+ SnapshotBackedReadWriteTransaction(final T identifier, final boolean debug,
+ final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
+ super(identifier, debug, snapshot, readyImpl);
}
@Override
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+
+/**
+ * Public utility class for instantiating snapshot-backed transactions.
+ */
+@Beta
+public final class SnapshotBackedTransactions {
+    // Static factory holder only; instantiation is a programming error.
+    private SnapshotBackedTransactions() {
+        throw new UnsupportedOperationException("Utility class");
+    }
+
+    /**
+     * Creates a new read-only transaction.
+     *
+     * @param <T> transaction identifier type
+     * @param identifier Transaction Identifier
+     * @param debug Enable transaction debugging
+     * @param snapshot Snapshot which will be modified.
+     * @return A new read-only transaction backed by the supplied snapshot.
+     */
+    public static <T> SnapshotBackedReadTransaction<T> newReadTransaction(final T identifier, final boolean debug, final DataTreeSnapshot snapshot) {
+        return new SnapshotBackedReadTransaction<T>(identifier, debug, snapshot);
+    }
+
+    /**
+     * Creates a new read-write transaction.
+     *
+     * @param <T> transaction identifier type
+     * @param identifier transaction Identifier
+     * @param debug Enable transaction debugging
+     * @param snapshot Snapshot which will be modified.
+     * @param readyImpl Implementation of ready method.
+     * @return A new read-write transaction backed by the supplied snapshot.
+     */
+    public static <T> SnapshotBackedReadWriteTransaction<T> newReadWriteTransaction(final T identifier, final boolean debug,
+            final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
+        return new SnapshotBackedReadWriteTransaction<T>(identifier, debug, snapshot, readyImpl);
+    }
+
+    /**
+     * Creates a new write-only transaction.
+     *
+     * @param <T> transaction identifier type
+     * @param identifier transaction Identifier
+     * @param debug Enable transaction debugging
+     * @param snapshot Snapshot which will be modified.
+     * @param readyImpl Implementation of ready method.
+     * @return A new write-only transaction backed by the supplied snapshot.
+     */
+    public static <T> SnapshotBackedWriteTransaction<T> newWriteTransaction(final T identifier, final boolean debug,
+            final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
+        return new SnapshotBackedWriteTransaction<T>(identifier, debug, snapshot, readyImpl);
+    }
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
import static com.google.common.base.Preconditions.checkState;
+import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* Implementation of Write transaction which is backed by
* {@link DataTreeSnapshot} and executed according to
- * {@link org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
+ * {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
*
+ * @param <T> Identifier type
*/
-class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction<Object> implements DOMStoreWriteTransaction {
+@Beta
+public class SnapshotBackedWriteTransaction<T> extends AbstractDOMStoreTransaction<T> implements DOMStoreWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
+ @SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+ @SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
// non-null when not ready
- private volatile TransactionReadyPrototype readyImpl;
+ private volatile TransactionReadyPrototype<T> readyImpl;
// non-null when not committed/closed
private volatile DataTreeModification mutableTree;
- /**
- * Creates new write-only transaction.
- *
- * @param identifier
- * transaction Identifier
- * @param snapshot
- * Snapshot which will be modified.
- * @param readyImpl
- * Implementation of ready method.
- */
- public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
- final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
+ SnapshotBackedWriteTransaction(final T identifier, final boolean debug,
+ final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
super(identifier, debug);
this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
mutableTree = snapshot.newModification();
* @param path Path to read
* @return null if the the transaction has been closed;
*/
- protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+ final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
return readyImpl == null ? null : mutableTree.readNode(path);
}
@Override
public DOMStoreThreePhaseCommitCohort ready() {
- final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ @SuppressWarnings("unchecked")
+ final TransactionReadyPrototype<T> wasReady = READY_UPDATER.getAndSet(this, null);
checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
LOG.debug("Store transaction: {} : Ready", getIdentifier());
@Override
public void close() {
- final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ @SuppressWarnings("unchecked")
+ final TransactionReadyPrototype<T> wasReady = READY_UPDATER.getAndSet(this, null);
if (wasReady != null) {
LOG.debug("Store transaction: {} : Closed", getIdentifier());
TREE_UPDATER.lazySet(this, null);
/**
* Prototype implementation of
- * {@link #ready(org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction)}
+ * {@link #ready(org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction)}
*
* This class is intended to be implemented by Transaction factories
- * responsible for allocation of {@link org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction} and
+ * responsible for allocation of {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction} and
* providing underlying logic for applying implementation.
*
+ * @param <T> identifier type
*/
- abstract static class TransactionReadyPrototype {
+ public abstract static class TransactionReadyPrototype<T> {
/**
* Called when a transaction is closed without being readied. This is not invoked for
* transactions which are ready.
*
* @param tx Transaction which got aborted.
*/
- protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
+ protected abstract void transactionAborted(final SnapshotBackedWriteTransaction<T> tx);
/**
* Returns a commit coordinator associated with supplied transactions.
* Modified data tree which has been constructed.
* @return DOMStoreThreePhaseCommitCohort associated with transaction
*/
- protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
+ protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction<T> tx, DataTreeModification tree);
}
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.ForwardingDOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-final class ChainedTransactionCommitImpl extends ForwardingDOMStoreThreePhaseCommitCohort {
- private final SnapshotBackedWriteTransaction transaction;
- private final DOMStoreThreePhaseCommitCohort delegate;
+final class ChainedTransactionCommitImpl extends InMemoryDOMStoreThreePhaseCommitCohort {
private final DOMStoreTransactionChainImpl txChain;
- ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
- final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- this.transaction = Preconditions.checkNotNull(transaction);
- this.delegate = Preconditions.checkNotNull(delegate);
+ ChainedTransactionCommitImpl(final InMemoryDOMDataStore store, final SnapshotBackedWriteTransaction<String> transaction,
+ final DataTreeModification modification, final DOMStoreTransactionChainImpl txChain) {
+ super(store, transaction, modification);
this.txChain = Preconditions.checkNotNull(txChain);
}
- @Override
- protected DOMStoreThreePhaseCommitCohort delegate() {
- return delegate;
- }
-
@Override
public ListenableFuture<Void> commit() {
- ListenableFuture<Void> commitFuture = super.commit();
- Futures.addCallback(commitFuture, new FutureCallback<Void>() {
- @Override
- public void onFailure(final Throwable t) {
- txChain.onTransactionFailed(transaction, t);
- }
-
- @Override
- public void onSuccess(final Void result) {
- txChain.onTransactionCommited(transaction);
- }
- });
- return commitFuture;
+ ListenableFuture<Void> ret = super.commit();
+ txChain.transactionCommited(getTransaction());
+ return ret;
}
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl;
import com.google.common.base.Preconditions;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.AbstractSnapshotBackedTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-final class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
- private static abstract class State {
- /**
- * Allocate a new snapshot.
- *
- * @return A new snapshot
- */
- protected abstract DataTreeSnapshot getSnapshot();
- }
-
- private static final class Idle extends State {
- private final InMemoryDOMDataStore store;
-
- Idle(final InMemoryDOMDataStore store) {
- this.store = Preconditions.checkNotNull(store);
- }
-
- @Override
- protected DataTreeSnapshot getSnapshot() {
- return store.takeSnapshot();
- }
- }
-
- /**
- * We have a transaction out there.
- */
- private static final class Allocated extends State {
- private static final AtomicReferenceFieldUpdater<Allocated, DataTreeSnapshot> SNAPSHOT_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(Allocated.class, DataTreeSnapshot.class, "snapshot");
- private final DOMStoreWriteTransaction transaction;
- private volatile DataTreeSnapshot snapshot;
-
- Allocated(final DOMStoreWriteTransaction transaction) {
- this.transaction = Preconditions.checkNotNull(transaction);
- }
-
- public DOMStoreWriteTransaction getTransaction() {
- return transaction;
- }
-
- @Override
- protected DataTreeSnapshot getSnapshot() {
- final DataTreeSnapshot ret = snapshot;
- Preconditions.checkState(ret != null, "Previous transaction %s is not ready yet", transaction.getIdentifier());
- return ret;
- }
-
- void setSnapshot(final DataTreeSnapshot snapshot) {
- final boolean success = SNAPSHOT_UPDATER.compareAndSet(this, null, snapshot);
- Preconditions.checkState(success, "Transaction %s has already been marked as ready", transaction.getIdentifier());
- }
- }
-
- /**
- * Chain is logically shut down, no further allocation allowed.
- */
- private static final class Shutdown extends State {
- private final String message;
-
- Shutdown(final String message) {
- this.message = Preconditions.checkNotNull(message);
- }
-
- @Override
- protected DataTreeSnapshot getSnapshot() {
- throw new IllegalStateException(message);
- }
- }
-
- private static final AtomicReferenceFieldUpdater<DOMStoreTransactionChainImpl, State> STATE_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(DOMStoreTransactionChainImpl.class, State.class, "state");
- private static final Logger LOG = LoggerFactory.getLogger(DOMStoreTransactionChainImpl.class);
- private static final Shutdown CLOSED = new Shutdown("Transaction chain is closed");
- private static final Shutdown FAILED = new Shutdown("Transaction chain has failed");
+final class DOMStoreTransactionChainImpl extends AbstractSnapshotBackedTransactionChain<String> {
private final InMemoryDOMDataStore store;
- private final Idle idleState;
- private volatile State state;
DOMStoreTransactionChainImpl(final InMemoryDOMDataStore store) {
this.store = Preconditions.checkNotNull(store);
- idleState = new Idle(store);
- state = idleState;
- }
-
- private Entry<State, DataTreeSnapshot> getSnapshot() {
- final State localState = state;
- return new SimpleEntry<>(localState, localState.getSnapshot());
- }
-
- private boolean recordTransaction(final State expected, final DOMStoreWriteTransaction transaction) {
- final State state = new Allocated(transaction);
- return STATE_UPDATER.compareAndSet(this, expected, state);
}
@Override
- public DOMStoreReadTransaction newReadOnlyTransaction() {
- final Entry<State, DataTreeSnapshot> entry = getSnapshot();
- return new SnapshotBackedReadTransaction(store.nextIdentifier(), store.getDebugTransactions(), entry.getValue());
+ protected DOMStoreThreePhaseCommitCohort createCohort(final SnapshotBackedWriteTransaction<String> tx, final DataTreeModification modification) {
+ return new ChainedTransactionCommitImpl(store, tx, modification, this);
}
@Override
- public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- Entry<State, DataTreeSnapshot> entry;
- DOMStoreReadWriteTransaction ret;
-
- do {
- entry = getSnapshot();
- ret = new SnapshotBackedReadWriteTransaction(store.nextIdentifier(),
- store.getDebugTransactions(), entry.getValue(), this);
- } while (!recordTransaction(entry.getKey(), ret));
-
- return ret;
- }
-
- @Override
- public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- Entry<State, DataTreeSnapshot> entry;
- DOMStoreWriteTransaction ret;
-
- do {
- entry = getSnapshot();
- ret = new SnapshotBackedWriteTransaction(store.nextIdentifier(),
- store.getDebugTransactions(), entry.getValue(), this);
- } while (!recordTransaction(entry.getKey(), ret));
-
- return ret;
+ protected DataTreeSnapshot takeSnapshot() {
+ return store.takeSnapshot();
}
@Override
- protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
- final State localState = state;
- if (localState instanceof Allocated) {
- final Allocated allocated = (Allocated)localState;
- if (allocated.getTransaction().equals(tx)) {
- final boolean success = STATE_UPDATER.compareAndSet(this, localState, idleState);
- if (!success) {
- LOG.info("State already transitioned from {} to {}", localState, state);
- }
- }
- }
+ protected String nextTransactionIdentifier() {
+ return store.nextIdentifier();
}
@Override
- protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
- final State localState = state;
-
- if (localState instanceof Allocated) {
- final Allocated allocated = (Allocated)localState;
- final DOMStoreWriteTransaction transaction = allocated.getTransaction();
- Preconditions.checkState(tx.equals(transaction), "Mis-ordered ready transaction %s last allocated was %s", tx, transaction);
- allocated.setSnapshot(tree);
- } else {
- LOG.debug("Ignoring transaction {} readiness due to state {}", tx, localState);
- }
-
- return new ChainedTransactionCommitImpl(tx, store.transactionReady(tx, tree), this);
+ protected boolean getDebugTransactions() {
+ return store.getDebugTransactions();
}
- @Override
- public void close() {
- final State localState = state;
-
- do {
- Preconditions.checkState(!CLOSED.equals(localState), "Transaction chain {} has been closed", this);
-
- if (FAILED.equals(localState)) {
- LOG.debug("Ignoring user close in failed state");
- return;
- }
- } while (!STATE_UPDATER.compareAndSet(this, localState, CLOSED));
- }
-
- void onTransactionFailed(final SnapshotBackedWriteTransaction transaction, final Throwable t) {
- LOG.debug("Transaction chain {} failed on transaction {}", this, transaction, t);
- state = FAILED;
- }
-
- void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If the committed transaction was the one we allocated last,
- // we clear it and the ready snapshot, so the next transaction
- // allocated refers to the data tree directly.
- final State localState = state;
-
- if (!(localState instanceof Allocated)) {
- LOG.debug("Ignoring successful transaction {} in state {}", transaction, localState);
- return;
- }
-
- final Allocated allocated = (Allocated)localState;
- final DOMStoreWriteTransaction tx = allocated.getTransaction();
- if (!tx.equals(transaction)) {
- LOG.debug("Ignoring non-latest successful transaction {} in state {}", transaction, allocated);
- return;
- }
-
- if (!STATE_UPDATER.compareAndSet(this, localState, idleState)) {
- LOG.debug("Transaction chain {} has already transitioned from {} to {}, not making it idle", this, localState, state);
- }
+ void transactionCommited(final SnapshotBackedWriteTransaction<String> transaction) {
+ super.onTransactionCommited(transaction);
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import static com.google.common.base.Preconditions.checkState;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
-import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedTransactions;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager.Invoker;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
*
* Implementation of {@link DOMStore} which uses {@link DataTree} and other
* classes such as {@link SnapshotBackedWriteTransaction}.
- * {@link SnapshotBackedReadTransaction} and {@link ResolveDataChangeEventsTask}
+ * {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedReadTransaction} and {@link ResolveDataChangeEventsTask}
* to implement {@link DOMStore} contract.
*
*/
-public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable, DOMStoreTreeChangePublisher {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype<String> implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable, DOMStoreTreeChangePublisher {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
- private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
- private static final ListenableFuture<Boolean> CAN_COMMIT_FUTURE = Futures.immediateFuture(Boolean.TRUE);
private static final Invoker<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> DCL_NOTIFICATION_MGR_INVOKER =
new Invoker<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent>() {
@Override
public DOMStoreReadTransaction newReadOnlyTransaction() {
- return new SnapshotBackedReadTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot());
+ return SnapshotBackedTransactions.newReadTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot());
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- return new SnapshotBackedReadWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
+ return SnapshotBackedTransactions.newReadWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- return new SnapshotBackedWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
+ return SnapshotBackedTransactions.newWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
}
@Override
}
@Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
- return changePublisher.registerTreeChangeListener(treeId, listener);
+ public synchronized <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
+ /*
+ * Make sure commit is not occurring right now. Listener has to be
+ * registered and its state capture enqueued at a consistent point.
+ */
+ return changePublisher.registerTreeChangeListener(treeId, listener, dataTree.takeSnapshot());
}
@Override
- protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ protected void transactionAborted(final SnapshotBackedWriteTransaction<String> tx) {
LOG.debug("Tx: {} is closed.", tx.getIdentifier());
}
@Override
- protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
- LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
- return new ThreePhaseCommitImpl(tx, tree);
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction<String> tx, final DataTreeModification modification) {
+ LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), modification);
+ return new InMemoryDOMStoreThreePhaseCommitCohort(this, tx, modification);
}
- Object nextIdentifier() {
+ String nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private static void warnDebugContext(AbstractDOMStoreTransaction<?> transaction) {
- final Throwable ctx = transaction.getDebugContext();
- if (ctx != null) {
- LOG.warn("Transaction {} has been allocated in the following context", transaction.getIdentifier(), ctx);
- }
+ void validate(final DataTreeModification modification) throws DataValidationFailedException {
+ dataTree.validate(modification);
}
- private final class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
- private final SnapshotBackedWriteTransaction transaction;
- private final DataTreeModification modification;
-
- private ResolveDataChangeEventsTask listenerResolver;
- private DataTreeCandidate candidate;
-
- public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
- this.transaction = writeTransaction;
- this.modification = modification;
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- try {
- dataTree.validate(modification);
- LOG.debug("Store Transaction: {} can be committed", transaction.getIdentifier());
- return CAN_COMMIT_FUTURE;
- } catch (ConflictingModificationAppliedException e) {
- LOG.warn("Store Tx: {} Conflicting modification for {}.", transaction.getIdentifier(),
- e.getPath());
- warnDebugContext(transaction);
- return Futures.immediateFailedFuture(new OptimisticLockFailedException("Optimistic lock failed.", e));
- } catch (DataValidationFailedException e) {
- LOG.warn("Store Tx: {} Data Precondition failed for {}.", transaction.getIdentifier(),
- e.getPath(), e);
- warnDebugContext(transaction);
-
- // For debugging purposes, allow dumping of the modification. Coupled with the above
- // precondition log, it should allow us to understand what went on.
- LOG.trace("Store Tx: {} modifications: {} tree: {}", modification, dataTree);
-
- return Futures.immediateFailedFuture(new TransactionCommitFailedException("Data did not pass validation.", e));
- } catch (Exception e) {
- LOG.warn("Unexpected failure in validation phase", e);
- return Futures.immediateFailedFuture(e);
- }
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- try {
- candidate = dataTree.prepare(modification);
- listenerResolver = ResolveDataChangeEventsTask.create(candidate, listenerTree);
- return SUCCESSFUL_FUTURE;
- } catch (Exception e) {
- LOG.warn("Unexpected failure in pre-commit phase", e);
- return Futures.immediateFailedFuture(e);
- }
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- candidate = null;
- return SUCCESSFUL_FUTURE;
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- checkState(candidate != null, "Proposed subtree must be computed");
-
- /*
- * The commit has to occur atomically with regard to listener
- * registrations.
- */
- synchronized (InMemoryDOMDataStore.this) {
- dataTree.commit(candidate);
- changePublisher.publishChange(candidate);
- listenerResolver.resolve(dataChangeListenerNotificationManager);
- }
+ DataTreeCandidate prepare(final DataTreeModification modification) {
+ return dataTree.prepare(modification);
+ }
- return SUCCESSFUL_FUTURE;
- }
+ synchronized void commit(final DataTreeCandidate candidate) {
+ dataTree.commit(candidate);
+ changePublisher.publishChange(candidate);
+ ResolveDataChangeEventsTask.create(candidate, listenerTree).resolve(dataChangeListenerNotificationManager);
}
}
--- /dev/null
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import static com.google.common.base.Preconditions.checkState;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class InMemoryDOMStoreThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
+ private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMStoreThreePhaseCommitCohort.class);
+ private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
+ private static final ListenableFuture<Boolean> CAN_COMMIT_FUTURE = Futures.immediateFuture(Boolean.TRUE);
+ private final SnapshotBackedWriteTransaction<String> transaction;
+ private final DataTreeModification modification;
+ private final InMemoryDOMDataStore store;
+ private DataTreeCandidate candidate;
+
+ public InMemoryDOMStoreThreePhaseCommitCohort(final InMemoryDOMDataStore store, final SnapshotBackedWriteTransaction<String> writeTransaction, final DataTreeModification modification) {
+ this.transaction = Preconditions.checkNotNull(writeTransaction);
+ this.modification = Preconditions.checkNotNull(modification);
+ this.store = Preconditions.checkNotNull(store);
+ }
+
+ private static void warnDebugContext(final AbstractDOMStoreTransaction<?> transaction) {
+ final Throwable ctx = transaction.getDebugContext();
+ if (ctx != null) {
+ LOG.warn("Transaction {} has been allocated in the following context", transaction.getIdentifier(), ctx);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<Boolean> canCommit() {
+ try {
+ store.validate(modification);
+ LOG.debug("Store Transaction: {} can be committed", getTransaction().getIdentifier());
+ return CAN_COMMIT_FUTURE;
+ } catch (ConflictingModificationAppliedException e) {
+ LOG.warn("Store Tx: {} Conflicting modification for {}.", getTransaction().getIdentifier(),
+ e.getPath());
+ warnDebugContext(getTransaction());
+ return Futures.immediateFailedFuture(new OptimisticLockFailedException("Optimistic lock failed.", e));
+ } catch (DataValidationFailedException e) {
+ LOG.warn("Store Tx: {} Data Precondition failed for {}.", getTransaction().getIdentifier(),
+ e.getPath(), e);
+ warnDebugContext(getTransaction());
+
+ // For debugging purposes, allow dumping of the modification. Coupled with the above
+ // precondition log, it should allow us to understand what went on.
+            LOG.trace("Store Tx: {} modifications: {} tree: {}", getTransaction().getIdentifier(), modification, store);
+
+ return Futures.immediateFailedFuture(new TransactionCommitFailedException("Data did not pass validation.", e));
+ } catch (Exception e) {
+ LOG.warn("Unexpected failure in validation phase", e);
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<Void> preCommit() {
+ try {
+ candidate = store.prepare(modification);
+ return SUCCESSFUL_FUTURE;
+ } catch (Exception e) {
+ LOG.warn("Unexpected failure in pre-commit phase", e);
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<Void> abort() {
+ candidate = null;
+ return SUCCESSFUL_FUTURE;
+ }
+
+ protected final SnapshotBackedWriteTransaction<String> getTransaction() {
+ return transaction;
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ checkState(candidate != null, "Proposed subtree must be computed");
+
+ /*
+ * The commit has to occur atomically with regard to listener
+ * registrations.
+ */
+ store.commit(candidate);
+ return SUCCESSFUL_FUTURE;
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.base.Optional;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTreeChangePublisher;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager.Invoker;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.spi.DefaultDataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations, final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
- final DataTreeCandidate candidate = new DefaultDataTreeCandidate(path, node);
+ final DataTreeCandidate candidate = DataTreeCandidates.newDataTreeCandidate(path, node);
for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
LOG.debug("Enqueueing candidate {} to registration {}", candidate, registrations);
// FIXME: remove the queue for this registration and make sure we clear it
}
+ <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener, final DataTreeSnapshot snapshot) {
+ final AbstractDOMDataTreeChangeListenerRegistration<L> reg = registerTreeChangeListener(treeId, listener);
+
+ final Optional<NormalizedNode<?, ?>> node = snapshot.readNode(treeId);
+ if (node.isPresent()) {
+ final DataTreeCandidate candidate = DataTreeCandidates.fromNormalizedNode(treeId, node.get());
+ notificationManager.submitNotification(reg, candidate);
+ }
+
+ return reg;
+ }
+
synchronized void publishChange(@Nonnull final DataTreeCandidate candidate) {
// Runs synchronized with registrationRemoved()
processCandidateTree(candidate);
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.annotations.Beta;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
* Computes data change events for all affected registered listeners in data
* tree.
*/
-final class ResolveDataChangeEventsTask {
+@Beta
+public final class ResolveDataChangeEventsTask {
private static final Logger LOG = LoggerFactory.getLogger(ResolveDataChangeEventsTask.class);
private final DataTreeCandidate candidate;
private Multimap<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> collectedEvents;
- public ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
+ private ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
this.candidate = Preconditions.checkNotNull(candidate);
this.listenerRoot = Preconditions.checkNotNull(listenerTree);
}
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedTransactions;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
public class InMemoryDataStoreTest {
private SchemaContext schemaContext;
Mockito.doThrow( new RuntimeException( "mock ex" ) ).when( mockSnapshot )
.readNode( Mockito.any( YangInstanceIdentifier.class ) );
- DOMStoreReadTransaction readTx = new SnapshotBackedReadTransaction("1", true, mockSnapshot);
+ DOMStoreReadTransaction readTx = SnapshotBackedTransactions.newReadTransaction("1", true, mockSnapshot);
doReadAndThrowEx( readTx );
}
Mockito.doThrow( new RuntimeException( "mock ex" ) ).when( mockModification )
.readNode( Mockito.any( YangInstanceIdentifier.class ) );
Mockito.doReturn( mockModification ).when( mockSnapshot ).newModification();
- TransactionReadyPrototype mockReady = Mockito.mock( TransactionReadyPrototype.class );
- DOMStoreReadTransaction readTx = new SnapshotBackedReadWriteTransaction("1", false, mockSnapshot, mockReady);
+ @SuppressWarnings("unchecked")
+ TransactionReadyPrototype<String> mockReady = Mockito.mock( TransactionReadyPrototype.class );
+ DOMStoreReadTransaction readTx = SnapshotBackedTransactions.newReadWriteTransaction("1", false, mockSnapshot, mockReady);
doReadAndThrowEx( readTx );
}
- private void doReadAndThrowEx( final DOMStoreReadTransaction readTx ) throws Throwable {
-
+ private static void doReadAndThrowEx( final DOMStoreReadTransaction readTx ) throws Throwable {
try {
readTx.read(TestModel.TEST_PATH).get();
} catch( ExecutionException e ) {
* Create rpc implementation capable of handling RPC for monitoring and notifications even before the schemas of remote device are downloaded
*/
static NetconfDeviceRpc getRpcForInitialization(final NetconfDeviceCommunicator listener) {
- return new NetconfDeviceRpc(INIT_SCHEMA_CTX, listener, new NetconfMessageTransformer(INIT_SCHEMA_CTX));
+ return new NetconfDeviceRpc(INIT_SCHEMA_CTX, listener, new NetconfMessageTransformer(INIT_SCHEMA_CTX, false));
}
@VisibleForTesting
void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionPreferences remoteSessionCapabilities, final DOMRpcService deviceRpc) {
- messageTransformer = new NetconfMessageTransformer(result);
+ messageTransformer = new NetconfMessageTransformer(result, true);
updateTransformer(messageTransformer);
// salFacade.onDeviceConnected has to be called before the notification handler is initialized
}
private NetconfDeviceRpc getDeviceSpecificRpc(final SchemaContext result) {
- return new NetconfDeviceRpc(result, listener, new NetconfMessageTransformer(result));
+ return new NetconfDeviceRpc(result, listener, new NetconfMessageTransformer(result, true));
}
private Collection<SourceIdentifier> stripMissingSource(final Collection<SourceIdentifier> requiredSources, final SourceIdentifier sIdToRemove) {
private final Multimap<QName, NotificationDefinition> mappedNotifications;
private final DomToNormalizedNodeParserFactory parserFactory;
- public NetconfMessageTransformer(final SchemaContext schemaContext) {
+ public NetconfMessageTransformer(final SchemaContext schemaContext, final boolean strictParsing) {
this.counter = new MessageCounter();
this.schemaContext = schemaContext;
- parserFactory = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, schemaContext);
+ parserFactory = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, schemaContext, strictParsing);
mappedRpcs = Maps.uniqueIndex(schemaContext.getOperations(), QNAME_FUNCTION);
mappedNotifications = Multimaps.index(schemaContext.getNotifications(), QNAME_NOREV_FUNCTION);
final DataSchemaNode schemasNode = ((ContainerSchemaNode) NetconfDevice.INIT_SCHEMA_CTX.getDataChildByName("netconf-state")).getDataChildByName("schemas");
final Document schemasXml = XmlUtil.readXmlToDocument(getClass().getResourceAsStream("/netconf-state.schemas.payload.xml"));
- final ToNormalizedNodeParser<Element, ContainerNode, ContainerSchemaNode> containerNodeParser = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, NetconfDevice.INIT_SCHEMA_CTX).getContainerNodeParser();
+ final ToNormalizedNodeParser<Element, ContainerNode, ContainerSchemaNode> containerNodeParser = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, NetconfDevice.INIT_SCHEMA_CTX, false).getContainerNodeParser();
final ContainerNode compositeNodeSchemas = containerNodeParser.parse(Collections.singleton(schemasXml.getDocumentElement()), (ContainerSchemaNode) schemasNode);
final NetconfStateSchemas schemas = NetconfStateSchemas.create(new RemoteDeviceId("device", new InetSocketAddress(99)), compositeNodeSchemas);
public void setup() throws Exception {
final SchemaContext schemaContext = getNotificationSchemaContext(getClass());
- messageTransformer = new NetconfMessageTransformer(schemaContext);
+ messageTransformer = new NetconfMessageTransformer(schemaContext, true);
final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(true);
cfgCtx = parser.resolveSchemaContext(Sets.union(configModules, notifModules));
assertNotNull(cfgCtx);
- messageTransformer = new NetconfMessageTransformer(cfgCtx);
+ messageTransformer = new NetconfMessageTransformer(cfgCtx, true);
}
private LeafNode<Object> buildLeaf(final QName running, final Object value) {
}
private NetconfMessageTransformer getTransformer(final SchemaContext schema) {
- return new NetconfMessageTransformer(schema);
+ return new NetconfMessageTransformer(schema, true);
}
@Test
import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
final JsonReader reader = new JsonReader(new InputStreamReader(entityStream));
jsonParser.parse(reader);
- final NormalizedNode<?, ?> partialResult = resultHolder.getResult();
+ NormalizedNode<?, ?> partialResult = resultHolder.getResult();
final NormalizedNode<?, ?> result;
- if(partialResult instanceof MapNode) {
+
+ // unwrap result from augmentation and choice nodes on PUT
+ if (!isPost()) {
+ while (partialResult instanceof AugmentationNode || partialResult instanceof ChoiceNode) {
+                final Object childNode = ((DataContainerNode<?>) partialResult).getValue().iterator().next();
+ partialResult = (NormalizedNode<?, ?>) childNode;
+ }
+ }
+
+ if (partialResult instanceof MapNode) {
result = Iterables.getOnlyElement(((MapNode) partialResult).getValue());
} else {
result = partialResult;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
+import org.opendaylight.yangtools.yang.model.api.AugmentationTarget;
+import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
final List<Element> elements = Collections.singletonList(doc.getDocumentElement());
final SchemaNode schemaNodeContext = pathContext.getSchemaNode();
- DataSchemaNode schemaNode = null;
+ DataSchemaNode schemaNode;
if (schemaNodeContext instanceof RpcDefinition) {
schemaNode = ((RpcDefinition) schemaNodeContext).getInput();
} else if (schemaNodeContext instanceof DataSchemaNode) {
final String docRootElm = doc.getDocumentElement().getLocalName();
final String schemaNodeName = pathContext.getSchemaNode().getQName().getLocalName();
+ // FIXME the factory instance should be cached if the schema context is the same
+ final DomToNormalizedNodeParserFactory parserFactory =
+ DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, pathContext.getSchemaContext());
+
if (!schemaNodeName.equalsIgnoreCase(docRootElm)) {
- final Collection<DataSchemaNode> children = ((DataNodeContainer) schemaNode).getChildNodes();
- for (final DataSchemaNode child : children) {
- if (child.getQName().getLocalName().equalsIgnoreCase(docRootElm)) {
- schemaNode = child;
- break;
+ final DataSchemaNode foundSchemaNode = findSchemaNodeOrParentChoiceByName(schemaNode, docRootElm);
+ if (foundSchemaNode != null) {
+ if (schemaNode instanceof AugmentationTarget) {
+ final AugmentationSchema augmentSchemaNode = findCorrespondingAugment(schemaNode, foundSchemaNode);
+ if (augmentSchemaNode != null) {
+ return parserFactory.getAugmentationNodeParser().parse(elements, augmentSchemaNode);
+ }
}
+ schemaNode = foundSchemaNode;
}
}
- // FIXME the factory instance should be cached if the schema context is the same
- final DomToNormalizedNodeParserFactory parserFactory =
- DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, pathContext.getSchemaContext());
+ NormalizedNode<?, ?> parsed = null;
if(schemaNode instanceof ContainerSchemaNode) {
return parserFactory.getContainerNodeParser().parse(Collections.singletonList(doc.getDocumentElement()), (ContainerSchemaNode) schemaNode);
} else if(schemaNode instanceof ListSchemaNode) {
final ListSchemaNode casted = (ListSchemaNode) schemaNode;
return parserFactory.getMapEntryNodeParser().parse(elements, casted);
- } // FIXME : add another DataSchemaNode extensions e.g. LeafSchemaNode
+ } else if (schemaNode instanceof ChoiceSchemaNode) {
+ final ChoiceSchemaNode casted = (ChoiceSchemaNode) schemaNode;
+ return parserFactory.getChoiceNodeParser().parse(elements, casted);
+ }
+ // FIXME : add another DataSchemaNode extensions e.g. LeafSchemaNode
+
+ return parsed;
+ }
+
+    /**
+     * Finds the direct child of {@code schemaNode} whose local QName matches {@code elementName}
+     * (case-insensitive). Choice children are collected first and only searched after all plain
+     * children have been checked; when the element is found inside one of a choice's cases, the
+     * top-level choice node itself is returned (not the nested case child), so the caller can
+     * parse the enclosing choice structure. Returns {@code null} when nothing matches.
+     */
+    private static DataSchemaNode findSchemaNodeOrParentChoiceByName(DataSchemaNode schemaNode, String elementName) {
+        final ArrayList<ChoiceSchemaNode> choiceSchemaNodes = new ArrayList<>();
+        final Collection<DataSchemaNode> children = ((DataNodeContainer) schemaNode).getChildNodes();
+        for (final DataSchemaNode child : children) {
+            if (child instanceof ChoiceSchemaNode) {
+                choiceSchemaNodes.add((ChoiceSchemaNode) child);
+            } else if (child.getQName().getLocalName().equalsIgnoreCase(elementName)) {
+                return child;
+            }
+        }
+
+        // Depth-first search through each choice's cases; recursion handles nested choices.
+        for (final ChoiceSchemaNode choiceNode : choiceSchemaNodes) {
+            for (final ChoiceCaseNode caseNode : choiceNode.getCases()) {
+                final DataSchemaNode resultFromRecursion = findSchemaNodeOrParentChoiceByName(caseNode, elementName);
+                if (resultFromRecursion != null) {
+                    // this returns top choice node in which child element is found
+                    return choiceNode;
+                }
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Returns the augmentation of {@code parent} that declares {@code child}, or {@code null}
+     * when the child is not introduced via augmentation. Choice and case parents are explicitly
+     * excluded from the lookup even though they can be augmentation targets.
+     */
+    private static AugmentationSchema findCorrespondingAugment(final DataSchemaNode parent, final DataSchemaNode child) {
+        if (parent instanceof AugmentationTarget && !((parent instanceof ChoiceCaseNode) || (parent instanceof ChoiceSchemaNode))) {
+            for (AugmentationSchema augmentation : ((AugmentationTarget) parent).getAvailableAugmentations()) {
+                DataSchemaNode childInAugmentation = augmentation.getDataChildByName(child.getQName());
+                if (childInAugmentation != null) {
+                    return augmentation;
+                }
+            }
+        }
        return null;
    }
}
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
import javax.ws.rs.core.Response.Status;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+    // The global SchemaContext replaces the former DataNormalizationOperation root lookup;
+    // it is consumed by ensureParentsByMerge() to build any missing parent structure.
// PUT configuration
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPut(
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
+            final SchemaContext globalSchema, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
checkPreconditions();
-        final DataNormalizationOperation<?> rootOp = ControllerContext.getInstance().getRootOperation();
-        return putDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, rootOp);
+        return putDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, globalSchema);
}
+    // PUT through a mount point: the mount point's own schema context (not the global one)
+    // drives parent-structure creation in putDataViaTransaction().
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPut(
final DOMMountPoint mountPoint, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
final Optional<DOMDataBroker> domDataBrokerService = mountPoint.getService(DOMDataBroker.class);
if (domDataBrokerService.isPresent()) {
-            final DataNormalizationOperation<?> rootOp = new DataNormalizer(mountPoint.getSchemaContext()).getRootOperation();
return putDataViaTransaction(domDataBrokerService.get().newReadWriteTransaction(), CONFIGURATION, path,
-                    payload, rootOp);
+                    payload, mountPoint.getSchemaContext());
}
throw new RestconfDocumentedException("DOM data broker service isn't available for mount point.");
}
+    // POST mirrors PUT above: caller now supplies the global SchemaContext used for
+    // ensuring parent nodes exist before the merge.
// POST configuration
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPost(
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
+            final SchemaContext globalSchema, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
checkPreconditions();
-        final DataNormalizationOperation<?> rootOp = ControllerContext.getInstance().getRootOperation();
-        return postDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, rootOp);
+        return postDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, globalSchema);
}
+    // POST through a mount point: uses the mount point's schema context, same pattern as the
+    // mount-point PUT variant.
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPost(
final DOMMountPoint mountPoint, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
final Optional<DOMDataBroker> domDataBrokerService = mountPoint.getService(DOMDataBroker.class);
if (domDataBrokerService.isPresent()) {
-            final DataNormalizationOperation<?> rootOp = new DataNormalizer(mountPoint.getSchemaContext()).getRootOperation();
return postDataViaTransaction(domDataBrokerService.get().newReadWriteTransaction(), CONFIGURATION, path,
-                    payload, rootOp);
+                    payload, mountPoint.getSchemaContext());
}
throw new RestconfDocumentedException("DOM data broker service isn't available for mount point.");
}
private CheckedFuture<Void, TransactionCommitFailedException> postDataViaTransaction(
final DOMDataReadWriteTransaction rWTransaction, final LogicalDatastoreType datastore,
- final YangInstanceIdentifier parentPath, final NormalizedNode<?, ?> payload, final DataNormalizationOperation<?> root) {
+ final YangInstanceIdentifier parentPath, final NormalizedNode<?, ?> payload, final SchemaContext schemaContext) {
// FIXME: This is doing correct post for container and list children
// not sure if this will work for choice case
final YangInstanceIdentifier path;
LOG.trace("It wasn't possible to get data loaded from datastore at path " + path);
}
- ensureParentsByMerge(datastore, path, rWTransaction, root);
+ ensureParentsByMerge(datastore, path, rWTransaction, schemaContext);
rWTransaction.merge(datastore, path, payload);
LOG.trace("Post " + datastore.name() + " via Restconf: {}", path);
return rWTransaction.submit();
+    // Writes payload at path after ensuring all ancestors exist (ensureParentsByMerge).
+    // Note: put() replaces existing data at the path, unlike the merge used for POST.
private CheckedFuture<Void, TransactionCommitFailedException> putDataViaTransaction(
final DOMDataReadWriteTransaction writeTransaction, final LogicalDatastoreType datastore,
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload, final DataNormalizationOperation<?> root) {
+            final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload, final SchemaContext schemaContext) {
LOG.trace("Put " + datastore.name() + " via Restconf: {}", path);
-        ensureParentsByMerge(datastore, path, writeTransaction, root);
+        ensureParentsByMerge(datastore, path, writeTransaction, schemaContext);
writeTransaction.put(datastore, path, payload);
return writeTransaction.submit();
}
this.domDataBroker = domDataBroker;
}
+    /**
+     * Ensures all parent nodes of {@code normalizedPath} exist in the datastore by merging a
+     * skeleton structure (ImmutableNodes.fromInstanceId over the path minus its last argument)
+     * at the topmost path element. Replaces the previous per-level exists()/merge walk that was
+     * driven by DataNormalizationOperation, avoiding one datastore read per path level.
+     */
-    private final void ensureParentsByMerge(final LogicalDatastoreType store,
-            final YangInstanceIdentifier normalizedPath, final DOMDataReadWriteTransaction rwTx,
-            final DataNormalizationOperation<?> root) {
-        final List<PathArgument> currentArguments = new ArrayList<>();
-        final Iterator<PathArgument> iterator = normalizedPath.getPathArguments().iterator();
-        DataNormalizationOperation<?> currentOp = root;
-        while (iterator.hasNext()) {
-            final PathArgument currentArg = iterator.next();
-            try {
-                currentOp = currentOp.getChild(currentArg);
-            } catch (final DataNormalizationException e) {
-                rwTx.cancel();
-                throw new IllegalArgumentException(
-                        String.format("Invalid child encountered in path %s", normalizedPath), e);
-            }
-            currentArguments.add(currentArg);
-            final YangInstanceIdentifier currentPath = YangInstanceIdentifier.create(currentArguments);
+    private void ensureParentsByMerge(final LogicalDatastoreType store,
+            final YangInstanceIdentifier normalizedPath, final DOMDataReadWriteTransaction rwTx, final SchemaContext schemaContext) {
+        final List<PathArgument> normalizedPathWithoutChildArgs = new ArrayList<>();
+        YangInstanceIdentifier rootNormalizedPath = null;
-            final Boolean exists;
+        final Iterator<PathArgument> it = normalizedPath.getPathArguments().iterator();
-            try {
-
-                final CheckedFuture<Boolean, ReadFailedException> future = rwTx.exists(store, currentPath);
-                exists = future.checkedGet();
-            } catch (final ReadFailedException e) {
-                LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
-                rwTx.cancel();
-                throw new IllegalStateException("Failed to read pre-existing data", e);
+        while(it.hasNext()) {
+            final PathArgument pathArgument = it.next();
+            // Remember the first path argument: the merge of the parent skeleton is rooted there.
+            if(rootNormalizedPath == null) {
+                rootNormalizedPath = YangInstanceIdentifier.create(pathArgument);
            }
-            if (!exists && iterator.hasNext()) {
-                rwTx.merge(store, currentPath, currentOp.createDefault(currentArg));
+            // Skip the last element; it's not a parent
+            if(it.hasNext()) {
+                normalizedPathWithoutChildArgs.add(pathArgument);
            }
        }
+
+        // No parent structure involved, no need to ensure parents
+        if(normalizedPathWithoutChildArgs.isEmpty()) {
+            return;
+        }
+
+        Preconditions.checkArgument(rootNormalizedPath != null, "Empty path received");
+
+        // Merge (not put) so existing sibling data under the root is preserved.
+        final NormalizedNode<?, ?> parentStructure =
+                ImmutableNodes.fromInstanceId(schemaContext, YangInstanceIdentifier.create(normalizedPathWithoutChildArgs));
+        rwTx.merge(store, rootNormalizedPath, parentStructure);
}
}
if (mountPoint != null) {
broker.commitConfigurationDataPut(mountPoint, normalizedII, payload.getData()).checkedGet();
} else {
- broker.commitConfigurationDataPut(normalizedII, payload.getData()).checkedGet();
+ broker.commitConfigurationDataPut(controllerContext.getGlobalSchema(), normalizedII, payload.getData()).checkedGet();
}
break;
throw new RestconfDocumentedException("Input is required.", ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
}
- final URI payloadNS = payload.getData().getNodeType().getNamespace();
- if (payloadNS == null) {
- throw new RestconfDocumentedException(
- "Data has bad format. Root element node must have namespace (XML format) or module name(JSON format)",
- ErrorType.PROTOCOL, ErrorTag.UNKNOWN_NAMESPACE);
- }
+ // FIXME: move this to parsing stage (we can have augmentation nodes here which do not have namespace)
+// final URI payloadNS = payload.getData().getNodeType().getNamespace();
+// if (payloadNS == null) {
+// throw new RestconfDocumentedException(
+// "Data has bad format. Root element node must have namespace (XML format) or module name(JSON format)",
+// ErrorType.PROTOCOL, ErrorTag.UNKNOWN_NAMESPACE);
+// }
final DOMMountPoint mountPoint = payload.getInstanceIdentifierContext().getMountPoint();
final InstanceIdentifierContext<?> iiWithData = payload.getInstanceIdentifierContext();
if (mountPoint != null) {
broker.commitConfigurationDataPost(mountPoint, normalizedII, payload.getData()).checkedGet();
} else {
- broker.commitConfigurationDataPost(normalizedII, payload.getData()).checkedGet();
+ broker.commitConfigurationDataPost(controllerContext.getGlobalSchema(), normalizedII, payload.getData()).checkedGet();
}
} catch(final RestconfDocumentedException e) {
throw e;
restconfImpl = RestconfImpl.getInstance();
restconfImpl.setBroker(brokerFacade);
restconfImpl.setControllerContext(controllerContext);
- when(brokerFacade.commitConfigurationDataPut(any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
+ when(brokerFacade.commitConfigurationDataPut(any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
.thenReturn(mock(CheckedFuture.class));
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
/**
when(wTransaction.submit()).thenReturn(expFuture);
- final Future<Void> actualFuture = brokerFacade.commitConfigurationDataPut(instanceID, dummyNode);
+ final Future<Void> actualFuture = brokerFacade.commitConfigurationDataPut((SchemaContext)null, instanceID, dummyNode);
assertSame("commitConfigurationDataPut", expFuture, actualFuture);
when(rwTransaction.submit()).thenReturn(expFuture);
final CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade.commitConfigurationDataPost(
- YangInstanceIdentifier.builder().build(), dummyNode);
+ (SchemaContext)null, YangInstanceIdentifier.builder().build(), dummyNode);
assertSame("commitConfigurationDataPost", expFuture, actualFuture);
when(rwTransaction.read(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
dummyNodeInFuture);
try {
- brokerFacade.commitConfigurationDataPost(instanceID, dummyNode);
+ // Schema context is only necessary for ensuring parent structure
+ brokerFacade.commitConfigurationDataPost((SchemaContext)null, instanceID, dummyNode);
} catch (final RestconfDocumentedException e) {
assertEquals("getErrorTag", RestconfError.ErrorTag.DATA_EXISTS, e.getErrors().get(0).getErrorTag());
throw e;
final RpcResult<TransactionStatus> rpcResult = new DummyRpcResult.Builder<TransactionStatus>().result(
TransactionStatus.COMMITED).build();
- when(brokerFacade.commitConfigurationDataPost(any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
+ when(brokerFacade.commitConfigurationDataPost((SchemaContext)null, any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
.thenReturn(mock(CheckedFuture.class));
final ArgumentCaptor<YangInstanceIdentifier> instanceIdCaptor = ArgumentCaptor.forClass(YangInstanceIdentifier.class);
// FIXME : NEVER test a nr. of call some service in complex test suite
// verify(brokerFacade, times(2))
verify(brokerFacade, times(1))
- .commitConfigurationDataPost(instanceIdCaptor.capture(), compNodeCaptor.capture());
+ .commitConfigurationDataPost((SchemaContext)null, instanceIdCaptor.capture(), compNodeCaptor.capture());
// identifier = "[(urn:ietf:params:xml:ns:yang:test-interface?revision=2014-07-01)interfaces, (urn:ietf:params:xml:ns:yang:test-interface?revision=2014-07-01)block]";
assertEquals(identifier, ImmutableList.copyOf(instanceIdCaptor.getValue().getPathArguments()).toString());
}
public void createConfigurationDataNullTest() throws UnsupportedEncodingException {
initMocking();
- when(brokerFacade.commitConfigurationDataPost(any(YangInstanceIdentifier.class),any(NormalizedNode.class)))
+ when(brokerFacade.commitConfigurationDataPost(any(SchemaContext.class), any(YangInstanceIdentifier.class),any(NormalizedNode.class)))
.thenReturn(Futures.<Void, TransactionCommitFailedException>immediateCheckedFuture(null));
//FIXME : find who is set schemaContext
doThrow(OptimisticLockFailedException.class).
when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
assertEquals(500, put(uri, MediaType.APPLICATION_XML, xmlData));
doThrow(OptimisticLockFailedException.class).doReturn(mock(CheckedFuture.class)).
when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
assertEquals(200, put(uri, MediaType.APPLICATION_XML, xmlData));
}
doThrow(TransactionCommitFailedException.class).
when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ (SchemaContext)null, any(YangInstanceIdentifier.class), any(NormalizedNode.class));
assertEquals(500, put(uri, MediaType.APPLICATION_XML, xmlData));
}
private void mockCommitConfigurationDataPutMethod(final boolean noErrors) {
if (noErrors) {
doReturn(mock(CheckedFuture.class)).when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
} else {
doThrow(RestconfDocumentedException.class).when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
}
}
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
-import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
public class EditConfig extends AbstractSingletonNetconfOperation {
LOG.debug("DataNode from module is not ContainerSchemaNode nor ListSchemaNode, aborting..");
return Optional.absent();
}
-
}
+    // Extracts the <target> datastore (running/candidate) from an <edit-config> request.
private Datastore extractTargetParameter(final XmlElement operationElement) throws NetconfDocumentedException {
-        final XmlElement targetChildNode;
-        try {
-            final XmlElement targetElement = operationElement.getOnlyChildElementWithSameNamespace(TARGET_KEY);
-            targetChildNode = targetElement.getOnlyChildElementWithSameNamespace();
-        } catch (final MissingNameSpaceException | UnexpectedNamespaceException e) {
-            LOG.trace("Can't get only child element with same namespace", e);
-            throw NetconfDocumentedException.wrap(e);
+        final NodeList elementsByTagName = operationElement.getDomElement().getElementsByTagName(TARGET_KEY);
+        // Direct lookup instead of using XmlElement class due to performance
+        // NOTE(review): getElementsByTagName matches all descendants (not just direct children)
+        // and ignores namespaces, unlike the previous getOnlyChildElementWithSameNamespace --
+        // confirm that a nested "target" element cannot occur in valid config payloads.
+        if (elementsByTagName.getLength() == 0) {
+            throw new NetconfDocumentedException("Missing target element", ErrorType.rpc, ErrorTag.missing_attribute, ErrorSeverity.error);
+        } else if (elementsByTagName.getLength() > 1) {
+            throw new NetconfDocumentedException("Multiple target elements", ErrorType.rpc, ErrorTag.unknown_attribute, ErrorSeverity.error);
+        } else {
+            // Datastore.valueOf throws IllegalArgumentException for unknown datastore names.
+            final XmlElement targetChildNode = XmlElement.fromDomElement((Element) elementsByTagName.item(0)).getOnlyChildElement();
+            return Datastore.valueOf(targetChildNode.getName());
        }
-
-        return Datastore.valueOf(targetChildNode.getName());
}
-    private ModifyAction getDefaultOperation(final XmlElement operationElement) throws NetconfDocumentedException{
-        try {
-            return ModifyAction.fromXmlValue(getElement(operationElement, DEFAULT_OPERATION_KEY).getTextContent());
-        } catch (NetconfDocumentedException e) {
-            if (e.getErrorType() == ErrorType.protocol
-                    && e.getErrorSeverity() == ErrorSeverity.error
-                    && e.getErrorTag() == ErrorTag.missing_element) {
-                return ModifyAction.MERGE;
-            }
-            else {
-                throw e;
-            }
+    // Reads the optional <default-operation> element; absent means MERGE (RFC 6241 default).
+    // NOTE(review): getElementsByTagName searches all descendants and is namespace-unaware --
+    // confirm this cannot pick up an unrelated nested element.
+    private ModifyAction getDefaultOperation(final XmlElement operationElement) throws NetconfDocumentedException {
+        final NodeList elementsByTagName = operationElement.getDomElement().getElementsByTagName(DEFAULT_OPERATION_KEY);
+        if(elementsByTagName.getLength() == 0) {
+            return ModifyAction.MERGE;
+        } else if(elementsByTagName.getLength() > 1) {
+            throw new NetconfDocumentedException("Multiple " + DEFAULT_OPERATION_KEY + " elements",
+                    ErrorType.rpc, ErrorTag.unknown_attribute, ErrorSeverity.error);
+        } else {
+            return ModifyAction.fromXmlValue(elementsByTagName.item(0).getTextContent());
        }
+
    }
private XmlElement getElement(final XmlElement operationElement, String elementName) throws NetconfDocumentedException {
package org.opendaylight.controller.netconf.cli.reader.custom;
import static org.opendaylight.controller.netconf.cli.io.IOUtil.isSkipInput;
-
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
filterPartsQNames.add(qName);
}
- List<NormalizedNode<?, ?>> previous = readInnerNode(rawValue);
+ List<? extends NormalizedNode<?, ?>> previous = readInnerNode(rawValue);
for (final QName qName : Lists.reverse(filterPartsQNames).subList(1, filterPartsQNames.size())) {
previous = Collections.<NormalizedNode<?, ?>>singletonList(
);
}
- final DataContainerChild<?, ?> newNode = previous == null ? null
- : ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(schemaNode.getQName()))
- .withValue((Collection) previous).build();
+ if (previous == null) {
+ return Collections.singletonList(null);
+ }
+
+ final DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> builder = ImmutableContainerNodeBuilder.create();
+ builder.withNodeIdentifier(new NodeIdentifier(schemaNode.getQName()));
+ builder.withValue((Collection<DataContainerChild<?, ?>>) previous);
- return Collections.<NormalizedNode<?, ?>> singletonList(newNode);
+ return Collections.<NormalizedNode<?, ?>> singletonList(builder.build());
}
private List<NormalizedNode<?, ?>> readInnerNode(final String pathString) throws ReadingException {
this.timer = timer;
}
+    /** Returns the timer supplied at construction; exposed for subclasses. */
+    protected Timer getTimer() {
+        return timer;
+    }
+
@Override
public Future<NetconfClientSession> createClient(final NetconfClientConfiguration clientConfiguration) {
switch (clientConfiguration.getProtocol()) {
public class NetconfClientSessionNegotiatorFactory implements SessionNegotiatorFactory<NetconfMessage, NetconfClientSession, NetconfClientSessionListener> {
+    // NOTE(review): renaming the public CLIENT_CAPABILITIES constant to EXI_CLIENT_CAPABILITIES
+    // is a breaking API change for external consumers -- verify no out-of-tree users remain.
+    // Capabilities advertised in the client <hello>: base 1.0 + 1.1 framing plus EXI encoding.
-    public static final Set<String> CLIENT_CAPABILITIES = ImmutableSet.of(
+    public static final Set<String> EXI_CLIENT_CAPABILITIES = ImmutableSet.of(
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1,
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_CAPABILITY_EXI_1_0);
+    // Base 1.0 framing only, with EXI -- for legacy servers without chunked framing.
+    public static final Set<String> LEGACY_EXI_CLIENT_CAPABILITIES = ImmutableSet.of(
+            XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
+            XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_CAPABILITY_EXI_1_0);
+
+    // Base 1.0 + 1.1 framing, no EXI.
+    public static final Set<String> DEFAULT_CLIENT_CAPABILITIES = ImmutableSet.of(
+            XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
+            XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
+
+    // Base 1.0 framing only, no EXI -- minimal capability set.
+    public static final Set<String> LEGACY_FRAMING_CLIENT_CAPABILITIES = ImmutableSet.of(
+            XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0);
+
private static final Logger LOG = LoggerFactory.getLogger(NetconfClientSessionNegotiatorFactory.class);
private static final String START_EXI_MESSAGE_ID = "default-start-exi";
private static final EXIOptions DEFAULT_OPTIONS;
DEFAULT_OPTIONS = opts;
}
+    // Capabilities this client advertises in its <hello>; chosen per-instance so callers can
+    // select one of the predefined capability sets above.
+    private final Set<String> clientCapabilities;
+
public NetconfClientSessionNegotiatorFactory(final Timer timer,
final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
final long connectionTimeoutMillis) {
this(timer, additionalHeader, connectionTimeoutMillis, DEFAULT_OPTIONS);
}

+    // Convenience overload: default EXI options with caller-chosen capabilities.
+    public NetconfClientSessionNegotiatorFactory(final Timer timer,
+            final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
+            final long connectionTimeoutMillis, final Set<String> capabilities) {
+        this(timer, additionalHeader, connectionTimeoutMillis, DEFAULT_OPTIONS, capabilities);
+
+    }
+
public NetconfClientSessionNegotiatorFactory(final Timer timer,
final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
final long connectionTimeoutMillis, final EXIOptions exiOptions) {
+        // Preserves historical behavior: EXI-capable hello when only EXI options are given.
+        this(timer, additionalHeader, connectionTimeoutMillis, exiOptions, EXI_CLIENT_CAPABILITIES);
+    }
+
+    // Primary constructor: all other constructors delegate here.
+    public NetconfClientSessionNegotiatorFactory(final Timer timer,
+            final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
+            final long connectionTimeoutMillis, final EXIOptions exiOptions, final Set<String> capabilities) {
this.timer = Preconditions.checkNotNull(timer);
this.additionalHeader = additionalHeader;
this.connectionTimeoutMillis = connectionTimeoutMillis;
this.options = exiOptions;
+        this.clientCapabilities = capabilities;
}
@Override
NetconfMessage startExiMessage = NetconfStartExiMessage.create(options, START_EXI_MESSAGE_ID);
NetconfHelloMessage helloMessage = null;
try {
- helloMessage = NetconfHelloMessage.createClientHello(CLIENT_CAPABILITIES, additionalHeader);
+ helloMessage = NetconfHelloMessage.createClientHello(clientCapabilities, additionalHeader);
} catch (NetconfDocumentedException e) {
- LOG.error("Unable to create client hello message with capabilities {} and additional handler {}",CLIENT_CAPABILITIES,additionalHeader);
+ LOG.error("Unable to create client hello message with capabilities {} and additional handler {}", clientCapabilities,additionalHeader);
throw new IllegalStateException(e);
}
private static final Logger LOG = LoggerFactory.getLogger(SubtreeFilter.class);
static Document applySubtreeFilter(Document requestDocument, Document rpcReply) throws NetconfDocumentedException {
- // FIXME: rpcReply document must be reread otherwise some nodes do not inherit namespaces. (services/service)
- try {
- rpcReply = XmlUtil.readXmlToDocument(XmlUtil.toString(rpcReply, true));
- } catch (SAXException | IOException e) {
- LOG.error("Cannot transform document", e);
- throw new NetconfDocumentedException("Cannot transform document");
- }
-
OperationNameAndNamespace operationNameAndNamespace = new OperationNameAndNamespace(requestDocument);
if (XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0.equals(operationNameAndNamespace.getNamespace()) &&
XmlNetconfConstants.GET.equals(operationNameAndNamespace.getOperationName()) ||
// not implement filtering.
Optional<XmlElement> maybeFilter = operationNameAndNamespace.getOperationElement().getOnlyChildElementOptionally(
XmlNetconfConstants.FILTER, XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
- if (maybeFilter.isPresent() && (
- "subtree".equals(maybeFilter.get().getAttribute("type"))||
- "subtree".equals(maybeFilter.get().getAttribute("type", XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0)))
- ) {
+ if (!maybeFilter.isPresent()) {
+ return rpcReply;
+ }
+ // FIXME: rpcReply document must be reread otherwise some nodes do not inherit namespaces. (services/service)
+ try {
+ rpcReply = XmlUtil.readXmlToDocument(XmlUtil.toString(rpcReply, true));
+ } catch (SAXException | IOException e) {
+ LOG.error("Cannot transform document", e);
+ throw new NetconfDocumentedException("Cannot transform document" + e);
+ }
+ XmlElement filter = maybeFilter.get();
+ if ("subtree".equals(filter.getAttribute("type"))||
+ "subtree".equals(filter.getAttribute("type", XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0))) {
// do
return filtered(maybeFilter.get(), rpcReply);
}
}
+
return rpcReply; // return identical document
}
--- /dev/null
+<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+<module>
+<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl">prefix:threadfactory-naming</type>
+<name>name{MSG_ID}</name>
+<name-prefix xmlns="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl">remote-connector-processing-executor</name-prefix>
+</module>
+</modules>
\ No newline at end of file
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-connector-config</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>logback-config</artifactId>
<shadedClassifierName>executable</shadedClassifierName>
</configuration>
</execution>
+
+ <execution>
+ <id>stress-client</id>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <shadedArtifactId>stress-client</shadedArtifactId>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ <transformers>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+ <mainClass>org.opendaylight.controller.netconf.test.tool.client.stress.StressClient</mainClass>
+ </transformer>
+ </transformers>
+ <shadedArtifactAttached>true</shadedArtifactAttached>
+ <shadedClassifierName>executable</shadedClassifierName>
+ </configuration>
+ </execution>
</executions>
</plugin>
</plugins>
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.NavigableMap;
import java.util.Set;
-import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
return input.getKey().getAST();
}
});
- final Map<String, TreeMap<Date, URI>> namespaceContext = BuilderUtils.createYangNamespaceContext(
+ final Map<String, NavigableMap<Date, URI>> namespaceContext = BuilderUtils.createYangNamespaceContext(
asts.values(), Optional.<SchemaContext>absent());
final ParseTreeWalker walker = new ParseTreeWalker();
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Execution strategy that sends all edit-config requests (plus one commit per
+ * batch) without waiting for responses, then blocks on every returned future
+ * and verifies that all expected responses arrived.
+ */
+class AsyncExecutionStrategy implements ExecutionStrategy {
+    private static final Logger LOG = LoggerFactory.getLogger(AsyncExecutionStrategy.class);
+
+    private final Parameters params;
+    private final List<NetconfMessage> preparedMessages;
+    private final NetconfDeviceCommunicator sessionListener;
+    // Sizes of the consecutive edit batches; each batch is followed by a commit.
+    private final List<Integer> editBatches;
+
+    public AsyncExecutionStrategy(final Parameters params, final List<NetconfMessage> editConfigMsgs, final NetconfDeviceCommunicator sessionListener) {
+        this.params = params;
+        this.preparedMessages = editConfigMsgs;
+        this.sessionListener = sessionListener;
+        this.editBatches = countEditBatchSizes(params);
+    }
+
+    /**
+     * Splits the total edit count into full batches of editBatchSize plus an
+     * optional trailing partial batch.
+     * TODO shared verbatim with SyncExecutionStrategy — extract to a common base.
+     */
+    private static List<Integer> countEditBatchSizes(final Parameters params) {
+        final List<Integer> editBatches = Lists.newArrayList();
+        if (params.editBatchSize != params.editCount) {
+            final int fullBatches = params.editCount / params.editBatchSize;
+            for (int i = 0; i < fullBatches; i++) {
+                editBatches.add(params.editBatchSize);
+            }
+
+            if (params.editCount % params.editBatchSize != 0) {
+                editBatches.add(params.editCount % params.editBatchSize);
+            }
+        } else {
+            editBatches.add(params.editBatchSize);
+        }
+        return editBatches;
+    }
+
+    @Override
+    public void invoke() {
+        final AtomicInteger responseCounter = new AtomicInteger(0);
+        final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
+
+        int batchI = 0;
+        for (final Integer editBatch : editBatches) {
+            for (int i = 0; i < editBatch; i++) {
+                final int msgId = i + (batchI * params.editBatchSize);
+                final NetconfMessage msg = preparedMessages.get(msgId);
+                LOG.debug("Sending message {}", msgId);
+                // Serializing the document is expensive; only do it when debug is on.
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Sending message {}", XmlUtil.toString(msg.getDocument()));
+                }
+                final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture =
+                        sessionListener.sendRequest(msg, StressClient.EDIT_QNAME);
+                futures.add(netconfMessageFuture);
+            }
+            batchI++;
+            LOG.info("Batch {} with size {} sent. Committing", batchI, editBatch);
+            futures.add(sessionListener.sendRequest(StressClient.COMMIT_MSG, StressClient.COMMIT_QNAME));
+        }
+
+        LOG.info("All batches sent. Waiting for responses");
+        // Wait for every future; a failed (but completed) request is logged,
+        // a timed-out or interrupted wait aborts the run.
+        for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+            try {
+                final RpcResult<NetconfMessage> netconfMessageRpcResult = future.get(params.msgTimeout, TimeUnit.SECONDS);
+                if(netconfMessageRpcResult.isSuccessful()) {
+                    responseCounter.incrementAndGet();
+                    LOG.debug("Received response {}", responseCounter.get());
+                } else {
+                    LOG.warn("Request failed {}", netconfMessageRpcResult);
+                }
+            } catch (final InterruptedException e) {
+                // Restore the interrupt status for callers instead of swallowing it.
+                Thread.currentThread().interrupt();
+                throw new RuntimeException(e);
+            } catch (final ExecutionException | TimeoutException e) {
+                throw new RuntimeException("Request not finished", e);
+            }
+        }
+
+        // One response per edit plus one per commit (= number of batches).
+        Preconditions.checkState(responseCounter.get() == params.editCount + editBatches.size(), "Not all responses were received, only %s from %s", responseCounter.get(), params.editCount + editBatches.size());
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.Timer;
+import java.util.Set;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionNegotiatorFactory;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+
+/**
+ * Netconf client dispatcher whose advertised client capabilities (and thereby
+ * the framing/encoding negotiated with the server) are fixed at construction
+ * time. Instances are obtained via the static factories below, one per
+ * supported capability set.
+ */
+public class ConfigurableClientDispatcher extends NetconfClientDispatcherImpl {
+
+    // Capability set advertised during session negotiation; selects framing/EXI.
+    private final Set<String> capabilities;
+
+    private ConfigurableClientDispatcher(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer, final Set<String> capabilities) {
+        super(bossGroup, workerGroup, timer);
+        this.capabilities = capabilities;
+    }
+
+    /**
+     * EXI + chunked framing
+     */
+    public static ConfigurableClientDispatcher createChunkedExi(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+        return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.EXI_CLIENT_CAPABILITIES);
+    }
+
+    /**
+     * EXI + ]]>]]> framing
+     */
+    public static ConfigurableClientDispatcher createLegacyExi(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+        return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.LEGACY_EXI_CLIENT_CAPABILITIES);
+    }
+
+    /**
+     * Chunked framing
+     */
+    public static ConfigurableClientDispatcher createChunked(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+        return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.DEFAULT_CLIENT_CAPABILITIES);
+    }
+
+    /**
+     * ]]>]]> framing
+     */
+    public static ConfigurableClientDispatcher createLegacy(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+        return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.LEGACY_FRAMING_CLIENT_CAPABILITIES);
+    }
+
+    // Supplies the negotiator factory built from the fixed capability set
+    // instead of the superclass default.
+    @Override
+    protected NetconfClientSessionNegotiatorFactory getNegotiatorFactory(final NetconfClientConfiguration cfg) {
+        return new NetconfClientSessionNegotiatorFactory(getTimer(), cfg.getAdditionalHeader(), cfg.getConnectionTimeoutMillis(), capabilities);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+/**
+ * Strategy for pushing the prepared edit-config/commit messages to the netconf
+ * server. Implementations differ in whether they wait for each response before
+ * sending the next request (sync) or send everything up front and collect the
+ * responses afterwards (async).
+ */
+public interface ExecutionStrategy {
+    /** Runs the stress test; blocks until all responses are accounted for. */
+    void invoke();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import com.google.common.base.Preconditions;
+import java.io.File;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import net.sourceforge.argparse4j.ArgumentParsers;
+import net.sourceforge.argparse4j.annotation.Arg;
+import net.sourceforge.argparse4j.inf.ArgumentParser;
+
+/**
+ * Command-line parameters of the stress client. Fields are populated by
+ * argparse4j via the {@link Arg} annotations; call {@link #validate()} after
+ * parsing to check invariants and apply defaults that depend on other values.
+ */
+public class Parameters {
+
+    @Arg(dest = "ip")
+    public String ip;
+
+    @Arg(dest = "port")
+    public int port;
+
+    @Arg(dest = "edit-count")
+    public int editCount;
+
+    @Arg(dest = "edit-content")
+    public File editContent;
+
+    // -1 means "unset"; validate() collapses it to editCount (single batch).
+    @Arg(dest = "edit-batch-size")
+    public int editBatchSize;
+
+    @Arg(dest = "debug")
+    public boolean debug;
+
+    @Arg(dest = "legacy-framing")
+    public boolean legacyFraming;
+
+    @Arg(dest = "exi")
+    public boolean exi;
+
+    @Arg(dest = "async")
+    public boolean async;
+
+    @Arg(dest = "ssh")
+    public boolean ssh;
+
+    // Per-request response timeout, in seconds.
+    @Arg(dest = "msg-timeout")
+    public long msgTimeout;
+
+    /** Builds the argparse4j parser describing all supported options. */
+    static ArgumentParser getParser() {
+        final ArgumentParser parser = ArgumentParsers.newArgumentParser("netconf stress client");
+
+        parser.description("Netconf stress client");
+
+        parser.addArgument("--ip")
+                .type(String.class)
+                .setDefault("127.0.0.1")
+                .help("Netconf server IP")
+                .dest("ip");
+
+        parser.addArgument("--port")
+                .type(Integer.class)
+                .setDefault(2830)
+                .help("Netconf server port")
+                .dest("port");
+
+        parser.addArgument("--edits")
+                .type(Integer.class)
+                .setDefault(50000)
+                .help("Netconf edit rpcs to be sent")
+                .dest("edit-count");
+
+        parser.addArgument("--edit-content")
+                .type(File.class)
+                .setDefault(new File("edit.txt"))
+                .help("File containing the edit-config payload; {MSG_ID} is replaced per message")
+                .dest("edit-content");
+
+        parser.addArgument("--edit-batch-size")
+                .type(Integer.class)
+                .required(false)
+                .setDefault(-1)
+                .help("Number of edits per commit; defaults to all edits in one batch")
+                .dest("edit-batch-size");
+
+        parser.addArgument("--debug")
+                .type(Boolean.class)
+                .setDefault(false)
+                .help("Whether to use debug log level instead of INFO")
+                .dest("debug");
+
+        parser.addArgument("--legacy-framing")
+                .type(Boolean.class)
+                .setDefault(false)
+                .help("Use ]]>]]> framing instead of chunked framing")
+                .dest("legacy-framing");
+
+        parser.addArgument("--exi")
+                .type(Boolean.class)
+                .setDefault(false)
+                .help("Negotiate EXI encoding")
+                .dest("exi");
+
+        parser.addArgument("--async-requests")
+                .type(Boolean.class)
+                .setDefault(true)
+                .help("Send all requests before collecting responses")
+                .dest("async");
+
+        parser.addArgument("--msg-timeout")
+                .type(Integer.class)
+                .setDefault(60)
+                .help("Per-request response timeout in seconds")
+                .dest("msg-timeout");
+
+        parser.addArgument("--ssh")
+                .type(Boolean.class)
+                .setDefault(false)
+                .help("Connect over SSH instead of plain TCP")
+                .dest("ssh");
+
+        // TODO add get-config option instead of edit + commit
+        // TODO different edit config content
+
+        return parser;
+    }
+
+    /**
+     * Validates parsed values and resolves the batch-size default.
+     *
+     * @throws IllegalArgumentException if any parameter is out of range or the
+     *         edit content file is missing/unreadable
+     */
+    void validate() {
+        Preconditions.checkArgument(port > 0, "Port =< 0");
+        Preconditions.checkArgument(editCount > 0, "Edit count =< 0");
+        if (editBatchSize == -1) {
+            editBatchSize = editCount;
+        } else {
+            Preconditions.checkArgument(editBatchSize <= editCount, "Edit batch size > edit count");
+        }
+
+        Preconditions.checkArgument(editContent.exists(), "Edit content file missing");
+        Preconditions.checkArgument(!editContent.isDirectory(), "Edit content file is a dir");
+        Preconditions.checkArgument(editContent.canRead(), "Edit content file is unreadable");
+        // TODO validate
+    }
+
+    /**
+     * Resolves {@link #ip}/{@link #port} into a socket address.
+     *
+     * @throws IllegalArgumentException if the host name cannot be resolved
+     */
+    public InetSocketAddress getInetAddress() {
+        try {
+            return new InetSocketAddress(InetAddress.getByName(ip), port);
+        } catch (final UnknownHostException e) {
+            throw new IllegalArgumentException("Unknown ip", e);
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import ch.qos.logback.classic.Level;
+import com.google.common.base.Charsets;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GlobalEventExecutor;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import net.sourceforge.argparse4j.inf.ArgumentParser;
+import net.sourceforge.argparse4j.inf.ArgumentParserException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.base._1._0.rev110601.CommitInput;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.base._1._0.rev110601.EditConfigInput;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.xml.sax.SAXException;
+
+/**
+ * Entry point of the netconf stress client: prepares all edit-config messages
+ * up front, connects to the server and drives them through the selected
+ * {@link ExecutionStrategy}, reporting overall throughput at the end.
+ */
+public final class StressClient {
+
+    private static final Logger LOG = LoggerFactory.getLogger(StressClient.class);
+
+    static final QName COMMIT_QNAME = QName.create(CommitInput.QNAME, "commit");
+    // Shared commit rpc, reused after every edit batch.
+    public static final NetconfMessage COMMIT_MSG;
+
+    static {
+        try {
+            COMMIT_MSG = new NetconfMessage(XmlUtil.readXmlToDocument("<rpc message-id=\"commit-batch\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+                    "    <commit/>\n" +
+                    "</rpc>"));
+        } catch (SAXException | IOException e) {
+            throw new ExceptionInInitializerError(e);
+        }
+    }
+
+    static final QName EDIT_QNAME = QName.create(EditConfigInput.QNAME, "edit-config");
+    // Template edit-config rpc; per-message copies get a message-id and content.
+    static final org.w3c.dom.Document editBlueprint;
+
+    static {
+        try {
+            editBlueprint = XmlUtil.readXmlToDocument(
+                    "<rpc xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+                            "    <edit-config xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+                            "        <target>\n" +
+                            "            <candidate/>\n" +
+                            "        </target>\n" +
+                            "        <config/>\n" +
+                            "    </edit-config>\n" +
+                            "</rpc>");
+        } catch (SAXException | IOException e) {
+            throw new ExceptionInInitializerError(e);
+        }
+    }
+
+    // Placeholder in the edit content file that is replaced by the message id.
+    private static final String MSG_ID_PLACEHOLDER = "{MSG_ID}";
+    private static final String MSG_ID_PLACEHOLDER_REGEX = "\\{MSG_ID\\}";
+
+    public static void main(final String[] args) {
+        final Parameters params = parseArgs(args, Parameters.getParser());
+        params.validate();
+
+        // TODO remove: startup delay (e.g. to attach a profiler/debugger)
+        try {
+            Thread.sleep(10000);
+        } catch (final InterruptedException e) {
+            // Restore the interrupt status instead of swallowing it.
+            Thread.currentThread().interrupt();
+        }
+
+        final ch.qos.logback.classic.Logger root = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
+        root.setLevel(params.debug ? Level.DEBUG : Level.INFO);
+
+        LOG.info("Preparing messages");
+        // Prepare all msgs up front so message construction cost is excluded
+        // from the measured execution time.
+        final List<NetconfMessage> preparedMessages = Lists.newArrayListWithCapacity(params.editCount);
+
+        final String editContentString;
+        boolean needsModification = false;
+        try {
+            editContentString = Files.toString(params.editContent, Charsets.UTF_8);
+            if(editContentString.contains(MSG_ID_PLACEHOLDER)) {
+                needsModification = true;
+            }
+        } catch (final IOException e) {
+            // Preserve the underlying cause for diagnosability.
+            throw new IllegalArgumentException("Cannot read content of " + params.editContent, e);
+        }
+
+        for (int i = 0; i < params.editCount; i++) {
+            final Document msg = XmlUtil.createDocumentCopy(editBlueprint);
+            msg.getDocumentElement().setAttribute("message-id", Integer.toString(i));
+            final NetconfMessage netconfMessage = new NetconfMessage(msg);
+
+            final Element editContentElement;
+            try {
+                // Insert message id where needed
+                final String specificEditContent = needsModification ?
+                        editContentString.replaceAll(MSG_ID_PLACEHOLDER_REGEX, Integer.toString(i)) :
+                        editContentString;
+
+                editContentElement = XmlUtil.readXmlToElement(specificEditContent);
+                final Node config = ((Element) msg.getDocumentElement().getElementsByTagName("edit-config").item(0)).
+                        getElementsByTagName("config").item(0);
+                config.appendChild(msg.importNode(editContentElement, true));
+            } catch (final IOException | SAXException e) {
+                throw new IllegalArgumentException("Edit content file is unreadable", e);
+            }
+
+            preparedMessages.add(netconfMessage);
+
+        }
+
+
+        final NioEventLoopGroup nioGroup = new NioEventLoopGroup();
+        final Timer timer = new HashedWheelTimer();
+
+        final NetconfClientDispatcherImpl netconfClientDispatcher = configureClientDispatcher(params, nioGroup, timer);
+
+        final NetconfDeviceCommunicator sessionListener = getSessionListener(params.getInetAddress());
+
+        final NetconfClientConfiguration cfg = getNetconfClientConfiguration(params, sessionListener);
+
+        LOG.info("Connecting to netconf server {}:{}", params.ip, params.port);
+        final NetconfClientSession netconfClientSession;
+        try {
+            netconfClientSession = netconfClientDispatcher.createClient(cfg).get();
+        } catch (final InterruptedException e) {
+            // Restore the interrupt status before aborting.
+            Thread.currentThread().interrupt();
+            throw new RuntimeException(e);
+        } catch (final ExecutionException e) {
+            throw new RuntimeException("Unable to connect", e);
+        }
+
+        LOG.info("Starting stress test");
+        final Stopwatch started = Stopwatch.createStarted();
+        getExecutionStrategy(params, preparedMessages, sessionListener).invoke();
+        started.stop();
+
+        LOG.info("FINISHED. Execution time: {}", started);
+        LOG.info("Requests per second: {}", (params.editCount * 1000.0 / started.elapsed(TimeUnit.MILLISECONDS)));
+
+        // Cleanup
+        netconfClientSession.close();
+        timer.stop();
+        try {
+            nioGroup.shutdownGracefully().get(20L, TimeUnit.SECONDS);
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
+            LOG.warn("Unable to close executor properly", e);
+        }
+    }
+
+    /** Picks the async or sync strategy based on the --async-requests flag. */
+    private static ExecutionStrategy getExecutionStrategy(final Parameters params, final List<NetconfMessage> preparedMessages, final NetconfDeviceCommunicator sessionListener) {
+        if(params.async) {
+            return new AsyncExecutionStrategy(params, preparedMessages, sessionListener);
+        } else {
+            return new SyncExecutionStrategy(params, preparedMessages, sessionListener);
+        }
+    }
+
+    /** Creates a dispatcher matching the requested framing/EXI combination. */
+    private static NetconfClientDispatcherImpl configureClientDispatcher(final Parameters params, final NioEventLoopGroup nioGroup, final Timer timer) {
+        final NetconfClientDispatcherImpl netconfClientDispatcher;
+        if(params.exi) {
+            if(params.legacyFraming) {
+                netconfClientDispatcher = ConfigurableClientDispatcher.createLegacyExi(nioGroup, nioGroup, timer);
+            } else {
+                netconfClientDispatcher = ConfigurableClientDispatcher.createChunkedExi(nioGroup, nioGroup, timer);
+            }
+        } else {
+            if(params.legacyFraming) {
+                netconfClientDispatcher = ConfigurableClientDispatcher.createLegacy(nioGroup, nioGroup, timer);
+            } else {
+                netconfClientDispatcher = ConfigurableClientDispatcher.createChunked(nioGroup, nioGroup, timer);
+            }
+        }
+        return netconfClientDispatcher;
+    }
+
+    private static NetconfClientConfiguration getNetconfClientConfiguration(final Parameters params, final NetconfDeviceCommunicator sessionListener) {
+        final NetconfClientConfigurationBuilder netconfClientConfigurationBuilder = NetconfClientConfigurationBuilder.create();
+        netconfClientConfigurationBuilder.withSessionListener(sessionListener);
+        netconfClientConfigurationBuilder.withAddress(params.getInetAddress());
+        netconfClientConfigurationBuilder.withProtocol(params.ssh ? NetconfClientConfiguration.NetconfClientProtocol.SSH : NetconfClientConfiguration.NetconfClientProtocol.TCP);
+        netconfClientConfigurationBuilder.withConnectionTimeoutMillis(20000L);
+        netconfClientConfigurationBuilder.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
+        return netconfClientConfigurationBuilder.build();
+    }
+
+    /** Builds a communicator whose remote-device callbacks only log events. */
+    static NetconfDeviceCommunicator getSessionListener(final InetSocketAddress inetAddress) {
+        final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> loggingRemoteDevice = new LoggingRemoteDevice();
+        return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test", inetAddress), loggingRemoteDevice);
+    }
+
+    private static Parameters parseArgs(final String[] args, final ArgumentParser parser) {
+        final Parameters opt = new Parameters();
+        try {
+            parser.parseArgs(args, opt);
+            return opt;
+        } catch (final ArgumentParserException e) {
+            parser.handleError(e);
+        }
+
+        // Unreachable return to satisfy the compiler; handleError already
+        // printed usage, exit with failure.
+        System.exit(1);
+        return null;
+    }
+
+
+    /** RemoteDevice implementation that merely logs session lifecycle events. */
+    private static class LoggingRemoteDevice implements RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> {
+        @Override
+        public void onRemoteSessionUp(final NetconfSessionPreferences remoteSessionCapabilities, final NetconfDeviceCommunicator netconfDeviceCommunicator) {
+            LOG.info("Session established");
+        }
+
+        @Override
+        public void onRemoteSessionDown() {
+            LOG.info("Session down");
+        }
+
+        @Override
+        public void onRemoteSessionFailed(final Throwable throwable) {
+            LOG.info("Session failed");
+        }
+
+        @Override
+        public void onNotification(final NetconfMessage notification) {
+            LOG.info("Notification received: {}", notification.toString());
+        }
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Execution strategy that waits for each response (edit or commit) before
+ * sending the next request.
+ *
+ * TODO reuse code from org.opendaylight.controller.netconf.test.tool.client.stress.AsyncExecutionStrategy
+ */
+class SyncExecutionStrategy implements ExecutionStrategy {
+    private static final Logger LOG = LoggerFactory.getLogger(SyncExecutionStrategy.class);
+
+    private final Parameters params;
+    private final List<NetconfMessage> preparedMessages;
+    private final NetconfDeviceCommunicator sessionListener;
+    // Sizes of the consecutive edit batches; each batch is followed by a commit.
+    private final List<Integer> editBatches;
+
+    public SyncExecutionStrategy(final Parameters params, final List<NetconfMessage> preparedMessages, final NetconfDeviceCommunicator sessionListener) {
+        this.params = params;
+        this.preparedMessages = preparedMessages;
+        this.sessionListener = sessionListener;
+        editBatches = countEditBatchSizes(params);
+    }
+
+    /**
+     * Splits the total edit count into full batches of editBatchSize plus an
+     * optional trailing partial batch (duplicated in AsyncExecutionStrategy).
+     */
+    private static List<Integer> countEditBatchSizes(final Parameters params) {
+        final List<Integer> editBatches = Lists.newArrayList();
+        if (params.editBatchSize != params.editCount) {
+            final int fullBatches = params.editCount / params.editBatchSize;
+            for (int i = 0; i < fullBatches; i++) {
+                editBatches.add(params.editBatchSize);
+            }
+
+            if (params.editCount % params.editBatchSize != 0) {
+                editBatches.add(params.editCount % params.editBatchSize);
+            }
+        } else {
+            editBatches.add(params.editBatchSize);
+        }
+        return editBatches;
+    }
+
+    @Override
+    public void invoke() {
+        final AtomicInteger responseCounter = new AtomicInteger(0);
+
+        int batchI = 0;
+        for (final Integer editBatch : editBatches) {
+            for (int i = 0; i < editBatch; i++) {
+                final int msgId = i + (batchI * params.editBatchSize);
+                final NetconfMessage msg = preparedMessages.get(msgId);
+                LOG.debug("Sending message {}", msgId);
+                // Serializing the document is expensive; only do it when debug is on.
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Sending message {}", XmlUtil.toString(msg.getDocument()));
+                }
+                final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture =
+                        sessionListener.sendRequest(msg, StressClient.EDIT_QNAME);
+                // Wait for response
+                waitForResponse(responseCounter, netconfMessageFuture);
+
+            }
+            batchI++;
+            LOG.info("Batch {} with size {} sent. Committing", batchI, editBatch);
+
+            // Commit batch sync
+            waitForResponse(responseCounter,
+                    sessionListener.sendRequest(StressClient.COMMIT_MSG, StressClient.COMMIT_QNAME));
+        }
+
+        // One response per edit plus one per commit (= number of batches).
+        Preconditions.checkState(responseCounter.get() == params.editCount + editBatches.size(), "Not all responses were received, only %s from %s", responseCounter.get(), params.editCount + editBatches.size());
+    }
+
+    /** Blocks on a single request future, counting successful responses. */
+    private void waitForResponse(final AtomicInteger responseCounter, final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture) {
+        try {
+            final RpcResult<NetconfMessage> netconfMessageRpcResult =
+                    netconfMessageFuture.get(params.msgTimeout, TimeUnit.SECONDS);
+            if (netconfMessageRpcResult.isSuccessful()) {
+                responseCounter.incrementAndGet();
+                LOG.debug("Received response {}", responseCounter.get());
+            } else {
+                LOG.warn("Request failed {}", netconfMessageRpcResult);
+            }
+
+        } catch (final InterruptedException e) {
+            // Restore the interrupt status for callers instead of swallowing it.
+            Thread.currentThread().interrupt();
+            throw new RuntimeException(e);
+        } catch (final ExecutionException | TimeoutException e) {
+            throw new RuntimeException("Request not finished", e);
+        }
+    }
+}
factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
factory.setXIncludeAware(false);
factory.setExpandEntityReferences(false);
+ // Performance improvement for messages with size <10k according to
+ // https://xerces.apache.org/xerces2-j/faq-performance.html
+ factory.setFeature("http://apache.org/xml/features/dom/defer-node-expansion", false);
} catch (ParserConfigurationException e) {
throw new ExceptionInInitializerError(e);
}
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectory>${project.basedir}</sourceDirectory>
<includes>**\/*.java,**\/*.xml,**\/*.ini,**\/*.sh,**\/*.bat,**\/*.yang</includes>
- <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/${jmxGeneratorPath}\/,**\/${salGeneratorPath}\/,**\/netconf\/test\/tool\/Main.java</excludes>
+ <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/${jmxGeneratorPath}\/,**\/${salGeneratorPath}\/,**\/netconf\/test\/tool\/Main.java,**\/netconf\/test\/tool\/client\/stress\/StressClient.java</excludes>
</configuration>
<dependencies>
<dependency>