assertThat(runtimeBeans.size(), is(4));
{
- RuntimeBeanEntry streamRB = findFirstByYangName(runtimeBeans,
- "stream");
+ RuntimeBeanEntry streamRB = findFirstByNamePrefix(runtimeBeans,
+ "ThreadStream");
assertNotNull(streamRB);
assertFalse(streamRB.getKeyYangName().isPresent());
assertFalse(streamRB.getKeyJavaName().isPresent());
+ " in " + runtimeBeans);
}
+ protected RuntimeBeanEntry findFirstByNamePrefix(final Collection<RuntimeBeanEntry> runtimeBeans, final String namePrefix) {
+ for (RuntimeBeanEntry rb : runtimeBeans) {
+ if (namePrefix.equals(rb.getJavaNamePrefix())) {
+ return rb;
+ }
+ }
+
+ throw new IllegalArgumentException("Name prefix not found:" + namePrefix
+ + " in " + runtimeBeans);
+ }
+
@Test
public void testGetWhenConditionMatcher() {
assertMatches("config",
assertThat(threadRB.getRpcs().size(), is(2));
}
{
- RuntimeBeanEntry streamRB = findFirstByYangName(runtimeBeans,
- "stream");
+ RuntimeBeanEntry streamRB = findFirstByNamePrefix(runtimeBeans,
+ "ThreadStream");
assertNotNull(streamRB);
assertFalse(streamRB.getKeyYangName().isPresent());
assertFalse(streamRB.getKeyJavaName().isPresent());
* This will stop the timeout clock
*/
void markFollowerInActive();
+
+
+ /**
+ * This will return the active time of follower, since it was last reset
+ * @return time in milliseconds
+ */
+ long timeSinceLastActivity();
+
}
stopwatch.stop();
}
}
+
+ @Override
+ public long timeSinceLastActivity() {
+ return stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ }
}
public class Snapshot implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -8298574936724056236L;
+
private final byte[] state;
private final List<ReplicatedLogEntry> unAppliedEntries;
private final long lastIndex;
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import java.io.Serializable;
-
-/**
- * Message sent to commit an entry to the log
- */
-public class CommitEntry implements Serializable {
- private static final long serialVersionUID = 1L;
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import java.io.Serializable;
-
-/**
- * Message sent to Persist an entry into the transaction journal
- */
-public class PersistEntry implements Serializable {
- private static final long serialVersionUID = 1L;
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import java.io.Serializable;
-
-/**
- * This message is sent by a RaftActor to itself so that a subclass can process
- * it and use it to save it's state
- */
-public class SaveSnapshot implements Serializable {
- private static final long serialVersionUID = 1L;
-}
purgeInMemoryLog();
}
+ //Send the next log entry immediately, if possible, no need to wait for heartbeat to trigger that event
+ sendUpdatesToFollower(followerId, followerLogInformation, false);
return this;
}
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+ LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {}", context.getId(),
+ rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
return switchBehavior(new Follower(context));
private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
String followerId = reply.getFollowerId();
FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+
+ if (followerToSnapshot == null) {
+ LOG.error("{}: FollowerId {} in InstallSnapshotReply not known to Leader",
+ context.getId(), followerId);
+ return;
+ }
+
FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
followerLogInformation.markFollowerActive();
- if (followerToSnapshot != null &&
- followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
-
+ if (followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
+ boolean wasLastChunk = false;
if (reply.isSuccess()) {
if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
//this was the last chunk reply
// we can remove snapshot from the memory
setSnapshot(Optional.<ByteString>absent());
}
+ wasLastChunk = true;
} else {
followerToSnapshot.markSendStatus(true);
followerToSnapshot.markSendStatus(false);
}
+ if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ if(followerActor != null) {
+ sendSnapshotChunk(followerActor, followerId);
+ }
+ }
+
} else {
- LOG.error("{}: FollowerId in InstallSnapshotReply not known to Leader" +
- " or Chunk Index in InstallSnapshotReply not matching {} != {}",
- context.getId(), followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
- );
+ LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
+ context.getId(), reply.getChunkIndex(), followerId,
+ followerToSnapshot.getChunkIndex());
if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
// Since the Follower did not find this index to be valid we should reset the follower snapshot
private void sendAppendEntries() {
// Send an AppendEntries to all followers
+ long heartbeatInterval = context.getConfigParams().getHeartBeatInterval().toMillis();
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
final String followerId = e.getKey();
- ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ final FollowerLogInformation followerLogInformation = e.getValue();
+ // This checks helps not to send a repeat message to the follower
+ if(followerLogInformation.timeSinceLastActivity() >= heartbeatInterval) {
+ sendUpdatesToFollower(followerId, followerLogInformation, true);
+ }
+ }
+ }
- if (followerActor != null) {
- FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
- long followerNextIndex = followerLogInformation.getNextIndex();
- boolean isFollowerActive = followerLogInformation.isFollowerActive();
-
- if (mapFollowerToSnapshot.get(followerId) != null) {
- // if install snapshot is in process , then sent next chunk if possible
- if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
- sendSnapshotChunk(followerActor, followerId);
- } else {
- // we send a heartbeat even if we have not received a reply for the last chunk
- sendAppendEntriesToFollower(followerActor, followerNextIndex,
- Collections.<ReplicatedLogEntry>emptyList());
- }
+ /**
+ *
+ * This method checks if any update needs to be sent to the given follower. This includes append log entries,
+ * sending next snapshot chunk, and initiating a snapshot.
+ * @return true if any update is sent, false otherwise
+ */
- } else {
- long leaderLastIndex = context.getReplicatedLog().lastIndex();
- long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
- final List<ReplicatedLogEntry> entries;
-
- if (isFollowerActive &&
- context.getReplicatedLog().isPresent(followerNextIndex)) {
- // FIXME : Sending one entry at a time
- entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
-
- } else if (isFollowerActive && followerNextIndex >= 0 &&
- leaderLastIndex >= followerNextIndex ) {
- // if the followers next index is not present in the leaders log, and
- // if the follower is just not starting and if leader's index is more than followers index
- // then snapshot should be sent
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
- "follower-nextIndex: %s, leader-snapshot-index: %s, " +
- "leader-last-index: %s", context.getId(), followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
- }
- actor().tell(new InitiateInstallSnapshot(), actor());
-
- // we would want to sent AE as the capture snapshot might take time
- entries = Collections.<ReplicatedLogEntry>emptyList();
-
- } else {
- //we send an AppendEntries, even if the follower is inactive
- // in-order to update the followers timestamp, in case it becomes active again
- entries = Collections.<ReplicatedLogEntry>emptyList();
+ private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
+ boolean sendHeartbeat) {
+
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ if (followerActor != null) {
+ long followerNextIndex = followerLogInformation.getNextIndex();
+ boolean isFollowerActive = followerLogInformation.isFollowerActive();
+
+ if (mapFollowerToSnapshot.get(followerId) != null) {
+ // if install snapshot is in process , then sent next chunk if possible
+ if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
+ sendSnapshotChunk(followerActor, followerId);
+ } else if(sendHeartbeat) {
+ // we send a heartbeat even if we have not received a reply for the last chunk
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ }
+ } else {
+ long leaderLastIndex = context.getReplicatedLog().lastIndex();
+ long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
+ if (isFollowerActive &&
+ context.getReplicatedLog().isPresent(followerNextIndex)) {
+ // FIXME : Sending one entry at a time
+ final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
+
+ sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
+
+ } else if (isFollowerActive && followerNextIndex >= 0 &&
+ leaderLastIndex >= followerNextIndex) {
+ // if the followers next index is not present in the leaders log, and
+ // if the follower is just not starting and if leader's index is more than followers index
+ // then snapshot should be sent
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("InitiateInstallSnapshot to follower:{}," +
+ "follower-nextIndex:{}, leader-snapshot-index:{}, " +
+ "leader-last-index:{}", followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+ );
}
+ actor().tell(new InitiateInstallSnapshot(), actor());
- sendAppendEntriesToFollower(followerActor, followerNextIndex, entries);
+ // Send heartbeat to follower whenever install snapshot is initiated.
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ } else if(sendHeartbeat) {
+ //we send an AppendEntries, even if the follower is inactive
+ // in-order to update the followers timestamp, in case it becomes active again
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
}
+
}
}
}
private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
- List<ReplicatedLogEntry> entries) {
- followerActor.tell(
- new AppendEntries(currentTerm(), context.getId(),
- prevLogIndex(followerNextIndex),
- prevLogTerm(followerNextIndex), entries,
- context.getCommitIndex(),
- replicatedToAllIndex).toSerializable(),
- actor()
- );
+ List<ReplicatedLogEntry> entries, String followerId) {
+ AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
+ prevLogIndex(followerNextIndex),
+ prevLogTerm(followerNextIndex), entries,
+ context.getCommitIndex(), replicatedToAllIndex);
+
+ if(!entries.isEmpty()) {
+ LOG.debug("{}: Sending AppendEntries to follower {}: {}", context.getId(), followerId,
+ appendEntries);
+ }
+
+ followerActor.tell(appendEntries.toSerializable(), actor());
}
/**
*
*/
private void installSnapshotIfNeeded() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{}: installSnapshotIfNeeded, followers {}", context.getId(), followerToLog.keySet());
+ }
+
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
final ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
long nextIndex = e.getValue().getNextIndex();
if (!context.getReplicatedLog().isPresent(nextIndex) &&
- context.getReplicatedLog().isInSnapshot(nextIndex)) {
+ context.getReplicatedLog().isInSnapshot(nextIndex)) {
LOG.info("{}: {} follower needs a snapshot install", context.getId(), e.getKey());
if (snapshot.isPresent()) {
// if a snapshot is present in the memory, most likely another install is in progress
// no need to capture snapshot
sendSnapshotChunk(followerActor, e.getKey());
- } else {
+ } else if (!context.isSnapshotCaptureInitiated()) {
initiateCaptureSnapshot();
//we just need 1 follower who would need snapshot to be installed.
// when we have the snapshot captured, we would again check (in SendInstallSnapshot)
actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
actor());
+ context.setSnapshotCaptureInitiated(true);
}
private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
try {
if (snapshot.isPresent()) {
+ ByteString nextSnapshotChunk = getNextSnapshotChunk(followerId,snapshot.get());
+
+ // Note: the previous call to getNextSnapshotChunk has the side-effect of adding
+ // followerId to the followerToSnapshot map.
+ FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+
followerActor.tell(
new InstallSnapshot(currentTerm(), context.getId(),
context.getReplicatedLog().getSnapshotIndex(),
context.getReplicatedLog().getSnapshotTerm(),
- getNextSnapshotChunk(followerId,snapshot.get()),
- mapFollowerToSnapshot.get(followerId).incrementChunkIndex(),
- mapFollowerToSnapshot.get(followerId).getTotalChunks(),
- Optional.of(mapFollowerToSnapshot.get(followerId).getLastChunkHashCode())
+ nextSnapshotChunk,
+ followerToSnapshot.incrementChunkIndex(),
+ followerToSnapshot.getTotalChunks(),
+ Optional.of(followerToSnapshot.getLastChunkHashCode())
).toSerializable(),
actor()
);
LOG.info("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
context.getId(), followerActor.path(),
- mapFollowerToSnapshot.get(followerId).getChunkIndex(),
- mapFollowerToSnapshot.get(followerId).getTotalChunks());
+ followerToSnapshot.getChunkIndex(),
+ followerToSnapshot.getTotalChunks());
}
} catch (IOException e) {
LOG.error(e, "{}: InstallSnapshot failed for Leader.", context.getId());
* Reply for the AppendEntriesRpc message
*/
public class AppendEntriesReply extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -7487547356392536683L;
// true if follower contained entry matching
// prevLogIndex and prevLogTerm
this.logLastTerm = logLastTerm;
}
+ @Override
public long getTerm() {
return term;
}
package org.opendaylight.controller.cluster.raft.messages;
public class InstallSnapshotReply extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 642227896390779503L;
// The followerId - this will be used to figure out which follower is
// responding
private final String followerId;
private final int chunkIndex;
- private boolean success;
+ private final boolean success;
public InstallSnapshotReply(long term, String followerId, int chunkIndex,
boolean success) {
* Invoked by candidates to gather votes (§5.2).
*/
public class RequestVote extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -6967509186297108657L;
// candidate requesting vote
private String candidateId;
public RequestVote() {
}
+ @Override
public long getTerm() {
return term;
}
package org.opendaylight.controller.cluster.raft.messages;
public class RequestVoteReply extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 8427899326488775660L;
// true means candidate received vot
private final boolean voteGranted;
this.voteGranted = voteGranted;
}
+ @Override
public long getTerm() {
return term;
}
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
};
}
+ @Test
+ public void testFakeSnapshotsForLeaderWithInInitiateSnapshots() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "leader1";
+
+ ActorRef followerActor1 =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ ActorRef followerActor2 =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerActor1.path().toString());
+ peerAddresses.put("follower-2", followerActor2.path().toString());
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(),
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(9);
+ leaderActor.getRaftActorContext().setLastApplied(9);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // create 5 entries in the log
+ MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
+ leaderActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(5, 10, 1).build());
+ //set the snapshot index to 4 , 0 to 4 are snapshotted
+ leaderActor.getRaftActorContext().getReplicatedLog().setSnapshotIndex(4);
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-1", 1, true, 9, 1));
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+
+ // set the 2nd follower nextIndex to 1 which has been snapshotted
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-2", 1, true, 0, 1));
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+
+ // simulate a real snapshot
+ leaderActor.onReceiveCommand(new InitiateInstallSnapshot());
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ //reply from a slow follower does not initiate a fake snapshot
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-2", 1, true, 9, 1));
+ assertEquals("Fake snapshot should not happen when Initiate is in progress", 5, leaderActor.getReplicatedLog().size());
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("foo-0"),
+ new MockRaftActorContext.MockPayload("foo-1"),
+ new MockRaftActorContext.MockPayload("foo-2"),
+ new MockRaftActorContext.MockPayload("foo-3"),
+ new MockRaftActorContext.MockPayload("foo-4")));
+ leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+
+ assertEquals("Real snapshot didn't clear the log till lastApplied", 0, leaderActor.getReplicatedLog().size());
+
+ //reply from a slow follower after should not raise errors
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-2", 1, true, 5, 1));
+ assertEquals(0, leaderActor.getReplicatedLog().size());
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+
+
private ByteString fromObject(Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
package org.opendaylight.controller.cluster.raft.behaviors;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.actor.Props;
import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import scala.concurrent.duration.FiniteDuration;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
public class LeaderTest extends AbstractRaftActorBehaviorTest {
private final ActorRef leaderActor =
actorContext.setPeerAddresses(peerAddresses);
Leader leader = new Leader(actorContext);
+ leader.markFollowerActive(followerActor.path().toString());
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
leader.handleMessage(senderActor, new SendHeartBeat());
final String out =
actorContext.setPeerAddresses(peerAddresses);
Leader leader = new Leader(actorContext);
+ leader.markFollowerActive(followerActor.path().toString());
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
RaftActorBehavior raftBehavior = leader
.handleMessage(senderActor, new Replicate(null, null,
new MockRaftActorContext.MockReplicatedLogEntry(1,
leader.getFollowerToSnapshot().getNextChunk();
leader.getFollowerToSnapshot().incrementChunkIndex();
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
leader.handleMessage(leaderActor, new SendHeartBeat());
AppendEntries aeproto = (AppendEntries)MessageCollectorActor.getFirstMatching(
//update follower timestamp
leader.markFollowerActive(followerActor.path().toString());
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
// this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
RaftActorBehavior raftBehavior = leader.handleMessage(
senderActor, new Replicate(null, "state-id", entry));
assertEquals(1, cs.getLastAppliedTerm());
assertEquals(4, cs.getLastIndex());
assertEquals(2, cs.getLastTerm());
+
+ // if an initiate is started again when first is in progress, it shouldnt initiate Capture
+ raftBehavior = leader.handleMessage(leaderActor, new InitiateInstallSnapshot());
+ List<Object> captureSnapshots = MessageCollectorActor.getAllMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("CaptureSnapshot should not get invoked when initiate is in progress", 1, captureSnapshots.size());
+
}};
}
assertEquals(snapshotIndex + 1, fli.getNextIndex());
}};
}
+ @Test
+ public void testSendSnapshotfromInstallSnapshotReply() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ TestActorRef<MessageCollectorActor> followerActor =
+ TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower-reply");
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-reply",
+ followerActor.path().toString());
+
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
+
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext();
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl(){
+ @Override
+ public int getSnapshotChunkSize() {
+ return 50;
+ }
+ };
+ configParams.setHeartBeatInterval(new FiniteDuration(9, TimeUnit.SECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
+
+ actorContext.setConfigParams(configParams);
+ actorContext.setPeerAddresses(peerAddresses);
+ actorContext.setCommitIndex(followersLastIndex);
+
+ MockLeader leader = new MockLeader(actorContext);
+
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+
+ leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
+
+ List<Object> objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(1, objectList.size());
+
+ Object o = objectList.get(0);
+ assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+
+ assertEquals(1, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
+
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ "follower-reply", installSnapshot.getChunkIndex(), true));
+
+ objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(2, objectList.size());
+
+ installSnapshot = (InstallSnapshotMessages.InstallSnapshot) objectList.get(1);
+
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ "follower-reply", installSnapshot.getChunkIndex(), true));
+
+ objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(3, objectList.size());
+
+ installSnapshot = (InstallSnapshotMessages.InstallSnapshot) objectList.get(2);
+
+ // Send snapshot reply one more time and make sure that a new snapshot message should not be sent to follower
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ "follower-reply", installSnapshot.getChunkIndex(), true));
+
+ objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ // Count should still stay at 3
+ assertEquals(3, objectList.size());
+ }};
+ }
+
@Test
- public void testHandleInstallSnapshotReplyWithInvalidChunkIndex() throws Exception {
+ public void testHandleInstallSnapshotReplyWithInvalidChunkIndex() throws Exception{
new JavaTestKit(getSystem()) {{
TestActorRef<MessageCollectorActor> followerActor =
assertEquals(3, installSnapshot.getTotalChunks());
- leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(), followerActor.path().toString(), -1, false));
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ followerActor.path().toString(), -1, false));
+
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
leader.handleMessage(leaderActor, new SendHeartBeat());
- o = MessageCollectorActor.getAllMessages(followerActor).get(1);
+ o = MessageCollectorActor.getAllMatching(followerActor,InstallSnapshotMessages.InstallSnapshot.class).get(1);
assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
{
TestActorRef<MessageCollectorActor> followerActor =
- TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower");
+ TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower-chunk");
Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put(followerActor.path().toString(),
leader.handleMessage(followerActor, new InstallSnapshotReply(installSnapshot.getTerm(),followerActor.path().toString(),1,true ));
- leader.handleMessage(leaderActor, new SendHeartBeat());
-
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
o = MessageCollectorActor.getAllMessages(followerActor).get(1);
assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
Leader leader = new Leader(leaderActorContext);
leader.markFollowerActive(followerActor.path().toString());
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
leader.handleMessage(leaderActor, new SendHeartBeat());
AppendEntries appendEntries = (AppendEntries) MessageCollectorActor
Leader leader = new Leader(leaderActorContext);
leader.markFollowerActive(followerActor.path().toString());
+ Thread.sleep(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis());
+
leader.handleMessage(leaderActor, new SendHeartBeat());
AppendEntries appendEntries = (AppendEntries) MessageCollectorActor
}};
}
+
+ @Test
+ public void testAppendEntryCallAtEndofAppendEntryReply() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(9, TimeUnit.SECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
+
+ leaderActorContext.setConfigParams(configParams);
+
+ ActorRef followerActor = getSystem().actorOf(Props.create(ForwardMessageToBehaviorActor.class));
+
+ MockRaftActorContext followerActorContext =
+ new MockRaftActorContext("follower-reply", getSystem(), followerActor);
+
+ followerActorContext.setConfigParams(configParams);
+
+ Follower follower = new Follower(followerActorContext);
+
+ ForwardMessageToBehaviorActor.setBehavior(follower);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-reply",
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+
+ //create 3 entries
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ leaderActorContext.setCommitIndex(1);
+
+ Leader leader = new Leader(leaderActorContext);
+ leader.markFollowerActive("follower-reply");
+
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntries appendEntries = (AppendEntries) ForwardMessageToBehaviorActor
+ .getFirstMatching(followerActor, AppendEntries.class);
+
+ assertNotNull(appendEntries);
+
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getEntries().get(0).getIndex());
+ assertEquals(0, appendEntries.getPrevLogIndex());
+
+ AppendEntriesReply appendEntriesReply =
+ (AppendEntriesReply)ForwardMessageToBehaviorActor.getFirstMatching(leaderActor, AppendEntriesReply.class);
+
+ assertNotNull(appendEntriesReply);
+
+ leader.handleAppendEntriesReply(followerActor, appendEntriesReply);
+
+ List<Object> entries = ForwardMessageToBehaviorActor
+ .getAllMatching(followerActor, AppendEntries.class);
+
+ assertEquals("AppendEntries count should be 2 ", 2, entries.size());
+
+ AppendEntries appendEntriesSecond = (AppendEntries) entries.get(1);
+
+ assertEquals(1, appendEntriesSecond.getLeaderCommit());
+ assertEquals(2, appendEntriesSecond.getEntries().get(0).getIndex());
+ assertEquals(1, appendEntriesSecond.getPrevLogIndex());
+
+ }};
+ }
+
class MockLeader extends Leader {
FollowerToSnapshot fts;
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown from NormalizedNodeInputStreamReader when the input stream does not contain
+ * valid serialized data.
+ *
+ * @author Thomas Pantelis
+ */
+public class InvalidNormalizedNodeStreamException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ public InvalidNormalizedNodeStreamException(String message) {
+ super(message);
+ }
+}
private final StringBuilder reusableStringBuilder = new StringBuilder(50);
+ private boolean readSignatureMarker = true;
+
public NormalizedNodeInputStreamReader(InputStream stream) throws IOException {
Preconditions.checkNotNull(stream);
input = new DataInputStream(stream);
@Override
public NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+ readSignatureMarkerAndVersionIfNeeded();
+ return readNormalizedNodeInternal();
+ }
+
+ private void readSignatureMarkerAndVersionIfNeeded() throws IOException {
+ if(readSignatureMarker) {
+ readSignatureMarker = false;
+
+ byte marker = input.readByte();
+ if(marker != NormalizedNodeOutputStreamWriter.SIGNATURE_MARKER) {
+ throw new InvalidNormalizedNodeStreamException(String.format(
+ "Invalid signature marker: %d", marker));
+ }
+
+ input.readShort(); // read the version - not currently used/needed.
+ }
+ }
+
+ private NormalizedNode<?, ?> readNormalizedNodeInternal() throws IOException {
// each node should start with a byte
byte nodeType = input.readByte();
return bytes;
case ValueTypes.YANG_IDENTIFIER_TYPE :
- return readYangInstanceIdentifier();
+ return readYangInstanceIdentifierInternal();
default :
return null;
}
public YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
+ readSignatureMarkerAndVersionIfNeeded();
+ return readYangInstanceIdentifierInternal();
+ }
+
+ private YangInstanceIdentifier readYangInstanceIdentifierInternal() throws IOException {
int size = input.readInt();
List<PathArgument> pathArguments = new ArrayList<>(size);
lastLeafSetQName = nodeType;
- LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
while(child != null) {
builder.withChild(child);
- child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
}
return builder;
}
NormalizedNodeContainerBuilder builder) throws IOException {
LOG.debug("Reading data container (leaf nodes) nodes");
- NormalizedNode<?, ?> child = readNormalizedNode();
+ NormalizedNode<?, ?> child = readNormalizedNodeInternal();
while(child != null) {
builder.addChild(child);
- child = readNormalizedNode();
+ child = readNormalizedNodeInternal();
}
return builder;
}
private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeOutputStreamWriter.class);
+ static final byte SIGNATURE_MARKER = (byte) 0xab;
+ static final short CURRENT_VERSION = (short) 1;
+
static final byte IS_CODE_VALUE = 1;
static final byte IS_STRING_VALUE = 2;
static final byte IS_NULL_VALUE = 3;
private NormalizedNodeWriter normalizedNodeWriter;
+ private boolean wroteSignatureMarker;
+
public NormalizedNodeOutputStreamWriter(OutputStream stream) throws IOException {
Preconditions.checkNotNull(stream);
output = new DataOutputStream(stream);
}
public void writeNormalizedNode(NormalizedNode<?, ?> node) throws IOException {
+ writeSignatureMarkerAndVersionIfNeeded();
normalizedNodeWriter().write(node);
}
+ private void writeSignatureMarkerAndVersionIfNeeded() throws IOException {
+ if(!wroteSignatureMarker) {
+ output.writeByte(SIGNATURE_MARKER);
+ output.writeShort(CURRENT_VERSION);
+ wroteSignatureMarker = true;
+ }
+ }
+
@Override
public void leafNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
Preconditions.checkNotNull(name, "Node identifier should not be null");
private void startNode(final QName qName, byte nodeType) throws IOException {
Preconditions.checkNotNull(qName, "QName of node identifier should not be null.");
+
+ writeSignatureMarkerAndVersionIfNeeded();
+
// First write the type of node
output.writeByte(nodeType);
// Write Start Tag
}
public void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException {
+ writeSignatureMarkerAndVersionIfNeeded();
+ writeYangInstanceIdentifierInternal(identifier);
+ }
+
+ private void writeYangInstanceIdentifierInternal(YangInstanceIdentifier identifier) throws IOException {
Iterable<YangInstanceIdentifier.PathArgument> pathArguments = identifier.getPathArguments();
int size = Iterables.size(pathArguments);
output.writeInt(size);
output.write(bytes);
break;
case ValueTypes.YANG_IDENTIFIER_TYPE:
- writeYangInstanceIdentifier((YangInstanceIdentifier) value);
+ writeYangInstanceIdentifierInternal((YangInstanceIdentifier) value);
break;
case ValueTypes.NULL_TYPE :
break;
import org.apache.commons.lang.SerializationUtils;
import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
public class NormalizedNodeStreamReaderWriterTest {
@Test
- public void testNormalizedNodeStreamReaderWriter() throws IOException {
+ public void testNormalizedNodeStreaming() throws IOException {
- testNormalizedNodeStreamReaderWriter(createTestContainer());
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
+
+ NormalizedNode<?, ?> testContainer = createTestContainer();
+ writer.writeNormalizedNode(testContainer);
QName toaster = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","toaster");
QName darknessFactor = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","darknessFactor");
withNodeIdentifier(new NodeIdentifier(toaster)).
withChild(ImmutableNodes.leafNode(darknessFactor, "1000")).build();
- testNormalizedNodeStreamReaderWriter(Builders.containerBuilder().
+ ContainerNode toasterContainer = Builders.containerBuilder().
withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME)).
- withChild(toasterNode).build());
+ withChild(toasterNode).build();
+ writer.writeNormalizedNode(toasterContainer);
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
+
+ NormalizedNode<?,?> node = reader.readNormalizedNode();
+ Assert.assertEquals(testContainer, node);
+
+ node = reader.readNormalizedNode();
+ Assert.assertEquals(toasterContainer, node);
+
+ writer.close();
}
private NormalizedNode<?, ?> createTestContainer() {
build();
}
- private void testNormalizedNodeStreamReaderWriter(NormalizedNode<?, ?> input) throws IOException {
+ @Test
+ public void testYangInstanceIdentifierStreaming() throws IOException {
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).
+ node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
+ TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
- byte[] byteData = null;
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer =
+ new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
+ writer.writeYangInstanceIdentifier(path);
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
+
+ YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
+ Assert.assertEquals(path, newPath);
+
+ writer.close();
+ }
+
+ @Test
+ public void testNormalizedNodeAndYangInstanceIdentifierStreaming() throws IOException {
- try(ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
- NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream)) {
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
- NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
- normalizedNodeWriter.write(input);
- byteData = byteArrayOutputStream.toByteArray();
+ NormalizedNode<?, ?> testContainer = TestModel.createBaseTestContainerBuilder().build();
+ writer.writeNormalizedNode(testContainer);
- }
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).
+ node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
+ TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
+
+ writer.writeYangInstanceIdentifier(path);
NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
- new ByteArrayInputStream(byteData));
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
NormalizedNode<?,?> node = reader.readNormalizedNode();
- Assert.assertEquals(input, node);
+ Assert.assertEquals(testContainer, node);
+
+ YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
+ Assert.assertEquals(path, newPath);
+
+ writer.close();
+ }
+
+ @Test(expected=InvalidNormalizedNodeStreamException.class, timeout=10000)
+ public void testInvalidNormalizedNodeStream() throws IOException {
+ byte[] protobufBytes = new NormalizedNodeToNodeCodec(null).encode(
+ TestModel.createBaseTestContainerBuilder().build()).getNormalizedNode().toByteArray();
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(protobufBytes));
+
+ reader.readNormalizedNode();
+ }
+
+ @Test(expected=InvalidNormalizedNodeStreamException.class, timeout=10000)
+ public void testInvalidYangInstanceIdentifierStream() throws IOException {
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).build();
+
+ byte[] protobufBytes = ShardTransactionMessages.DeleteData.newBuilder().setInstanceIdentifierPathArguments(
+ InstanceIdentifierUtils.toSerializable(path)).build().toByteArray();
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(protobufBytes));
+
+ reader.readYangInstanceIdentifier();
}
@Test
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
/**
* The ShardManager has the following jobs,
}
static class SchemaContextModules implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -8884620101025936590L;
+
private final Set<String> modules;
SchemaContextModules(Set<String> modules){
protected DOMStoreTransaction getDOMStoreTransaction() {
return transaction;
}
+
+ @Override
+ protected boolean returnCloseTransactionReply() {
+ return false;
+ }
}
* The ShardTransaction Actor delegates all actions to DOMDataReadWriteTransaction
* </p>
* <p>
- * Even though the DOMStore and the DOMStoreTransactionChain implement multiple types of transactions
- * the ShardTransaction Actor only works with read-write transactions. This is just to keep the logic simple. At this
- * time there are no known advantages for creating a read-only or write-only transaction which may change over time
- * at which point we can optimize things in the distributed store as well.
- * </p>
- * <p>
* Handles Messages <br/>
* ---------------- <br/>
* <li> {@link org.opendaylight.controller.cluster.datastore.messages.ReadData}
}
}
+ protected boolean returnCloseTransactionReply() {
+ return true;
+ }
+
private void closeTransaction(boolean sendReply) {
getDOMStoreTransaction().close();
- if(sendReply) {
+ if(sendReply && returnCloseTransactionReply()) {
getSender().tell(CloseTransactionReply.INSTANCE.toSerializable(), getSelf());
}
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.InvalidNormalizedNodeStreamException;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputStreamReader;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeOutputStreamWriter;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
}
public static NormalizedNode<?, ?> deserializeNormalizedNode(DataInput in) {
- try {
- boolean present = in.readBoolean();
- if(present) {
- NormalizedNodeInputStreamReader streamReader = streamReader(in);
- return streamReader.readNormalizedNode();
- }
- } catch (IOException e) {
- throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
- }
+ try {
+ return tryDeserializeNormalizedNode(in);
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
+ }
+ }
+
+ private static NormalizedNode<?, ?> tryDeserializeNormalizedNode(DataInput in) throws IOException {
+ boolean present = in.readBoolean();
+ if(present) {
+ NormalizedNodeInputStreamReader streamReader = streamReader(in);
+ return streamReader.readNormalizedNode();
+ }
return null;
}
public static NormalizedNode<?, ?> deserializeNormalizedNode(byte [] bytes) {
NormalizedNode<?, ?> node = null;
try {
- node = deserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
- } catch(Exception e) {
- }
-
- if(node == null) {
- // Must be from legacy protobuf serialization - try that.
+ node = tryDeserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
+ } catch(InvalidNormalizedNodeStreamException e) {
+ // Probably from legacy protobuf serialization - try that.
try {
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(bytes);
node = new NormalizedNodeToNodeCodec(null).decode(serializedNode);
- } catch (InvalidProtocolBufferException e) {
+ } catch (InvalidProtocolBufferException e2) {
throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
}
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
}
return node;
}
@Test
- public void testOnReceiveCloseTransaction() throws Exception {
+ public void testReadWriteTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testCloseTransaction");
+ "testReadWriteTxOnReceiveCloseTransaction");
watch(transaction);
}};
}
+ @Test
+ public void testWriteOnlyTxOnReceiveCloseTransaction() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ "testWriteTxOnReceiveCloseTransaction");
+
+ watch(transaction);
+
+ transaction.tell(new CloseTransaction().toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), CloseTransactionReply.SERIALIZABLE_CLASS);
+ expectTerminated(duration("3 seconds"), transaction);
+ }};
+ }
+
+ @Test
+ public void testReadOnlyTxOnReceiveCloseTransaction() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ "testReadOnlyTxOnReceiveCloseTransaction");
+
+ watch(transaction);
+
+ transaction.tell(new CloseTransaction().toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), Terminated.class);
+ }};
+ }
+
@Test(expected=UnknownMessageException.class)
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
<version>1.2.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-distributed-datastore</artifactId>
+ </dependency>
<!-- Test Dependencies -->
<dependency>
import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.yang.common.QName;
-
import java.io.Serializable;
+import org.opendaylight.yangtools.yang.common.QName;
public class ExecuteRpc implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1128904894827335676L;
- private final String inputCompositeNode;
- private final QName rpc;
+ private final String inputCompositeNode;
+ private final QName rpc;
- public ExecuteRpc(final String inputCompositeNode, final QName rpc) {
- Preconditions.checkNotNull(inputCompositeNode, "Composite Node input string should be present");
- Preconditions.checkNotNull(rpc, "rpc Qname should not be null");
+ public ExecuteRpc(final String inputCompositeNode, final QName rpc) {
+ Preconditions.checkNotNull(inputCompositeNode, "Composite Node input string should be present");
+ Preconditions.checkNotNull(rpc, "rpc Qname should not be null");
- this.inputCompositeNode = inputCompositeNode;
- this.rpc = rpc;
- }
+ this.inputCompositeNode = inputCompositeNode;
+ this.rpc = rpc;
+ }
- public String getInputCompositeNode() {
- return inputCompositeNode;
- }
+ public String getInputCompositeNode() {
+ return inputCompositeNode;
+ }
- public QName getRpc() {
- return rpc;
- }
+ public QName getRpc() {
+ return rpc;
+ }
}
package org.opendaylight.controller.remote.rpc.messages;
import com.google.common.base.Preconditions;
+import java.io.Serializable;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import java.io.Serializable;
-
public class InvokeRpc implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -2813459607858108953L;
- private final QName rpc;
- private final YangInstanceIdentifier identifier;
- private final CompositeNode input;
+ private final QName rpc;
+ private final YangInstanceIdentifier identifier;
+ private final CompositeNode input;
- public InvokeRpc(final QName rpc, final YangInstanceIdentifier identifier, final CompositeNode input) {
- Preconditions.checkNotNull(rpc, "rpc qname should not be null");
- Preconditions.checkNotNull(input, "rpc input should not be null");
+ public InvokeRpc(final QName rpc, final YangInstanceIdentifier identifier, final CompositeNode input) {
+ Preconditions.checkNotNull(rpc, "rpc qname should not be null");
+ Preconditions.checkNotNull(input, "rpc input should not be null");
- this.rpc = rpc;
- this.identifier = identifier;
- this.input = input;
- }
+ this.rpc = rpc;
+ this.identifier = identifier;
+ this.input = input;
+ }
- public QName getRpc() {
- return rpc;
- }
+ public QName getRpc() {
+ return rpc;
+ }
- public YangInstanceIdentifier getIdentifier() {
- return identifier;
- }
+ public YangInstanceIdentifier getIdentifier() {
+ return identifier;
+ }
- public CompositeNode getInput() {
- return input;
- }
+ public CompositeNode getInput() {
+ return input;
+ }
}
import java.io.Serializable;
public class RpcResponse implements Serializable {
- private static final long serialVersionUID = 1L;
- private final String resultCompositeNode;
+ private static final long serialVersionUID = -4211279498688989245L;
- public RpcResponse(final String resultCompositeNode) {
- this.resultCompositeNode = resultCompositeNode;
- }
+ private final String resultCompositeNode;
- public String getResultCompositeNode() {
- return resultCompositeNode;
- }
+ public RpcResponse(final String resultCompositeNode) {
+ this.resultCompositeNode = resultCompositeNode;
+ }
+
+ public String getResultCompositeNode() {
+ return resultCompositeNode;
+ }
}
import org.opendaylight.controller.sal.connector.api.RpcRouter;
public class RoutingTable implements Copier<RoutingTable>, Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 5592610415175278760L;
private final Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
private ActorRef router;
import java.io.Serializable;
public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 294779770032719196L;
private Long version = System.currentTimeMillis();
}
public static class ContainsBuckets implements Serializable{
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -4940160367495308286L;
+
private final Map<Address, Bucket> buckets;
public ContainsBuckets(Map<Address, Bucket> buckets){
}
public static class ContainsBucketVersions implements Serializable{
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -8172148925383801613L;
+
Map<Address, Long> versions;
public ContainsBucketVersions(Map<Address, Long> versions) {
public static class GossiperMessages{
public static class Tick implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -4770935099506366773L;
}
public static final class GossipTick extends Tick {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 5803354404380026143L;
}
public static final class GossipStatus extends ContainsBucketVersions implements Serializable{
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -593037395143883265L;
+
private final Address from;
public GossipStatus(Address from, Map<Address, Long> versions) {
}
public static final class GossipEnvelope extends ContainsBuckets implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 8346634072582438818L;
+
private final Address from;
private final Address to;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-subsystem</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ <packaging>bundle</packaging>
+ <name>${project.artifactId}</name>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.*,
+ org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.*,
+ org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.*
+ </Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+module ietf-netconf-notifications {
+
+ namespace
+ "urn:ietf:params:xml:ns:yang:ietf-netconf-notifications";
+
+ prefix ncn;
+
+ import ietf-inet-types { prefix inet; }
+ import ietf-netconf { prefix nc; }
+
+ organization
+ "IETF NETCONF (Network Configuration Protocol) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netconf/>
+ WG List: <mailto:netconf@ietf.org>
+
+ WG Chair: Bert Wijnen
+ <mailto:bertietf@bwijnen.net>
+
+ WG Chair: Mehmet Ersue
+ <mailto:mehmet.ersue@nsn.com>
+
+ Editor: Andy Bierman
+ <mailto:andy@netconfcentral.org>";
+
+ description
+ "This module defines a YANG data model for use with the
+ NETCONF protocol that allows the NETCONF client to
+ receive common NETCONF base event notifications.
+
+ Copyright (c) 2012 IETF Trust and the persons identified as
+ the document authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+
+
+
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6470; see
+ the RFC itself for full legal notices.";
+
+ revision "2012-02-06" {
+ description
+ "Initial version. Errata 3957 added.";
+ reference
+ "RFC 6470: NETCONF Base Notifications";
+ }
+
+ grouping common-session-parms {
+ description
+ "Common session parameters to identify a
+ management session.";
+
+ leaf username {
+ type string;
+ mandatory true;
+ description
+ "Name of the user for the session.";
+ }
+
+ leaf session-id {
+ type nc:session-id-or-zero-type;
+ mandatory true;
+ description
+ "Identifier of the session.
+ A NETCONF session MUST be identified by a non-zero value.
+ A non-NETCONF session MAY be identified by the value zero.";
+ }
+
+ leaf source-host {
+ type inet:ip-address;
+ description
+ "Address of the remote host for the session.";
+ }
+ }
+
+
+
+
+
+
+
+
+ grouping changed-by-parms {
+ description
+ "Common parameters to identify the source
+ of a change event, such as a configuration
+ or capability change.";
+
+ container changed-by {
+ description
+ "Indicates the source of the change.
+ If caused by internal action, then the
+ empty leaf 'server' will be present.
+ If caused by a management session, then
+ the name, remote host address, and session ID
+ of the session that made the change will be reported.";
+ choice server-or-user {
+ mandatory true;
+ leaf server {
+ type empty;
+ description
+ "If present, the change was caused
+ by the server.";
+ }
+
+ case by-user {
+ uses common-session-parms;
+ }
+ } // choice server-or-user
+    } // container changed-by
+ }
+
+
+ notification netconf-config-change {
+ description
+ "Generated when the NETCONF server detects that the
+ <running> or <startup> configuration datastore
+ has been changed by a management session.
+ The notification summarizes the edits that
+ have been detected.
+
+ The server MAY choose to also generate this
+ notification while loading a datastore during the
+ boot process for the device.";
+
+ uses changed-by-parms;
+
+
+
+
+
+ leaf datastore {
+ type enumeration {
+ enum running {
+ description "The <running> datastore has changed.";
+ }
+ enum startup {
+ description "The <startup> datastore has changed";
+ }
+ }
+ default "running";
+ description
+ "Indicates which configuration datastore has changed.";
+ }
+
+ list edit {
+ description
+ "An edit record SHOULD be present for each distinct
+ edit operation that the server has detected on
+ the target datastore. This list MAY be omitted
+ if the detailed edit operations are not known.
+ The server MAY report entries in this list for
+ changes not made by a NETCONF session (e.g., CLI).";
+
+ leaf target {
+ type instance-identifier;
+ description
+ "Topmost node associated with the configuration change.
+ A server SHOULD set this object to the node within
+ the datastore that is being altered. A server MAY
+ set this object to one of the ancestors of the actual
+ node that was changed, or omit this object, if the
+ exact node is not known.";
+ }
+
+ leaf operation {
+ type nc:edit-operation-type;
+ description
+ "Type of edit operation performed.
+ A server MUST set this object to the NETCONF edit
+ operation performed on the target datastore.";
+ }
+ } // list edit
+ } // notification netconf-config-change
+
+
+
+
+
+
+ notification netconf-capability-change {
+ description
+ "Generated when the NETCONF server detects that
+ the server capabilities have changed.
+ Indicates which capabilities have been added, deleted,
+ and/or modified. The manner in which a server
+ capability is changed is outside the scope of this
+ document.";
+
+ uses changed-by-parms;
+
+ leaf-list added-capability {
+ type inet:uri;
+ description
+ "List of capabilities that have just been added.";
+ }
+
+ leaf-list deleted-capability {
+ type inet:uri;
+ description
+ "List of capabilities that have just been deleted.";
+ }
+
+ leaf-list modified-capability {
+ type inet:uri;
+ description
+ "List of capabilities that have just been modified.
+ A capability is considered to be modified if the
+ base URI for the capability has not changed, but
+ one or more of the parameters encoded at the end of
+ the capability URI have changed.
+ The new modified value of the complete URI is returned.";
+ }
+ } // notification netconf-capability-change
+
+
+  notification netconf-session-start {
+    description
+      "Generated when a NETCONF server detects that a
+       NETCONF session has started. A server MAY generate
+       this event for non-NETCONF management sessions.
+       Indicates the identity of the user that started
+       the session.";
+    // NOTE(review): session identity leaves come from the shared
+    // 'common-session-parms' grouping, defined outside this hunk.
+    uses common-session-parms;
+  } // notification netconf-session-start
+
+
+
+
+  notification netconf-session-end {
+    description
+      "Generated when a NETCONF server detects that a
+       NETCONF session has terminated.
+       A server MAY optionally generate this event for
+       non-NETCONF management sessions. Indicates the
+       identity of the user that owned the session,
+       and why the session was terminated.";
+
+    uses common-session-parms;
+
+    leaf killed-by {
+      // Present only for sessions terminated via <kill-session>:
+      // the 'when' below ties this leaf to termination-reason 'killed'.
+      when "../termination-reason = 'killed'";
+      type nc:session-id-type;
+      description
+        "The ID of the session that directly caused this session
+         to be abnormally terminated. If this session was abnormally
+         terminated by a non-NETCONF session unknown to the server,
+         then this leaf will not be present.";
+    }
+
+    leaf termination-reason {
+      type enumeration {
+        enum "closed" {
+          description
+            "The session was terminated by the client in normal
+             fashion, e.g., by the NETCONF <close-session>
+             protocol operation.";
+        }
+        enum "killed" {
+          description
+            "The session was terminated in abnormal
+             fashion, e.g., by the NETCONF <kill-session>
+             protocol operation.";
+        }
+        enum "dropped" {
+          description
+            "The session was terminated because the transport layer
+             connection was unexpectedly closed.";
+        }
+        enum "timeout" {
+          description
+            "The session was terminated because of inactivity,
+             e.g., waiting for the <hello> message or <rpc>
+             messages.";
+        }
+
+
+
+        enum "bad-hello" {
+          description
+            "The client's <hello> message was invalid.";
+        }
+        enum "other" {
+          description
+            "The session was terminated for some other reason.";
+        }
+      }
+      mandatory true;
+      description
+        "Reason the session was terminated.";
+    }
+  } // notification netconf-session-end
+
+
+  notification netconf-confirmed-commit {
+    description
+      "Generated when a NETCONF server detects that a
+       confirmed-commit event has occurred. Indicates the event
+       and the current state of the confirmed-commit procedure
+       in progress.";
+    reference
+      "RFC 6241, Section 8.4";
+
+    // Session parameters are omitted for 'timeout' events; see the
+    // 'timeout' enum description below for the rationale.
+    uses common-session-parms {
+      when "confirm-event != 'timeout'";
+    }
+
+    leaf confirm-event {
+      type enumeration {
+        enum "start" {
+          description
+            "The confirmed-commit procedure has started.";
+        }
+        enum "cancel" {
+          description
+            "The confirmed-commit procedure has been canceled,
+             e.g., due to the session being terminated, or an
+             explicit <cancel-commit> operation.";
+        }
+        enum "timeout" {
+          description
+            "The confirmed-commit procedure has been canceled
+             due to the confirm-timeout interval expiring.
+             The common session parameters will not be present
+             in this sub-mode.";
+        }
+
+        enum "extend" {
+          description
+            "The confirmed-commit timeout has been extended,
+             e.g., by a new <confirmed-commit> operation.";
+        }
+        enum "complete" {
+          description
+            "The confirmed-commit procedure has been completed.";
+        }
+      }
+      mandatory true;
+      description
+        "Indicates the event that caused the notification.";
+    }
+
+    leaf timeout {
+      // Only meaningful while a confirmed commit is pending ('start'
+      // or 'extend'); absent for cancel/timeout/complete events.
+      when
+        "../confirm-event = 'start' or ../confirm-event = 'extend'";
+      type uint32;
+      units "seconds";
+      description
+        "The configured timeout value if the event type
+         is 'start' or 'extend'. This value represents
+         the approximate number of seconds from the event
+         time when the 'timeout' event might occur.";
+    }
+  } // notification netconf-confirmed-commit
+
+}
--- /dev/null
+module nc-notifications {
+
+  namespace "urn:ietf:params:xml:ns:netmod:notification";
+  prefix "manageEvent";
+
+  import ietf-yang-types{ prefix yang; }
+  import notifications { prefix ncEvent; }
+
+  organization
+    "IETF NETCONF WG";
+
+  contact
+    "netconf@ietf.org";
+
+  description
+    "Conversion of the 'manageEvent' XSD in the NETCONF
+     Notifications RFC.";
+
+  reference
+    "RFC 5277";
+
+  revision 2008-07-14 {
+    description "RFC 5277 version.";
+  }
+
+  container netconf {
+    description "Top-level element in the notification namespace";
+
+    // Entire subtree is read-only state data.
+    config false;
+
+    container streams {
+      description
+        "The list of event streams supported by the system. When
+         a query is issued, the returned set of streams is
+         determined based on user privileges.";
+
+      list stream {
+        description
+          "Stream name, description and other information.";
+        key name;
+        min-elements 1;
+
+        leaf name {
+          description
+            "The name of the event stream. If this is the default
+             NETCONF stream, this must have the value 'NETCONF'.";
+          type ncEvent:streamNameType;
+        }
+
+        leaf description {
+          description
+            "A description of the event stream, including such
+             information as the type of events that are sent over
+             this stream.";
+          type string;
+          mandatory true;
+        }
+
+        leaf replaySupport {
+          // NOTE(review): RFC 5277's published module duplicates the
+          // 'description' leaf text here; reworded to describe what
+          // this boolean actually reports.
+          description
+            "Indicates whether or not event replay is
+             available on this stream.";
+          type boolean;
+          mandatory true;
+        }
+
+        leaf replayLogCreationTime {
+          description
+            "The timestamp of the creation of the log used to support
+             the replay function on this stream. Note that this might
+             be earlier than the earliest available notification in
+             the log. This object is updated if the log resets for
+             some reason. This object MUST be present if replay is
+             supported.";
+          type yang:date-and-time; // xsd:dateTime is wrong!
+        }
+      }
+    }
+  }
+
+  notification replayComplete {
+    description
+      "This notification is sent to signal the end of a replay
+       portion of a subscription.";
+  }
+
+  notification notificationComplete {
+    description
+      "This notification is sent to signal the end of a notification
+       subscription. It is sent in the case that stopTime was
+       specified during the creation of the subscription.";
+  }
+
+}
--- /dev/null
+module notifications {
+
+  namespace "urn:ietf:params:xml:ns:netconf:notification:1.0";
+  prefix "ncEvent";
+
+  import ietf-yang-types { prefix yang; }
+
+  organization
+    "IETF NETCONF WG";
+
+  contact
+    "netconf@ops.ietf.org";
+
+  description
+    "Conversion of the 'ncEvent' XSD in the
+     NETCONF Notifications RFC.";
+
+  reference
+    "RFC 5277.";
+
+  revision 2008-07-14 {
+    description "RFC 5277 version.";
+  }
+
+  // Plain string; no pattern restriction is imposed on stream names.
+  typedef streamNameType {
+    description
+      "The name of an event stream.";
+    type string;
+  }
+
+  rpc create-subscription {
+    description
+      "The command to create a notification subscription. It
+       takes as argument the name of the notification stream
+       and filter. Both of those options limit the content of
+       the subscription. In addition, there are two time-related
+       parameters, startTime and stopTime, which can be used to
+       select the time interval of interest to the notification
+       replay feature.";
+
+    input {
+      leaf stream {
+        description
+          "An optional parameter that indicates which stream of events
+           is of interest. If not present, then events in the default
+           NETCONF stream will be sent.";
+        type streamNameType;
+        default "NETCONF";
+      }
+
+      anyxml filter {
+        description
+          "An optional parameter that indicates which subset of all
+           possible events is of interest. The format of this
+           parameter is the same as that of the filter parameter
+           in the NETCONF protocol operations. If not present,
+           all events not precluded by other parameters will
+           be sent.";
+      }
+
+      leaf startTime {
+        description
+          "A parameter used to trigger the replay feature and
+           indicates that the replay should start at the time
+           specified. If start time is not present, this is not a
+           replay subscription.";
+        type yang:date-and-time;
+      }
+
+      leaf stopTime {
+        // must ". >= ../startTime";
+        // NOTE(review): the stopTime >= startTime constraint is left
+        // commented out; the prose below carries it -- confirm before
+        // enabling as a 'must' statement.
+        description
+          "An optional parameter used with the optional replay
+           feature to indicate the newest notifications of
+           interest. If stop time is not present, the notifications
+           will continue until the subscription is terminated.
+           Must be used with startTime.";
+        type yang:date-and-time;
+      }
+    }
+  }
+}
+