/**
*
*/
+@Deprecated
public final class NeverReconnectStrategyFactoryModule extends org.opendaylight.controller.config.yang.protocol.framework.AbstractNeverReconnectStrategyFactoryModule
{
/**
*
*/
+@Deprecated
public class NeverReconnectStrategyFactoryModuleFactory extends org.opendaylight.controller.config.yang.protocol.framework.AbstractNeverReconnectStrategyFactoryModuleFactory
{
/**
*
*/
+@Deprecated
public final class ReconnectImmediatelyStrategyFactoryModule extends org.opendaylight.controller.config.yang.protocol.framework.AbstractReconnectImmediatelyStrategyFactoryModule
{
/**
*
*/
+@Deprecated
public class ReconnectImmediatelyStrategyFactoryModuleFactory extends org.opendaylight.controller.config.yang.protocol.framework.AbstractReconnectImmediatelyStrategyFactoryModuleFactory
{
/**
*
*/
+@Deprecated
public final class TimedReconnectStrategyFactoryModule extends org.opendaylight.controller.config.yang.protocol.framework.AbstractTimedReconnectStrategyFactoryModule
{
/**
*
*/
+@Deprecated
public class TimedReconnectStrategyFactoryModuleFactory extends org.opendaylight.controller.config.yang.protocol.framework.AbstractTimedReconnectStrategyFactoryModuleFactory
{
 * Dispatcher class for creating servers and clients. The idea is to first create servers and clients and then run the
 * start method that will handle sockets in a different thread.
*/
+@Deprecated
public abstract class AbstractDispatcher<S extends ProtocolSession<?>, L extends SessionListener<?, ?, ?>> implements Closeable {
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
public abstract class AbstractProtocolSession<M> extends SimpleChannelInboundHandler<Object> implements ProtocolSession<M> {
private static final Logger LOG = LoggerFactory.getLogger(AbstractProtocolSession.class);
* @param <M> Protocol message type
* @param <S> Protocol session type, has to extend ProtocolSession<M>
*/
+@Deprecated
public abstract class AbstractSessionNegotiator<M, S extends AbstractProtocolSession<?>> extends ChannelInboundHandlerAdapter implements SessionNegotiator<S> {
private final Logger LOG = LoggerFactory.getLogger(AbstractSessionNegotiator.class);
private final Promise<S> promise;
* Utility ReconnectStrategy singleton, which will cause the reconnect process
* to always fail.
*/
+@Deprecated
@ThreadSafe
public final class NeverReconnectStrategy implements ReconnectStrategy {
private final EventExecutor executor;
*
* This interface should be implemented by a final class representing a protocol specific session.
*/
+@Deprecated
public interface ProtocolSession<T> extends Closeable {
@Override
void close();
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
@ThreadSafe
final class ProtocolSessionPromise<S extends ProtocolSession<?>> extends DefaultPromise<S> {
private static final Logger LOG = LoggerFactory.getLogger(ProtocolSessionPromise.class);
* Utility ReconnectStrategy singleton, which will cause the reconnect process
* to immediately schedule a reconnection attempt.
*/
+@Deprecated
@ThreadSafe
public final class ReconnectImmediatelyStrategy implements ReconnectStrategy {
private static final Logger LOG = LoggerFactory.getLogger(ReconnectImmediatelyStrategy.class);
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
final class ReconnectPromise<S extends ProtocolSession<?>, L extends SessionListener<?, ?, ?>> extends DefaultPromise<Void> {
private static final Logger LOG = LoggerFactory.getLogger(ReconnectPromise.class);
 * not make any further connection attempts and should abort the reconnection
 * process.
*/
+@Deprecated
public interface ReconnectStrategy {
/**
* Query the strategy for the connect timeout.
* primarily useful for allowing injection of a specific type of strategy for
* on-demand use, pretty much like you would use a ThreadFactory.
*/
+@Deprecated
public interface ReconnectStrategyFactory {
/**
* Create a new ReconnectStrategy.
* implemented by a protocol specific abstract class, that is extended by
* a final class that implements the methods.
*/
+@Deprecated
public interface SessionListener<M, S extends ProtocolSession<?>, T extends TerminationReason> extends EventListener {
/**
* Fired when the session was established successfully.
* implemented by a protocol specific abstract class, that is extended by
* a final class that implements the methods.
*/
+@Deprecated
public interface SessionListenerFactory<T extends SessionListener<?, ?, ?>> {
/**
* Returns one session listener
*
* @param <T> Protocol session type.
*/
+@Deprecated
public interface SessionNegotiator<T extends ProtocolSession<?>> extends ChannelInboundHandler {
}
*
* @param <S> session type
*/
+@Deprecated
public interface SessionNegotiatorFactory<M, S extends ProtocolSession<?>, L extends SessionListener<?, ?, ?>> {
/**
* Create a new negotiator attached to a channel, which will notify
/**
* Marker interface for grouping session termination cause.
*/
+@Deprecated
public interface TerminationReason {
/**
*
* Both these caps can be combined, with the strategy giving up as soon as the first one is reached.
*/
+@Deprecated
@ThreadSafe
public final class TimedReconnectStrategy implements ReconnectStrategy {
private static final Logger LOG = LoggerFactory.getLogger(TimedReconnectStrategy.class);
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-parent</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>message-bus-api</artifactId>
+ <name>${project.artifactId}</name>
+
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-inventory</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-topology</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/sal</outputBaseDir>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.8</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.*</Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+module event-aggregator {
+ // FIXME: this module needs to be split up to concepts and API
+ // as the concepts are shared with the other model in this
+ // package.
+ yang-version 1;
+ namespace "urn:cisco:params:xml:ns:yang:messagebus:eventaggregator";
+ prefix "eventaggregator";
+
+ organization "Cisco Systems, Inc.";
+ contact "Robert Gallas";
+
+ description
+    "Module implementing message bus RPC.
+
+ Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2014-12-02" {
+ description "Initial revision";
+ }
+
+ typedef pattern {
+ type string {
+ length 1..max;
+ }
+
+ // FIXME: make this a regular expression
+ description "A match pattern. Specifically this is a wildcard pattern.";
+ }
+
+ typedef notification-pattern {
+ type pattern;
+ description
+ "Pattern for matching candidate notification types. This pattern is to be
+ applied against the concatenation of the namespace of the module which
+ defines that particular notification, followed by a single colon, and
+ then followed by notification identifier, as supplied in the argument to
+ the notification statement.";
+ }
+
+ typedef topic-id {
+ type string {
+ length 1..max;
+ }
+ description
+      "A topic identifier. It uniquely defines a topic as seen by the user
+       of this model's RPCs";
+ }
+
+ // FIXME: we would really like to share instances here, but that requires some sort
+ // of sane reference counting. The reason for sharing is the data path part
+ // of notification delivery -- multiple creators of topics can still share
+ // a single data path.
+ rpc create-topic {
+ description
+ "Create a new topic. A topic is an aggregation of several notification
+ types from a set of nodes. Each successful invocation results in a unique
+ topic being created. The caller is responsible for removing the topic
+ once it is no longer needed.";
+
+ input {
+ leaf notification-pattern {
+ type notification-pattern;
+ mandatory true;
+ description
+ "Pattern matching notification which should be forwarded into this
+ topic.";
+ }
+
+ leaf node-id-pattern {
+ type pattern;
+ mandatory true;
+ description
+ "Pattern for matching candidate event source nodes when looking
+ for contributors to the topic. The pattern will be applied against
+ /network-topology/topology/node/node-id";
+ }
+ }
+
+ output {
+ leaf topic-id {
+ type topic-id;
+ mandatory true;
+ }
+ }
+ }
+
+ rpc destroy-topic {
+ description
+ "Destroy a topic. No further messages will be delivered to it.";
+
+ input {
+ leaf topic-id {
+ type topic-id;
+ mandatory true;
+ }
+ }
+ }
+
+ notification topic-notification {
+ description
+      "Notification of an event occurring on a particular node. This notification
+ acts as an encapsulation for the event being delivered.";
+
+ leaf topic-id {
+ type topic-id;
+ mandatory true;
+ description
+ "Topic to which this event is being delivered.";
+ }
+
+ leaf node-id {
+ // FIXME: should be topology node ID
+ type string;
+ mandatory true;
+ description
+ "Node ID of the node which generated the event.";
+ }
+
+ anyxml payload {
+ mandatory true;
+ description
+ "Encapsulated notification. The format is the XML representation of
+ a notification according to RFC6020 section 7.14.2.";
+ }
+ }
+}
--- /dev/null
+module event-source {
+ yang-version 1;
+ namespace "urn:cisco:params:xml:ns:yang:messagebus:eventsource";
+ prefix "eventsource";
+
+ import event-aggregator { prefix aggr; }
+ import network-topology { prefix nt; revision-date "2013-10-21"; }
+ import opendaylight-inventory {prefix inv; revision-date "2013-08-19"; }
+ import yang-ext {prefix ext; revision-date "2013-07-09"; }
+
+ organization "Cisco Systems, Inc.";
+ contact "Robert Gallas";
+
+ description
+ "Base model for a topology where individual nodes can produce events.
+
+    Module implementing event source topology and encapsulated notification.
+
+ Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2014-12-02" {
+ description "first revision";
+ }
+
+ // FIXME: expand this
+ typedef join-topic-status {
+ type enumeration {
+ enum up;
+ enum down;
+ }
+ description "Object status";
+ }
+
+ // FIXME: migrate to topology
+ typedef node-ref {
+ type leafref {
+ path "/inv:nodes/inv:node/inv:id";
+ }
+ }
+
+ grouping topology-event-source-type {
+ container topology-event-source {
+ presence "indicates an event source-aware topology";
+ }
+ }
+
+ rpc join-topic {
+ input {
+ leaf node {
+ ext:context-reference "inv:node-context";
+ type "instance-identifier";
+ }
+ leaf topic-id {
+ type aggr:topic-id;
+      description "In the current implementation the notification-pattern is defined by the topic-id.
+                   By persisting the topic definition we could omit the notification-pattern.";
+ }
+ leaf notification-pattern {
+ type aggr:notification-pattern;
+ }
+ }
+
+ output {
+ leaf status {
+ type join-topic-status;
+ }
+ }
+ }
+
+ augment "/nt:network-topology/nt:topology/nt:topology-types" {
+ uses topology-event-source-type;
+ }
+
+ augment "/nt:network-topology/nt:topology/nt:node" {
+ when "../../nt:topology-types/topology-event-source";
+ leaf event-source-node {
+ type node-ref;
+ }
+ }
+}
<!-- Clustering -->
<module>sal-remoterpc-connector</module>
+
+ <!-- Message Bus -->
+ <module>messagebus-api</module>
</modules>
<build>
}
protected int adjustedIndex(long logEntryIndex) {
- if(snapshotIndex < 0){
+ if (snapshotIndex < 0) {
return (int) logEntryIndex;
}
return (int) (logEntryIndex - (snapshotIndex + 1));
return journal.size();
}
+ @Override
+ public int dataSize() {
+ return dataSize;
+ }
+
@Override
public boolean isPresent(long logEntryIndex) {
if (logEntryIndex > lastIndex()) {
previousSnapshotIndex = -1;
previousSnapshotTerm = -1;
dataSize = 0;
+ // need to recalc the datasize based on the entries left after precommit.
+ for(ReplicatedLogEntry logEntry : journal) {
+ dataSize += logEntry.size();
+ }
+
}
@Override
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
-import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
handleCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
} else {
- if (!(message instanceof AppendEntriesMessages.AppendEntries)
- && !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: onReceiveCommand: message: {}", persistenceId(), message.getClass());
- }
- }
-
RaftActorBehavior oldBehavior = currentBehavior;
currentBehavior = currentBehavior.handleMessage(getSender(), message);
/**
* This method is called during recovery to reconstruct the state of the actor.
*
- * @param snapshot A snapshot of the state of the actor
+ * @param snapshotBytes A snapshot of the state of the actor
*/
protected abstract void applyRecoverySnapshot(byte[] snapshotBytes);
LOG.info("{}: Persisting of snapshot done:{}", persistenceId(), sn.getLogMessage());
- //be greedy and remove entries from in-mem journal which are in the snapshot
- // and update snapshotIndex and snapshotTerm without waiting for the success,
+ long dataThreshold = Runtime.getRuntime().totalMemory() *
+ getRaftActorContext().getConfigParams().getSnapshotDataThresholdPercentage() / 100;
+ if (context.getReplicatedLog().dataSize() > dataThreshold) {
+ // if memory is less, clear the log based on lastApplied.
+ // this could/should only happen if one of the followers is down
+ // as normally we keep removing from the log when its replicated to all.
+ context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getLastAppliedIndex(),
+ captureSnapshot.getLastAppliedTerm());
- context.getReplicatedLog().snapshotPreCommit(
- captureSnapshot.getLastAppliedIndex(),
- captureSnapshot.getLastAppliedTerm());
+ } else {
+ // clear the log based on replicatedToAllIndex
+ context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getReplicatedToAllIndex(),
+ captureSnapshot.getReplicatedToAllTerm());
+ }
+ getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
LOG.info("{}: Removed in-memory snapshotted entries, adjusted snaphsotIndex:{} " +
"and term:{}", persistenceId(), captureSnapshot.getLastAppliedIndex(),
// FIXME: Maybe this should be done after the command is saved
journal.subList(adjustedIndex , journal.size()).clear();
- persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
+ persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>() {
- @Override public void apply(DeleteEntries param)
- throws Exception {
+ @Override
+ public void apply(DeleteEntries param)
+ throws Exception {
//FIXME : Doing nothing for now
dataSize = 0;
- for(ReplicatedLogEntry entry : journal){
+ for (ReplicatedLogEntry entry : journal) {
dataSize += entry.size();
}
}
appendAndPersist(replicatedLogEntry, null);
}
- @Override
- public int dataSize() {
- return dataSize;
- }
-
public void appendAndPersist(
final ReplicatedLogEntry replicatedLogEntry,
final Procedure<ReplicatedLogEntry> callback) {
long dataSizeForCheck = dataSize;
dataSizeSinceLastSnapshot += logEntrySize;
- long journalSize = lastIndex()+1;
+ long journalSize = lastIndex() + 1;
if(!hasFollowers()) {
// When we do not have followers we do not maintain an in-memory log
}
// send a CaptureSnapshot to self to make the expensive operation async.
- getSelf().tell(new CaptureSnapshot(
- lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm),
+ long replicatedToAllIndex = getCurrentBehavior().getReplicatedToAllIndex();
+ ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);
+ getSelf().tell(new CaptureSnapshot(lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1)),
null);
context.setSnapshotCaptureInitiated(true);
}
- if(callback != null){
+ if (callback != null){
callback.apply(replicatedLogEntry);
}
}
* sets snapshot term
* @param snapshotTerm
*/
- public void setSnapshotTerm(long snapshotTerm);
+ void setSnapshotTerm(long snapshotTerm);
/**
* Clears the journal entries with startIndex(inclusive) and endIndex (exclusive)
* @param startIndex
* @param endIndex
*/
- public void clear(int startIndex, int endIndex);
+ void clear(int startIndex, int endIndex);
/**
* Handles all the bookkeeping in order to perform a rollback in the
* @param snapshotCapturedIndex
* @param snapshotCapturedTerm
*/
- public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
+ void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
/**
* Sets the Replicated log to state after snapshot success.
*/
- public void snapshotCommit();
+ void snapshotCommit();
/**
* Restores the replicated log to a state in the event of a save snapshot failure
*/
- public void snapshotRollback();
+ void snapshotRollback();
/**
* Size of the data in the log (in bytes)
*/
- public int dataSize();
+ int dataSize();
+
}
private long lastIndex;
private long lastTerm;
private boolean installSnapshotInitiated;
+ private long replicatedToAllIndex;
+ private long replicatedToAllTerm;
public CaptureSnapshot(long lastIndex, long lastTerm,
- long lastAppliedIndex, long lastAppliedTerm) {
- this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, false);
+ long lastAppliedIndex, long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm) {
+ this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, replicatedToAllIndex , replicatedToAllTerm, false);
}
public CaptureSnapshot(long lastIndex, long lastTerm,long lastAppliedIndex,
- long lastAppliedTerm, boolean installSnapshotInitiated) {
+ long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm, boolean installSnapshotInitiated) {
this.lastIndex = lastIndex;
this.lastTerm = lastTerm;
this.lastAppliedIndex = lastAppliedIndex;
this.lastAppliedTerm = lastAppliedTerm;
this.installSnapshotInitiated = installSnapshotInitiated;
+ this.replicatedToAllIndex = replicatedToAllIndex;
+ this.replicatedToAllTerm = replicatedToAllTerm;
}
public long getLastAppliedIndex() {
public boolean isInstallSnapshotInitiated() {
return installSnapshotInitiated;
}
+
+ public long getReplicatedToAllIndex() {
+ return replicatedToAllIndex;
+ }
+
+ public long getReplicatedToAllTerm() {
+ return replicatedToAllTerm;
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-/**
- * Internal message by Leader to initiate an install snapshot
- */
-public class InitiateInstallSnapshot {
-}
-
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
private Optional<ByteString> snapshot;
- private long replicatedToAllIndex = -1;
-
public AbstractLeader(RaftActorContext context) {
- super(context);
+ super(context, RaftState.Leader);
final Builder<String, FollowerLogInformation> ftlBuilder = ImmutableMap.builder();
for (String followerId : context.getPeerAddresses().keySet()) {
leaderId = context.getId();
- LOG.debug("{}: Election: Leader has following peers: {}", context.getId(), getFollowerIds());
+ LOG.debug("{}: Election: Leader has following peers: {}", logName(), getFollowerIds());
minReplicationCount = getMajorityVoteCount(getFollowerIds().size());
// Upon election: send initial empty AppendEntries RPCs
// (heartbeat) to each server; repeat during idle periods to
// prevent election timeouts (§5.2)
- sendAppendEntries(0);
+ sendAppendEntries(0, false);
}
/**
return followerToLog.keySet();
}
- private Optional<ByteString> getSnapshot() {
- return snapshot;
- }
-
@VisibleForTesting
void setSnapshot(Optional<ByteString> snapshot) {
this.snapshot = snapshot;
protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
- }
+ LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
return this;
}
protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
AppendEntriesReply appendEntriesReply) {
- if(! appendEntriesReply.isSuccess()) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntriesReply: {}", context.getId(), appendEntriesReply);
- }
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);
+ } else if(LOG.isDebugEnabled() && !appendEntriesReply.isSuccess()) {
+ LOG.debug("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);
}
// Update the FollowerLogInformation
followerToLog.get(followerId);
if(followerLogInformation == null){
- LOG.error("{}: handleAppendEntriesReply - unknown follower {}", context.getId(), followerId);
+ LOG.error("{}: handleAppendEntriesReply - unknown follower {}", logName(), followerId);
return this;
}
// Apply the change to the state machine
if (context.getCommitIndex() > context.getLastApplied()) {
+ LOG.debug("{}: handleAppendEntriesReply: applying to log - commitIndex: {}, lastAppliedIndex: {}",
+ logName(), context.getCommitIndex(), context.getLastApplied());
+
applyLogToStateMachine(context.getCommitIndex());
}
}
//Send the next log entry immediately, if possible, no need to wait for heartbeat to trigger that event
- sendUpdatesToFollower(followerId, followerLogInformation, false);
+ sendUpdatesToFollower(followerId, followerLogInformation, false, false);
return this;
}
private void purgeInMemoryLog() {
- //find the lowest index across followers which has been replicated to all. -1 if there are no followers.
+ //find the lowest index across followers which has been replicated to all.
+ // lastApplied if there are no followers, so that we keep clearing the log for single-node
// we would delete the in-mem log from that index on, in-order to minimize mem usage
// we would also share this info thru AE with the followers so that they can delete their log entries as well.
- long minReplicatedToAllIndex = followerToLog.isEmpty() ? -1 : Long.MAX_VALUE;
+ long minReplicatedToAllIndex = followerToLog.isEmpty() ? context.getLastApplied() : Long.MAX_VALUE;
for (FollowerLogInformation info : followerToLog.values()) {
minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());
}
- replicatedToAllIndex = fakeSnapshot(minReplicatedToAllIndex, replicatedToAllIndex);
+ super.performSnapshotWithoutCapture(minReplicatedToAllIndex);
}
@Override
return this;
}
- @Override
- public RaftState state() {
- return RaftState.Leader;
- }
-
@Override
public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Preconditions.checkNotNull(sender, "sender should not be null");
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
- LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {}", context.getId(),
- rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+ LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+ logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
sendHeartBeat();
return this;
- } else if(message instanceof InitiateInstallSnapshot) {
- installSnapshotIfNeeded();
-
} else if(message instanceof SendInstallSnapshot) {
// received from RaftActor
setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
}
private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
+ LOG.debug("{}: handleInstallSnapshotReply: {}", logName(), reply);
+
String followerId = reply.getFollowerId();
FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
if (followerToSnapshot == null) {
LOG.error("{}: FollowerId {} in InstallSnapshotReply not known to Leader",
- context.getId(), followerId);
+ logName(), followerId);
return;
}
//this was the last chunk reply
if(LOG.isDebugEnabled()) {
LOG.debug("{}: InstallSnapshotReply received, " +
- "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
- context.getId(), reply.getChunkIndex(), followerId,
+ "last chunk received, Chunk: {}. Follower: {} Setting nextIndex: {}",
+ logName(), reply.getChunkIndex(), followerId,
context.getReplicatedLog().getSnapshotIndex() + 1
);
}
context.getReplicatedLog().getSnapshotIndex() + 1);
mapFollowerToSnapshot.remove(followerId);
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: followerToLog.get(followerId).getNextIndex()=" +
- context.getId(), followerToLog.get(followerId).getNextIndex());
- }
+ LOG.debug("{}: follower: {}, matchIndex set to {}, nextIndex set to {}",
+ logName(), followerId, followerLogInformation.getMatchIndex(),
+ followerLogInformation.getNextIndex());
if (mapFollowerToSnapshot.isEmpty()) {
// once there are no pending followers receiving snapshots
}
} else {
LOG.info("{}: InstallSnapshotReply received sending snapshot chunk failed, Will retry, Chunk: {}",
- context.getId(), reply.getChunkIndex());
+ logName(), reply.getChunkIndex());
followerToSnapshot.markSendStatus(false);
}
} else {
LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
- context.getId(), reply.getChunkIndex(), followerId,
+ logName(), reply.getChunkIndex(), followerId,
followerToSnapshot.getChunkIndex());
if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
private void replicate(Replicate replicate) {
long logIndex = replicate.getReplicatedLogEntry().getIndex();
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Replicate message {}", context.getId(), logIndex);
- }
+ LOG.debug("{}: Replicate message: identifier: {}, logIndex: {}", logName(),
+ replicate.getIdentifier(), logIndex);
// Create a tracker entry we will use this later to notify the
// client actor
context.setCommitIndex(logIndex);
applyLogToStateMachine(logIndex);
} else {
- sendAppendEntries(0);
+ sendAppendEntries(0, false);
}
}
- private void sendAppendEntries(long timeSinceLastActivityInterval) {
+ private void sendAppendEntries(long timeSinceLastActivityInterval, boolean isHeartbeat) {
// Send an AppendEntries to all followers
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
final String followerId = e.getKey();
// This checks helps not to send a repeat message to the follower
if(!followerLogInformation.isFollowerActive() ||
followerLogInformation.timeSinceLastActivity() >= timeSinceLastActivityInterval) {
- sendUpdatesToFollower(followerId, followerLogInformation, true);
+ sendUpdatesToFollower(followerId, followerLogInformation, true, isHeartbeat);
}
}
}
*/
private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
- boolean sendHeartbeat) {
+ boolean sendHeartbeat, boolean isHeartbeat) {
ActorSelection followerActor = context.getPeerActorSelection(followerId);
if (followerActor != null) {
} else {
long leaderLastIndex = context.getReplicatedLog().lastIndex();
long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
- if (isFollowerActive &&
- context.getReplicatedLog().isPresent(followerNextIndex)) {
+
+ if(!isHeartbeat || LOG.isTraceEnabled()) {
+ LOG.debug("{}: Checking sendAppendEntries for follower {}, leaderLastIndex: {}, leaderSnapShotIndex: {}",
+ logName(), followerId, leaderLastIndex, leaderSnapShotIndex);
+ }
+
+ if (isFollowerActive && context.getReplicatedLog().isPresent(followerNextIndex)) {
+
+ LOG.debug("{}: sendAppendEntries: {} is present for follower {}", logName(),
+ followerNextIndex, followerId);
+
// FIXME : Sending one entry at a time
final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
} else if (isFollowerActive && followerNextIndex >= 0 &&
- leaderLastIndex >= followerNextIndex) {
+ leaderLastIndex > followerNextIndex && !context.isSnapshotCaptureInitiated()) {
// if the followers next index is not present in the leaders log, and
// if the follower is just not starting and if leader's index is more than followers index
// then snapshot should be sent
if (LOG.isDebugEnabled()) {
- LOG.debug("InitiateInstallSnapshot to follower:{}," +
- "follower-nextIndex:{}, leader-snapshot-index:{}, " +
- "leader-last-index:{}", followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex
- );
+ LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
+ "follower-nextIndex: %d, leader-snapshot-index: %d, " +
+ "leader-last-index: %d", logName(), followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
}
- actor().tell(new InitiateInstallSnapshot(), actor());
// Send heartbeat to follower whenever install snapshot is initiated.
sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ initiateCaptureSnapshot(followerId, followerNextIndex);
+
} else if(sendHeartbeat) {
//we send an AppendEntries, even if the follower is inactive
// in-order to update the followers timestamp, in case it becomes active again
AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
prevLogIndex(followerNextIndex),
prevLogTerm(followerNextIndex), entries,
- context.getCommitIndex(), replicatedToAllIndex);
+ context.getCommitIndex(), super.getReplicatedToAllIndex());
- if(!entries.isEmpty()) {
- LOG.debug("{}: Sending AppendEntries to follower {}: {}", context.getId(), followerId,
+ if(!entries.isEmpty() || LOG.isTraceEnabled()) {
+ LOG.debug("{}: Sending AppendEntries to follower {}: {}", logName(), followerId,
appendEntries);
}
}
/**
- * An installSnapshot is scheduled at a interval that is a multiple of
- * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
- * snapshots at every heartbeat.
- *
* Install Snapshot works as follows
- * 1. Leader sends a InitiateInstallSnapshot message to self
- * 2. Leader then initiates the capture snapshot by sending a CaptureSnapshot message to actor
- * 3. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
+ * 1. Leader initiates the capture snapshot by sending a CaptureSnapshot message to actor
+ * 2. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
* and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
- * 4. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
- * 5. On complete, Follower sends back a InstallSnapshotReply.
- * 6. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
+ * 3. Leader picks the snapshot from the in-memory ReplicatedLog and sends it in chunks to the Follower
+ * 4. On complete, Follower sends back a InstallSnapshotReply.
+ * 5. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
* and replenishes the memory by deleting the snapshot in Replicated log.
- *
+ * 6. If another follower requires a snapshot and a snapshot has already been collected (via CaptureSnapshotReply),
+ * then the existing snapshot is sent in chunks to that follower.
+ * @param followerId the id of the follower for which a snapshot install may be needed
+ * @param followerNextIndex the next log entry index expected by that follower
*/
- private void installSnapshotIfNeeded() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: installSnapshotIfNeeded, followers {}", context.getId(), followerToLog.keySet());
- }
-
- for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
- final ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
-
- if (followerActor != null) {
- long nextIndex = e.getValue().getNextIndex();
-
- if (!context.getReplicatedLog().isPresent(nextIndex) &&
- context.getReplicatedLog().isInSnapshot(nextIndex)) {
- LOG.info("{}: {} follower needs a snapshot install", context.getId(), e.getKey());
- if (snapshot.isPresent()) {
- // if a snapshot is present in the memory, most likely another install is in progress
- // no need to capture snapshot
- sendSnapshotChunk(followerActor, e.getKey());
-
- } else if (!context.isSnapshotCaptureInitiated()) {
- initiateCaptureSnapshot();
- //we just need 1 follower who would need snapshot to be installed.
- // when we have the snapshot captured, we would again check (in SendInstallSnapshot)
- // who needs an install and send to all who need
- break;
- }
+ private void initiateCaptureSnapshot(String followerId, long followerNextIndex) {
+ if (!context.getReplicatedLog().isPresent(followerNextIndex) &&
+ context.getReplicatedLog().isInSnapshot(followerNextIndex)) {
+ if (snapshot.isPresent()) {
+ // If a snapshot is already present in memory, another install is most likely
+ // in progress, so there is no need to capture a new snapshot.
+ // This can happen when another follower needs an install while one is ongoing.
+ final ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ sendSnapshotChunk(followerActor, followerId);
+
+ } else if (!context.isSnapshotCaptureInitiated()) {
+
+ LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", logName(), getLeaderId());
+ ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
+ long lastAppliedIndex = -1;
+ long lastAppliedTerm = -1;
+
+ if (lastAppliedEntry != null) {
+ lastAppliedIndex = lastAppliedEntry.getIndex();
+ lastAppliedTerm = lastAppliedEntry.getTerm();
+ } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
+ lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
+ lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
}
+
+ boolean isInstallSnapshotInitiated = true;
+ long replicatedToAllIndex = super.getReplicatedToAllIndex();
+ ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);
+ actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1),
+ isInstallSnapshotInitiated), actor());
+ context.setSnapshotCaptureInitiated(true);
}
}
}
- // on every install snapshot, we try to capture the snapshot.
- // Once a capture is going on, another one issued will get ignored by RaftActor.
- private void initiateCaptureSnapshot() {
- LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", context.getId(), getLeaderId());
- ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
- long lastAppliedIndex = -1;
- long lastAppliedTerm = -1;
-
- if (lastAppliedEntry != null) {
- lastAppliedIndex = lastAppliedEntry.getIndex();
- lastAppliedTerm = lastAppliedEntry.getTerm();
- } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
- lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
- lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
- }
-
- boolean isInstallSnapshotInitiated = true;
- actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
- lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
- actor());
- context.setSnapshotCaptureInitiated(true);
- }
-
private void sendInstallSnapshot() {
+ LOG.debug("{}: sendInstallSnapshot", logName());
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
context.getReplicatedLog().getSnapshotIndex(),
context.getReplicatedLog().getSnapshotTerm(),
nextSnapshotChunk,
- followerToSnapshot.incrementChunkIndex(),
- followerToSnapshot.getTotalChunks(),
+ followerToSnapshot.incrementChunkIndex(),
+ followerToSnapshot.getTotalChunks(),
Optional.of(followerToSnapshot.getLastChunkHashCode())
).toSerializable(),
actor()
);
LOG.info("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
- context.getId(), followerActor.path(),
+ logName(), followerActor.path(),
followerToSnapshot.getChunkIndex(),
followerToSnapshot.getTotalChunks());
}
} catch (IOException e) {
- LOG.error("{}: InstallSnapshot failed for Leader.", context.getId(), e);
+ LOG.error("{}: InstallSnapshot failed for Leader.", logName(), e);
}
}
mapFollowerToSnapshot.put(followerId, followerToSnapshot);
}
ByteString nextChunk = followerToSnapshot.getNextChunk();
- if (LOG.isDebugEnabled()) {
- LOG.debug("{}: Leader's snapshot nextChunk size:{}", context.getId(), nextChunk.size());
- }
+
+ LOG.debug("{}: next snapshot chunk size for follower {}: {}", logName(), followerId, nextChunk.size());
+
return nextChunk;
}
private void sendHeartBeat() {
if (!followerToLog.isEmpty()) {
- sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis());
+ LOG.trace("{}: Sending heartbeat", logName());
+ sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis(), true);
}
}
((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
if(LOG.isDebugEnabled()) {
LOG.debug("{}: Snapshot {} bytes, total chunks to send:{}",
- context.getId(), size, totalChunks);
+ logName(), size, totalChunks);
}
replyReceivedForOffset = -1;
chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
}
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Next chunk: length={}, offset={},size={}", context.getId(),
+
+ LOG.debug("{}: Next chunk: length={}, offset={},size={}", logName(),
snapshotLength, start, size);
- }
+
ByteString substring = getSnapshotBytes().substring(start, start + size);
nextChunkHashCode = substring.hashCode();
return substring;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
*/
protected String leaderId = null;
+ private long replicatedToAllIndex = -1;
- protected AbstractRaftActorBehavior(RaftActorContext context) {
+ private final String logName;
+
+ private final RaftState state;
+
+ protected AbstractRaftActorBehavior(RaftActorContext context, RaftState state) {
this.context = context;
+ this.state = state;
this.LOG = context.getLogger();
+
+ logName = String.format("%s (%s)", context.getId(), state);
+ }
+
+ @Override
+ public RaftState state() {
+ return state;
+ }
+
+ public String logName() {
+ return logName;
+ }
+
+ @Override
+ public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+ this.replicatedToAllIndex = replicatedToAllIndex;
+ }
+
+ @Override
+ public long getReplicatedToAllIndex() {
+ return replicatedToAllIndex;
}
/**
if (appendEntries.getTerm() < currentTerm()) {
if(LOG.isDebugEnabled()) {
LOG.debug("{}: Cannot append entries because sender term {} is less than {}",
- context.getId(), appendEntries.getTerm(), currentTerm());
+ logName(), appendEntries.getTerm(), currentTerm());
}
sender.tell(
* @param requestVote
* @return
*/
- protected RaftActorBehavior requestVote(ActorRef sender,
- RequestVote requestVote) {
+ protected RaftActorBehavior requestVote(ActorRef sender, RequestVote requestVote) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received {}", context.getId(), requestVote);
- }
+ LOG.debug("{}: In requestVote: {}", logName(), requestVote);
boolean grantVote = false;
}
}
- sender.tell(new RequestVoteReply(currentTerm(), grantVote), actor());
+ RequestVoteReply reply = new RequestVoteReply(currentTerm(), grantVote);
+
+ LOG.debug("{}: requestVote returning: {}", logName(), reply);
+
+ sender.tell(reply, actor());
return this;
}
// around as the rest wont be present either
LOG.warn(
"{}: Missing index {} from log. Cannot apply state. Ignoring {} to {}",
- context.getId(), i, i, index);
+ logName(), i, i, index);
break;
}
}
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Setting last applied to {}", context.getId(), newLastApplied);
+ LOG.debug("{}: Setting last applied to {}", logName(), newLastApplied);
}
context.setLastApplied(newLastApplied);
}
protected RaftActorBehavior switchBehavior(RaftActorBehavior behavior) {
- LOG.info("{} :- Switching from behavior {} to {}", context.getId(), this.state(), behavior.state());
+ LOG.info("{} :- Switching from behavior {} to {}", logName(), this.state(), behavior.state());
try {
close();
} catch (Exception e) {
- LOG.error("{}: Failed to close behavior : {}", context.getId(), this.state(), e);
+ LOG.error("{}: Failed to close behavior : {}", logName(), this.state(), e);
}
return behavior;
}
- protected long fakeSnapshot(final long minReplicatedToAllIndex, final long currentReplicatedIndex) {
+ /**
+ * Performs a snapshot with no capture on the replicated log.
+ * It clears the log from the supplied index or last-applied minus 1, whichever is smaller.
+ *
+ * @param snapshotCapturedIndex the index up to which the log may be purged
+ */
+ protected void performSnapshotWithoutCapture(final long snapshotCapturedIndex) {
// we would want to keep the lastApplied as its used while capturing snapshots
- long tempMin = Math.min(minReplicatedToAllIndex,
- (context.getLastApplied() > -1 ? context.getLastApplied() - 1 : -1));
+ long lastApplied = context.getLastApplied();
+ long tempMin = Math.min(snapshotCapturedIndex, (lastApplied > -1 ? lastApplied - 1 : -1));
if (tempMin > -1 && context.getReplicatedLog().isPresent(tempMin)) {
- context.getReplicatedLog().snapshotPreCommit(tempMin, context.getTermInformation().getCurrentTerm());
+ LOG.debug("{}: fakeSnapshot purging log to {} for term {}", logName(), tempMin,
+ context.getTermInformation().getCurrentTerm());
+
+ //use the term of the entry at tempMin; since isPresent was checked above, the entry will not be null
+ ReplicatedLogEntry entry = context.getReplicatedLog().get(tempMin);
+ context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
context.getReplicatedLog().snapshotCommit();
- return tempMin;
+ setReplicatedToAllIndex(tempMin);
}
- return currentReplicatedIndex;
}
+
}
private final Set<String> peers;
public Candidate(RaftActorContext context) {
- super(context);
+ super(context, RaftState.Candidate);
peers = context.getPeerAddresses().keySet();
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Election: Candidate has following peers: {}", context.getId(), peers);
+ LOG.debug("{}: Election: Candidate has following peers: {}", logName(), peers);
}
votesRequired = getMajorityVoteCount(peers.size());
AppendEntries appendEntries) {
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
+ LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
}
return this;
}
@Override protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
- RequestVoteReply requestVoteReply) {
+ RequestVoteReply requestVoteReply) {
+
+ LOG.debug("{}: handleRequestVoteReply: {}, current voteCount: {}", logName(), requestVoteReply,
+ voteCount);
if (requestVoteReply.isVoteGranted()) {
voteCount++;
return this;
}
- @Override public RaftState state() {
- return RaftState.Candidate;
- }
-
@Override
public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
RaftRPC rpc = (RaftRPC) message;
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: RaftRPC message received {} my term is {}", context.getId(), rpc,
+ LOG.debug("{}: RaftRPC message received {}, my term is {}", logName(), rpc,
context.getTermInformation().getCurrentTerm());
}
}
if (message instanceof ElectionTimeout) {
+ LOG.debug("{}: Received ElectionTimeout", logName());
+
if (votesRequired == 0) {
// If there are no peers then we should be a Leader
// We wait for the election timeout to occur before declare
// Increment the election term and vote for self
long currentTerm = context.getTermInformation().getCurrentTerm();
- context.getTermInformation().updateAndPersist(currentTerm + 1,
- context.getId());
+ long newTerm = currentTerm + 1;
+ context.getTermInformation().updateAndPersist(newTerm, context.getId());
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Starting new term {}", context.getId(), (currentTerm + 1));
- }
+ LOG.debug("{}: Starting new term {}", logName(), newTerm);
// Request for a vote
// TODO: Retry request for vote if replies do not arrive in a reasonable
for (String peerId : peers) {
ActorSelection peerActor = context.getPeerActorSelection(peerId);
if(peerActor != null) {
- peerActor.tell(new RequestVote(
+ RequestVote requestVote = new RequestVote(
context.getTermInformation().getCurrentTerm(),
context.getId(),
context.getReplicatedLog().lastIndex(),
- context.getReplicatedLog().lastTerm()),
- context.getActor()
- );
- }
- }
+ context.getReplicatedLog().lastTerm());
+ LOG.debug("{}: Sending {} to peer {}", logName(), requestVote, peerId);
+ peerActor.tell(requestVote, context.getActor());
+ }
+ }
}
@Override public void close() throws Exception {
private SnapshotTracker snapshotTracker = null;
public Follower(RaftActorContext context) {
- super(context);
+ super(context, RaftState.Follower);
scheduleElection(electionDuration());
}
@Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
- }
+ int numLogEntries = appendEntries.getEntries() != null ? appendEntries.getEntries().size() : 0;
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
+ } else if(LOG.isDebugEnabled() && numLogEntries > 0) {
+ LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
}
// TODO : Refactor this method into a bunch of smaller methods
boolean outOfSync = true;
// First check if the logs are in sync or not
- if (lastIndex() == -1
- && appendEntries.getPrevLogIndex() != -1) {
+ long lastIndex = lastIndex();
+ if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {
// The follower's log is out of sync because the leader does have
// an entry at prevLogIndex and this follower has no entries in
// it's log.
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: The followers log is empty and the senders prevLogIndex is {}",
- context.getId(), appendEntries.getPrevLogIndex());
- }
-
- } else if (lastIndex() > -1
- && appendEntries.getPrevLogIndex() != -1
- && !prevEntryPresent) {
+ LOG.debug("{}: The followers log is empty and the senders prevLogIndex is {}",
+ logName(), appendEntries.getPrevLogIndex());
+ } else if (lastIndex > -1 && appendEntries.getPrevLogIndex() != -1 && !prevEntryPresent) {
// The follower's log is out of sync because the Leader's
// prevLogIndex entry was not found in it's log
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: The log is not empty but the prevLogIndex {} was not found in it",
- context.getId(), appendEntries.getPrevLogIndex());
- }
-
- } else if (lastIndex() > -1
- && prevEntryPresent
- && prevLogTerm != appendEntries.getPrevLogTerm()) {
+ LOG.debug("{}: The log is not empty but the prevLogIndex {} was not found in it",
+ logName(), appendEntries.getPrevLogIndex());
+ } else if (lastIndex > -1 && prevEntryPresent && prevLogTerm != appendEntries.getPrevLogTerm()) {
// The follower's log is out of sync because the Leader's
// prevLogIndex entry does exist in the follower's log but it has
// a different term in it
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "{}: Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}"
- , context.getId(), prevLogTerm
- , appendEntries.getPrevLogTerm());
- }
+ LOG.debug(
+ "{}: Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}",
+ logName(), prevLogTerm, appendEntries.getPrevLogTerm());
} else {
outOfSync = false;
}
if (outOfSync) {
// We found that the log was out of sync so just send a negative
// reply and return
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Follower ({}) is out-of-sync, " +
- "so sending negative reply, lastIndex():{}, lastTerm():{}",
- context.getId(), context.getId(), lastIndex(), lastTerm()
- );
- }
- sender.tell(
- new AppendEntriesReply(context.getId(), currentTerm(), false,
- lastIndex(), lastTerm()), actor()
- );
+
+ LOG.debug("{}: Follower is out-of-sync, so sending negative reply, lastIndex: {}, lastTerm: {}",
+ logName(), lastIndex, lastTerm());
+
+ sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
+ lastTerm()), actor());
return this;
}
- if (appendEntries.getEntries() != null
- && appendEntries.getEntries().size() > 0) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Number of entries to be appended = {}", context.getId(),
+ if (appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
+
+ LOG.debug("{}: Number of entries to be appended = {}", logName(),
appendEntries.getEntries().size());
- }
// 3. If an existing entry conflicts with a new one (same index
// but different terms), delete the existing entry and all that
break;
}
- if (newEntry.getTerm() == matchEntry
- .getTerm()) {
+ if (newEntry.getTerm() == matchEntry.getTerm()) {
continue;
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Removing entries from log starting at {}", context.getId(),
+ LOG.debug("{}: Removing entries from log starting at {}", logName(),
matchEntry.getIndex());
- }
// Entries do not match so remove all subsequent entries
- context.getReplicatedLog()
- .removeFromAndPersist(matchEntry.getIndex());
+ context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex());
break;
}
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: After cleanup entries to be added from = {}", context.getId(),
- (addEntriesFrom + lastIndex()));
- }
+ lastIndex = lastIndex();
+ LOG.debug("{}: After cleanup entries to be added from = {}", logName(),
+ (addEntriesFrom + lastIndex));
// 4. Append any new entries not already in the log
- for (int i = addEntriesFrom;
- i < appendEntries.getEntries().size(); i++) {
+ for (int i = addEntriesFrom; i < appendEntries.getEntries().size(); i++) {
+ ReplicatedLogEntry entry = appendEntries.getEntries().get(i);
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Append entry to log {}", context.getId(),
- appendEntries.getEntries().get(i).getData());
- }
- context.getReplicatedLog().appendAndPersist(appendEntries.getEntries().get(i));
- }
+ LOG.debug("{}: Append entry to log {}", logName(), entry.getData());
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Log size is now {}", context.getId(), context.getReplicatedLog().size());
+ context.getReplicatedLog().appendAndPersist(entry);
}
- }
+ LOG.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());
+ }
// 5. If leaderCommit > commitIndex, set commitIndex =
// min(leaderCommit, index of last new entry)
+ lastIndex = lastIndex();
long prevCommitIndex = context.getCommitIndex();
- context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(),
- context.getReplicatedLog().lastIndex()));
+ context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
if (prevCommitIndex != context.getCommitIndex()) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Commit index set to {}", context.getId(), context.getCommitIndex());
- }
+ LOG.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
}
// If commitIndex > lastApplied: increment lastApplied, apply
// log[lastApplied] to state machine (§5.3)
// check if there are any entries to be applied. last-applied can be equal to last-index
if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
- context.getLastApplied() < lastIndex()) {
+ context.getLastApplied() < lastIndex) {
if(LOG.isDebugEnabled()) {
LOG.debug("{}: applyLogToStateMachine, " +
- "appendEntries.getLeaderCommit():{}," +
- "context.getLastApplied():{}, lastIndex():{}", context.getId(),
- appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()
- );
+ "appendEntries.getLeaderCommit(): {}," +
+ "context.getLastApplied(): {}, lastIndex(): {}", logName(),
+ appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
}
applyLogToStateMachine(appendEntries.getLeaderCommit());
}
- sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), true,
- lastIndex(), lastTerm()), actor());
+ AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
+ lastIndex, lastTerm());
+
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: handleAppendEntries returning : {}", logName(), reply);
+ } else if(LOG.isDebugEnabled() && numLogEntries > 0) {
+ LOG.debug("{}: handleAppendEntries returning : {}", logName(), reply);
+ }
+
+ sender.tell(reply, actor());
if (!context.isSnapshotCaptureInitiated()) {
- fakeSnapshot(appendEntries.getReplicatedToAllIndex(), appendEntries.getReplicatedToAllIndex());
+ super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
}
return this;
return this;
}
- @Override public RaftState state() {
- return RaftState.Follower;
- }
-
@Override public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Object message = fromSerializableMessage(originalMessage);
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+ LOG.debug("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
+ logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
}
}
if (message instanceof ElectionTimeout) {
+ LOG.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
return switchBehavior(new Candidate(context));
} else if (message instanceof InstallSnapshot) {
private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: InstallSnapshot received by follower " +
- "datasize:{} , Chunk:{}/{}", context.getId(), installSnapshot.getData().size(),
- installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()
- );
- }
+
+ LOG.debug("{}: InstallSnapshot received from leader {}, datasize: {} , Chunk: {}/{}",
+ logName(), installSnapshot.getLeaderId(), installSnapshot.getData().size(),
+ installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
if(snapshotTracker == null){
snapshotTracker = new SnapshotTracker(LOG, installSnapshot.getTotalChunks());
}
- sender.tell(new InstallSnapshotReply(
- currentTerm(), context.getId(), installSnapshot.getChunkIndex(),
- true), actor());
+ InstallSnapshotReply reply = new InstallSnapshotReply(
+ currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);
+
+ LOG.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);
+
+ sender.tell(reply, actor());
} catch (SnapshotTracker.InvalidChunkException e) {
+ LOG.debug("{}: Exception in InstallSnapshot of follower", logName(), e);
sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
-1, false), actor());
snapshotTracker = null;
} catch (Exception e){
- LOG.error("{}: Exception in InstallSnapshot of follower", context.getId(), e);
+ LOG.error("{}: Exception in InstallSnapshot of follower", logName(), e);
+
//send reply with success as false. The chunk will be sent again on failure
sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
installSnapshot.getChunkIndex(), false), actor());
ByteString getSnapshotChunksCollected(){
return snapshotTracker != null ? snapshotTracker.getCollectedChunks() : ByteString.EMPTY;
}
-
-
}
* @return
*/
String getLeaderId();
+
+ /**
+ * Sets the index of the log entry which has been replicated to all nodes.
+ * @param replicatedToAllIndex the index of the log entry replicated to all followers
+ */
+ void setReplicatedToAllIndex(long replicatedToAllIndex);
+
+ /**
+ * Returns the index of the log entry which has been replicated to all nodes.
+ * @return the index of the log entry replicated to all followers
+ */
+ long getReplicatedToAllIndex();
}
return replicatedToAllIndex;
}
+
@Override
public String toString() {
- final StringBuilder sb =
- new StringBuilder("AppendEntries{");
- sb.append("term=").append(getTerm());
- sb.append("leaderId='").append(leaderId).append('\'');
- sb.append(", prevLogIndex=").append(prevLogIndex);
- sb.append(", prevLogTerm=").append(prevLogTerm);
- sb.append(", entries=").append(entries);
- sb.append(", leaderCommit=").append(leaderCommit);
- sb.append(", replicatedToAllIndex=").append(replicatedToAllIndex);
- sb.append('}');
- return sb.toString();
+ StringBuilder builder = new StringBuilder();
+ builder.append("AppendEntries [term=").append(term).append(", leaderId=").append(leaderId)
+ .append(", prevLogIndex=").append(prevLogIndex).append(", prevLogTerm=").append(prevLogTerm)
+ .append(", entries=").append(entries).append(", leaderCommit=").append(leaderCommit)
+ .append(", replicatedToAllIndex=").append(replicatedToAllIndex).append("]");
+ return builder.toString();
}
public <T extends Object> Object toSerializable() {
return followerId;
}
- @Override public String toString() {
- final StringBuilder sb =
- new StringBuilder("AppendEntriesReply{");
- sb.append("term=").append(term);
- sb.append(", success=").append(success);
- sb.append(", logLastIndex=").append(logLastIndex);
- sb.append(", logLastTerm=").append(logLastTerm);
- sb.append(", followerId='").append(followerId).append('\'');
- sb.append('}');
- return sb.toString();
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("AppendEntriesReply [term=").append(term).append(", success=").append(success)
+ .append(", logLastIndex=").append(logLastIndex).append(", logLastTerm=").append(logLastTerm)
+ .append(", followerId=").append(followerId).append("]");
+ return builder.toString();
}
}
return installSnapshot;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("InstallSnapshot [term=").append(term).append(", leaderId=").append(leaderId)
+ .append(", lastIncludedIndex=").append(lastIncludedIndex).append(", lastIncludedTerm=")
+ .append(lastIncludedTerm).append(", data=").append(data).append(", chunkIndex=").append(chunkIndex)
+ .append(", totalChunks=").append(totalChunks).append(", lastChunkHashCode=").append(lastChunkHashCode)
+ .append("]");
+ return builder.toString();
+ }
}
public boolean isSuccess() {
return success;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("InstallSnapshotReply [term=").append(term).append(", followerId=").append(followerId)
+ .append(", chunkIndex=").append(chunkIndex).append(", success=").append(success).append("]");
+ return builder.toString();
+ }
}
this.lastLogTerm = lastLogTerm;
}
- @Override public String toString() {
- final StringBuilder sb =
- new StringBuilder("RequestVote{");
- sb.append("term='").append(getTerm()).append('\'');
- sb.append("candidateId='").append(candidateId).append('\'');
- sb.append(", lastLogIndex=").append(lastLogIndex);
- sb.append(", lastLogTerm=").append(lastLogTerm);
- sb.append('}');
- return sb.toString();
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("RequestVote [term=").append(term).append(", candidateId=").append(candidateId)
+ .append(", lastLogIndex=").append(lastLogIndex).append(", lastLogTerm=").append(lastLogTerm)
+ .append("]");
+ return builder.toString();
}
}
public boolean isVoteGranted() {
return voteGranted;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("RequestVoteReply [term=").append(term).append(", voteGranted=").append(voteGranted).append("]");
+ return builder.toString();
+ }
}
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+
/**
*
*/
@Test
public void testSnapshotPreCommit() {
+ //add 4 more entries
replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("E")));
replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("F")));
replicatedLogImpl.append(new MockReplicatedLogEntry(3, 6, new MockPayload("G")));
replicatedLogImpl.append(new MockReplicatedLogEntry(3, 7, new MockPayload("H")));
+ //sending negative values should not cause any changes
+ replicatedLogImpl.snapshotPreCommit(-1, -1);
+ assertEquals(8, replicatedLogImpl.size());
+ assertEquals(-1, replicatedLogImpl.getSnapshotIndex());
+
replicatedLogImpl.snapshotPreCommit(4, 3);
assertEquals(3, replicatedLogImpl.size());
assertEquals(4, replicatedLogImpl.getSnapshotIndex());
assertEquals(0, replicatedLogImpl.size());
assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+ }
+
+ @Test
+ public void testIsPresent() {
+ assertTrue(replicatedLogImpl.isPresent(0));
+ assertTrue(replicatedLogImpl.isPresent(1));
+ assertTrue(replicatedLogImpl.isPresent(2));
+ assertTrue(replicatedLogImpl.isPresent(3));
+
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("D")));
+ replicatedLogImpl.snapshotPreCommit(3, 2); //snapshot on 3
+ replicatedLogImpl.snapshotCommit();
+
+ assertFalse(replicatedLogImpl.isPresent(0));
+ assertFalse(replicatedLogImpl.isPresent(1));
+ assertFalse(replicatedLogImpl.isPresent(2));
+ assertFalse(replicatedLogImpl.isPresent(3));
+ assertTrue(replicatedLogImpl.isPresent(4));
+
+ replicatedLogImpl.snapshotPreCommit(4, 2); //snapshot on 4
+ replicatedLogImpl.snapshotCommit();
+ assertFalse(replicatedLogImpl.isPresent(4));
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("D")));
+ assertTrue(replicatedLogImpl.isPresent(5));
}
// create a snapshot for test
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
new MockRaftActorContext.MockPayload("C"),
new MockRaftActorContext.MockPayload("D")));
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, -1, 1));
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1,-1, 1, -1, 1));
RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
mockRaftActor.setCurrentBehavior(new Follower(raftActorContext));
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, 2, 1));
+ long replicatedToAllIndex = 1;
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, 2, 1, replicatedToAllIndex, 1));
verify(mockRaftActor.delegate).createSnapshot();
verify(dataPersistenceProvider).deleteMessages(100);
- assertEquals(2, mockRaftActor.getReplicatedLog().size());
+ assertEquals(3, mockRaftActor.getReplicatedLog().size());
+ assertEquals(1, mockRaftActor.getCurrentBehavior().getReplicatedToAllIndex());
+ assertNotNull(mockRaftActor.getReplicatedLog().get(2));
assertNotNull(mockRaftActor.getReplicatedLog().get(3));
assertNotNull(mockRaftActor.getReplicatedLog().get(4));
// Index 2 will not be in the log because it was removed due to snapshotting
- assertNull(mockRaftActor.getReplicatedLog().get(2));
+ assertNull(mockRaftActor.getReplicatedLog().get(1));
+ assertNull(mockRaftActor.getReplicatedLog().get(0));
}
};
mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, -1, 1));
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, -1, 1, -1, 1));
mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
assertEquals(8, leaderActor.getReplicatedLog().size());
- leaderActor.onReceiveCommand(new CaptureSnapshot(6, 1, 4, 1));
+ leaderActor.onReceiveCommand(new CaptureSnapshot(6, 1, 4, 1, 4, 1));
+
leaderActor.getRaftActorContext().setSnapshotCaptureInitiated(true);
verify(leaderActor.delegate).createSnapshot();
followerActor.waitForInitializeBehaviorComplete();
- // create 8 entries in the log - 0 to 4 are applied and will get picked up as part of the capture snapshot
+
Follower follower = new Follower(followerActor.getRaftActorContext());
followerActor.setCurrentBehavior(follower);
assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
+ // create 6 entries in the log - 0 to 4 are applied and will get picked up as part of the capture snapshot
MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
followerActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(0, 6, 1).build());
- // log as indices 0-5
+ // log has indices 0-5
assertEquals(6, followerActor.getReplicatedLog().size());
//snapshot on 4
- followerActor.onReceiveCommand(new CaptureSnapshot(5, 1, 4, 1));
+ followerActor.onReceiveCommand(new CaptureSnapshot(5, 1, 4, 1, 4, 1));
+
followerActor.getRaftActorContext().setSnapshotCaptureInitiated(true);
verify(followerActor.delegate).createSnapshot();
followerActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
assertFalse(followerActor.getRaftActorContext().isSnapshotCaptureInitiated());
- // capture snapshot reply should remove the snapshotted entries only
+ // capture snapshot reply should remove the snapshotted entries only till replicatedToAllIndex
assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
assertEquals(7, followerActor.getReplicatedLog().lastIndex());
// create 5 entries in the log
MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
leaderActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(5, 10, 1).build());
+
//set the snapshot index to 4 , 0 to 4 are snapshotted
leaderActor.getRaftActorContext().getReplicatedLog().setSnapshotIndex(4);
+ //setting replicatedToAllIndex = 9, for the log to clear
+ leader.setReplicatedToAllIndex(9);
assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 9, 1));
assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
// set the 2nd follower nextIndex to 1 which has been snapshotted
leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 0, 1));
assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
// simulate a real snapshot
- leaderActor.onReceiveCommand(new InitiateInstallSnapshot());
+ leaderActor.onReceiveCommand(new SendHeartBeat());
assertEquals(5, leaderActor.getReplicatedLog().size());
assertEquals(String.format("expected to be Leader but was %s. Current Leader = %s ",
leaderActor.getCurrentBehavior().state(), leaderActor.getLeaderId())
leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
- assertEquals("Real snapshot didn't clear the log till lastApplied", 0, leaderActor.getReplicatedLog().size());
+ assertEquals("Real snapshot didn't clear the log till replicatedToAllIndex", 0, leaderActor.getReplicatedLog().size());
//reply from a slow follower after should not raise errors
leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 5, 1));
package org.opendaylight.controller.cluster.raft.behaviors;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
+import java.util.ArrayList;
+import java.util.List;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.AbstractActorTest;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
public abstract class AbstractRaftActorBehaviorTest extends AbstractActorTest {
private final ActorRef behaviorActor = getSystem().actorOf(Props.create(
}
@Test
- public void testFakeSnapshots() {
+ public void testPerformSnapshot() {
MockRaftActorContext context = new MockRaftActorContext("test", getSystem(), behaviorActor);
- AbstractRaftActorBehavior behavior = new Leader(context);
- context.getTermInformation().update(1, "leader");
+ AbstractRaftActorBehavior abstractBehavior = (AbstractRaftActorBehavior) createBehavior(context);
+ if (abstractBehavior instanceof Candidate) {
+ return;
+ }
- //entry with 1 index=0 entry with replicatedToAllIndex = 0, does not do anything, returns the
+ context.getTermInformation().update(1, "test");
+
+ //log has 1 entry with replicatedToAllIndex = 0, does not do anything, returns the
context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 1, 1).build());
context.setLastApplied(0);
- assertEquals(-1, behavior.fakeSnapshot(0, -1));
+ abstractBehavior.performSnapshotWithoutCapture(0);
+ assertEquals(-1, abstractBehavior.getReplicatedToAllIndex());
assertEquals(1, context.getReplicatedLog().size());
//2 entries, lastApplied still 0, no purging.
- context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0,2,1).build());
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1).build());
context.setLastApplied(0);
- assertEquals(-1, behavior.fakeSnapshot(0, -1));
+ abstractBehavior.performSnapshotWithoutCapture(0);
+ assertEquals(-1, abstractBehavior.getReplicatedToAllIndex());
assertEquals(2, context.getReplicatedLog().size());
//2 entries, lastApplied still 0, no purging.
- context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0,2,1).build());
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1).build());
context.setLastApplied(1);
- assertEquals(0, behavior.fakeSnapshot(0, -1));
+ abstractBehavior.performSnapshotWithoutCapture(0);
+ assertEquals(0, abstractBehavior.getReplicatedToAllIndex());
assertEquals(1, context.getReplicatedLog().size());
//5 entries, lastApplied =2 and replicatedIndex = 3, but since we want to keep the lastapplied, indices 0 and 1 will only get purged
- context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0,5,1).build());
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 5, 1).build());
context.setLastApplied(2);
- assertEquals(1, behavior.fakeSnapshot(3, 1));
+ abstractBehavior.performSnapshotWithoutCapture(3);
+ assertEquals(1, abstractBehavior.getReplicatedToAllIndex());
assertEquals(3, context.getReplicatedLog().size());
-
+ // scenario where Last applied > Replicated to all index (becoz of a slow follower)
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ context.setLastApplied(2);
+ abstractBehavior.performSnapshotWithoutCapture(1);
+ assertEquals(1, abstractBehavior.getReplicatedToAllIndex());
+ assertEquals(1, context.getReplicatedLog().size());
}
+
protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(
ActorRef actorRef, RaftRPC rpc) {
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.IsolatedLeaderCheck;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
assertTrue(raftBehavior instanceof Leader);
- // we might receive some heartbeat messages, so wait till we InitiateInstallSnapshot
+ // we might receive some heartbeat messages, so wait till we get CaptureSnapshot
Boolean[] matches = new ReceiveWhile<Boolean>(Boolean.class, duration("2 seconds")) {
@Override
protected Boolean match(Object o) throws Exception {
- if (o instanceof InitiateInstallSnapshot) {
+ if (o instanceof CaptureSnapshot) {
return true;
}
return false;
}
}.get();
- boolean initiateInitiateInstallSnapshot = false;
+ boolean captureSnapshot = false;
for (Boolean b: matches) {
- initiateInitiateInstallSnapshot = b | initiateInitiateInstallSnapshot;
+ captureSnapshot = b | captureSnapshot;
}
- assertTrue(initiateInitiateInstallSnapshot);
+ assertTrue(captureSnapshot);
}};
}
ActorRef followerActor = getTestActor();
Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
-
+ peerAddresses.put(followerActor.path().toString(), followerActor.path().toString());
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext(leaderActor);
+ MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext(leaderActor);
actorContext.setPeerAddresses(peerAddresses);
Map<String, String> leadersSnapshot = new HashMap<>();
leader.setSnapshot(Optional.<ByteString>absent());
// new entry
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ ReplicatedLogImplEntry entry = new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
new MockRaftActorContext.MockPayload("D"));
actorContext.getReplicatedLog().append(entry);
- // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
+ //update follower timestamp
+ leader.markFollowerActive(followerActor.path().toString());
+
RaftActorBehavior raftBehavior = leader.handleMessage(
- leaderActor, new InitiateInstallSnapshot());
+ senderActor, new Replicate(null, "state-id", entry));
CaptureSnapshot cs = MessageCollectorActor.
getFirstMatching(leaderActor, CaptureSnapshot.class);
assertEquals(2, cs.getLastTerm());
// if an initiate is started again when first is in progress, it shouldnt initiate Capture
- raftBehavior = leader.handleMessage(leaderActor, new InitiateInstallSnapshot());
+ leader.handleMessage(senderActor, new Replicate(null, "state-id", entry));
List<Object> captureSnapshots = MessageCollectorActor.getAllMatching(leaderActor, CaptureSnapshot.class);
assertEquals("CaptureSnapshot should not get invoked when initiate is in progress", 1, captureSnapshots.size());
protected final Logger LOG = LoggerFactory.getLogger(getClass());
public AbstractUntypedPersistentActor() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Actor created {}", getSelf());
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Actor created {}", getSelf());
}
getContext().
system().
@Override public void onReceiveCommand(Object message) throws Exception {
final String messageType = message.getClass().getSimpleName();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Received message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Received message {}", messageType);
}
handleCommand(message);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Done handling message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Done handling message {}", messageType);
}
}
@Override public void onReceiveRecover(Object message) throws Exception {
final String messageType = message.getClass().getSimpleName();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Received message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Received message {}", messageType);
}
handleRecover(message);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Done handling message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Done handling message {}", messageType);
}
}
@Override
public void onReceiveCommand(final Object message) throws Exception {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: onReceiveCommand: Received message {} from {}", persistenceId(), message, getSender());
- }
-
if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
handleCreateTransaction(message);
} else if(message instanceof ForwardedReadyTransaction) {
sender().tell((returnSerialized ? readDataReply.toSerializable(clientTxVersion): readDataReply), self());
} catch (Exception e) {
- LOG.error(String.format("Unexpected error reading path %s", path), e);
+ LOG.debug(String.format("Unexpected error reading path %s", path), e);
shardStats.incrementFailedReadTransactionsCount();
sender().tell(new akka.actor.Status.Failure(e), self());
}
NormalizedNode<?,?> expectedRoot = readStore(shard, YangInstanceIdentifier.builder().build());
- CaptureSnapshot capture = new CaptureSnapshot(-1, -1, -1, -1);
+ CaptureSnapshot capture = new CaptureSnapshot(-1, -1, -1, -1, -1, -1);
shard.tell(capture, getRef());
assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
*/
package org.opendaylight.controller.md.sal.dom.api;
+import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import java.util.Objects;
import javax.annotation.Nonnull;
@Override
public final String toString() {
- return com.google.common.base.Objects.toStringHelper(this).omitNullValues().add("type", type).add("contextReference", getContextReference()).toString();
+ return MoreObjects.toStringHelper(this).omitNullValues().add("type", type).add("contextReference", getContextReference()).toString();
}
}
package org.opendaylight.controller.sal.core.api;
import java.util.concurrent.Future;
-
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+/**
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcService} instead.
+ */
+@Deprecated
public interface RpcConsumptionRegistry {
/**
* Sends an RPC to other components registered to the broker.
*/
package org.opendaylight.controller.sal.core.api;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.Set;
-
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import com.google.common.util.concurrent.ListenableFuture;
-
/**
* {@link Provider}'s implementation of an RPC.
*
* {@link RpcResult}
* <li> {@link Broker} returns the {@link RpcResult} to {@link Consumer}
* </ol>
+ *
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation} instead.
*/
+@Deprecated
public interface RpcImplementation extends Provider.ProviderFunctionality {
/**
/**
* Exception reported when no RPC implementation is found in the system.
+ *
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException} instead.
*/
+@Deprecated
public class RpcImplementationUnavailableException extends RuntimeException {
private static final long serialVersionUID = 1L;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+/**
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService} and {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcService} instead.
+ */
+@Deprecated
public interface RpcProvisionRegistry extends RpcImplementation, BrokerService, RouteChangePublisher<RpcRoutingContext, YangInstanceIdentifier>, DOMService {
/**
package org.opendaylight.controller.sal.core.api;
import java.util.EventListener;
-
import org.opendaylight.yangtools.yang.common.QName;
+/**
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener} instead.
+ */
+@Deprecated
public interface RpcRegistrationListener extends EventListener {
public void onRpcImplementationAdded(QName name);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Objects;
-import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
@Override
public String toString() {
- return addToStringAttributes(Objects.toStringHelper(this)).toString();
+ return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
}
protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.japi.Creator;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
private final Configuration configuration;
private final String followerId;
private final Logger LOG = LoggerFactory.getLogger(DummyShard.class);
+ private long lastMessageIndex = -1;
+ private long lastMessageSize = 0;
public DummyShard(Configuration configuration, String followerId) {
this.configuration = configuration;
}
protected void handleAppendEntries(AppendEntries req) throws InterruptedException {
- LOG.info("{} - Received AppendEntries message : leader term, index, size = {}, {}, {}", followerId, req.getTerm(),req.getLeaderCommit(), req.getEntries().size());
+
+ LOG.info("{} - Received AppendEntries message : leader term = {}, index = {}, prevLogIndex = {}, size = {}",
+ followerId, req.getTerm(),req.getLeaderCommit(), req.getPrevLogIndex(), req.getEntries().size());
+
+ if(lastMessageIndex == req.getLeaderCommit() && req.getEntries().size() > 0 && lastMessageSize > 0){
+ LOG.error("{} - Duplicate message with leaderCommit = {} prevLogIndex = {} received", followerId, req.getLeaderCommit(), req.getPrevLogIndex());
+ }
+
+ lastMessageIndex = req.getLeaderCommit();
+ lastMessageSize = req.getEntries().size();
+
long lastIndex = req.getLeaderCommit();
- if (req.getEntries().size() > 0)
- lastIndex = req.getEntries().get(0).getIndex();
+ if (req.getEntries().size() > 0) {
+ for(ReplicatedLogEntry entry : req.getEntries()) {
+ lastIndex = entry.getIndex();
+ }
+ }
- if (configuration.shouldCauseTrouble()) {
+ if (configuration.shouldCauseTrouble() && req.getEntries().size() > 0) {
boolean ignore = false;
if (configuration.shouldDropReplies()) {
--- /dev/null
+org.slf4j.simpleLogger.showDateTime=true
+org.slf4j.simpleLogger.dateTimeFormat=hh:mm:ss,S a
+org.slf4j.simpleLogger.logFile=System.out
+org.slf4j.simpleLogger.showShortLogName=true
+org.slf4j.simpleLogger.levelInBrackets=true
+org.slf4j.simpleLogger.defaultLogLevel=info
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import com.google.common.base.Objects;
-import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
-
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.slf4j.Logger;
@Override
public final String toString() {
- return addToStringAttributes(Objects.toStringHelper(this)).toString();
+ return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
}
/**
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;