Fix DeleteEntries persisting with wrong index 03/89703/5
author	tadei.bilan <tadei.bilan@pantheon.tech>
Tue, 12 May 2020 14:03:37 +0000 (17:03 +0300)
committer	Robert Varga <nite@hq.sk>
Tue, 19 May 2020 08:40:18 +0000 (08:40 +0000)
removeFromAndPersist() first removes the entries, which yields an index adjusted for the last snapshot,
 and only then persists DeleteEntries. Persisting that already-adjusted index means recovery adjusts it
 a second time, which results in additional entries being deleted during recovery.

JIRA: CONTROLLER-1934

Signed-off-by: tadei.bilan <tadei.bilan@pantheon.tech>
Change-Id: Ib326f02630281ed52982dab2f48c01503b791ef7
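
The double adjustment can be illustrated with a minimal, self-contained sketch. All names below
(DeleteEntriesSketch, append, entries) are hypothetical, and the index arithmetic is a simplification
of the real adjusted-index handling assuming a non-negative snapshot index; it is not the actual
ReplicatedLog implementation.

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical sketch of why persisting the adjusted index deletes extra entries on recovery.
    final class DeleteEntriesSketch {
        // The journal holds only entries newer than the last snapshot, so an absolute
        // log index must be converted to a journal position before trimming.
        private final List<String> journal = new ArrayList<>();
        private final long snapshotIndex;

        DeleteEntriesSketch(final long snapshotIndex) {
            this.snapshotIndex = snapshotIndex;
        }

        void append(final String data) {
            journal.add(data);
        }

        List<String> entries() {
            return journal;
        }

        // Mirrors removeFrom(): converts the absolute index into a journal position,
        // trims the tail and returns the adjusted (journal-relative) index, or -1.
        long removeFrom(final long logEntryIndex) {
            final long adjusted = logEntryIndex - snapshotIndex - 1;
            if (adjusted < 0 || adjusted >= journal.size()) {
                return -1;
            }
            journal.subList((int) adjusted, journal.size()).clear();
            return adjusted;
        }

        public static void main(final String[] args) {
            // Snapshot covers indices 0..1; the journal holds absolute indices 2..5.
            final DeleteEntriesSketch runtime = new DeleteEntriesSketch(1);
            runtime.append("two");
            runtime.append("three");
            runtime.append("four");
            runtime.append("five");

            // removeFrom(4) trims "four" and "five" and returns the adjusted index 2.
            final long adjusted = runtime.removeFrom(4);
            System.out.println("runtime journal:   " + runtime.entries());   // [two, three]

            // Recovery rebuilds the journal from the snapshot and replays DeleteEntries
            // through the same index-adjusting removal, adjusting the index a second time.
            final DeleteEntriesSketch recovery = new DeleteEntriesSketch(1);
            recovery.append("two");
            recovery.append("three");
            recovery.append("four");
            recovery.append("five");

            // Buggy behaviour: DeleteEntries(2) was persisted, so recovery computes
            // 2 - 1 - 1 = 0 and trims from position 0, deleting "two" and "three" as well.
            recovery.removeFrom(adjusted);
            System.out.println("recovered journal: " + recovery.entries());  // [] instead of [two, three]
        }
    }

Replaying the original absolute index 4 would reproduce exactly the trim that happened at runtime,
which is what this change achieves by persisting logEntryIndex instead of adjustedIndex.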

opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java

index c22f6f431bd4cf833ab6eb7c275563e9d357d386..66d7b352a22f3ec94a896104098d3145c4fc30d5 100644 (file)
@@ -42,10 +42,9 @@ final class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
 
     @Override
     public boolean removeFromAndPersist(final long logEntryIndex) {
-        // FIXME: Maybe this should be done after the command is saved
         long adjustedIndex = removeFrom(logEntryIndex);
         if (adjustedIndex >= 0) {
-            context.getPersistenceProvider().persist(new DeleteEntries(adjustedIndex), NoopProcedure.instance());
+            context.getPersistenceProvider().persist(new DeleteEntries(logEntryIndex), NoopProcedure.instance());
             return true;
         }
 
index de6a4909c3c46ec5aac42a8455c69c2f2b446644..2ef77c162e72c54df841e8501eda299cc0d56ba3 100644 (file)
@@ -204,6 +204,51 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         assertEquals("Follower state", expFollowerState, follower2Underlying.getState());
     }
 
+    @Test
+    public void testRecoveryDeleteEntries() {
+        send2InitialPayloads();
+
+        sendPayloadData(leaderActor, "two");
+
+        // This should trigger a snapshot.
+        sendPayloadData(leaderActor, "three");
+
+        MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+        MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyJournalEntries.class, 2);
+
+        // Disconnect follower from leader
+        killActor(follower1Actor);
+
+        // Send more payloads
+        sendPayloadData(leaderActor, "four");
+        sendPayloadData(leaderActor, "five");
+
+        verifyRaftState(leaderActor, raftState -> {
+            assertEquals("leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
+        });
+
+        // Remove entries starting from index 4
+        leaderActor.underlyingActor().getReplicatedLog().removeFromAndPersist(4);
+
+        verifyRaftState(leaderActor, raftState -> {
+            assertEquals("leader journal last index", 3, leaderContext.getReplicatedLog().lastIndex());
+        });
+
+        // Send new payloads
+        final MockPayload payload4 = sendPayloadData(leaderActor, "newFour");
+        final MockPayload payload5 = sendPayloadData(leaderActor, "newFive");
+
+        verifyRaftState(leaderActor, raftState -> {
+            assertEquals("leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
+        });
+
+        reinstateLeaderActor();
+
+        assertEquals("Leader last index", 5 , leaderActor.underlyingActor().getReplicatedLog().lastIndex());
+        assertEquals(payload4 ,leaderActor.underlyingActor().getReplicatedLog().get(4).getData());
+        assertEquals(payload5 ,leaderActor.underlyingActor().getReplicatedLog().get(5).getData());
+    }
+
     private void reinstateLeaderActor() {
         killActor(leaderActor);