opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java
/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.raft;

import akka.persistence.RecoveryCompleted;
import akka.persistence.SnapshotOffer;
import com.google.common.base.Stopwatch;
import java.io.ByteArrayInputStream;
import java.io.ObjectInputStream;
import java.util.Collections;
import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
import org.opendaylight.controller.cluster.raft.persisted.MigratedSerializable;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
import org.slf4j.Logger;

/**
 * Support class that handles persistence recovery for a RaftActor.
 *
 * @author Thomas Pantelis
 */
class RaftActorRecoverySupport {
    private final RaftActorContext context;
    private final RaftActorRecoveryCohort cohort;

    private int currentRecoveryBatchCount;
    private boolean dataRecoveredWithPersistenceDisabled;
    private boolean anyDataRecovered;
    private boolean hasMigratedDataRecovered;

    private Stopwatch recoveryTimer;
    private final Logger log;

    RaftActorRecoverySupport(final RaftActorContext context, final RaftActorRecoveryCohort cohort) {
        this.context = context;
        this.cohort = cohort;
        this.log = context.getLogger();
    }

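    /**
     * Dispatches a single message replayed from the persistence store during recovery to the appropriate
     * handler method.
     *
     * @param message the recovered message
     * @param persistentProvider the provider used to persist a cleanup snapshot once recovery finishes
     * @return true if the message was RecoveryCompleted, i.e. recovery has finished
     */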
    boolean handleRecoveryMessage(Object message, PersistentDataProvider persistentProvider) {
        log.trace("{}: handleRecoveryMessage: {}", context.getId(), message);

        anyDataRecovered = anyDataRecovered || !(message instanceof RecoveryCompleted);

        if (isMigratedSerializable(message)) {
            hasMigratedDataRecovered = true;
        }

        boolean recoveryComplete = false;
        if (message instanceof UpdateElectionTerm) {
            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
                    ((UpdateElectionTerm) message).getVotedFor());
        } else if (message instanceof SnapshotOffer) {
            onRecoveredSnapshot((SnapshotOffer) message);
        } else if (message instanceof ReplicatedLogEntry) {
            onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
        } else if (message instanceof ApplyJournalEntries) {
            onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
        } else if (message instanceof DeleteEntries) {
            onDeleteEntries((DeleteEntries) message);
        } else if (message instanceof ServerConfigurationPayload) {
            context.updatePeerIds((ServerConfigurationPayload) message);
        } else if (message instanceof RecoveryCompleted) {
            recoveryComplete = true;
            onRecoveryCompletedMessage(persistentProvider);
        }

        return recoveryComplete;
    }

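    /**
     * Applies the restore snapshot supplied by the cohort, if any, but only when nothing at all was recovered
     * from the persistence store; a non-empty store takes precedence and the restore snapshot is skipped with
     * a warning.
     */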
    private void possiblyRestoreFromSnapshot() {
        byte[] restoreFromSnapshot = cohort.getRestoreFromSnapshot();
        if (restoreFromSnapshot == null) {
            return;
        }

        if (anyDataRecovered) {
            log.warn("{}: The provided restore snapshot was not applied because the persistence store is not empty",
                    context.getId());
            return;
        }

        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(restoreFromSnapshot))) {
            Snapshot snapshot = (Snapshot) ois.readObject();

            log.debug("{}: Deserialized restore snapshot: {}", context.getId(), snapshot);

            context.getSnapshotManager().apply(new ApplySnapshot(snapshot));
        } catch (Exception e) {
            log.error("{}: Error deserializing snapshot restore", context.getId(), e);
        }
    }

    private ReplicatedLog replicatedLog() {
        return context.getReplicatedLog();
    }

    private void initRecoveryTimer() {
        if (recoveryTimer == null) {
            recoveryTimer = Stopwatch.createStarted();
        }
    }

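    /**
     * Rebuilds the replicated log, commit index, last-applied index and election term information from a
     * recovered snapshot and applies the snapshot state to the cohort. If persistence is disabled, only the
     * server configuration and election term information are preserved.
     */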
    private void onRecoveredSnapshot(SnapshotOffer offer) {
        if (log.isDebugEnabled()) {
            log.debug("{}: SnapshotOffer called.", context.getId());
        }

        initRecoveryTimer();

        Snapshot snapshot = (Snapshot) offer.snapshot();

        for (ReplicatedLogEntry entry: snapshot.getUnAppliedEntries()) {
            if (isMigratedPayload(entry)) {
                hasMigratedDataRecovered = true;
            }
        }

        if (!context.getPersistenceProvider().isRecoveryApplicable()) {
            // We may have just transitioned to disabled and have a snapshot containing state data and/or log
            // entries - we don't want to preserve these, only the server config and election term info.

            snapshot = Snapshot.create(new byte[0], Collections.emptyList(), -1, -1, -1, -1,
                    snapshot.getElectionTerm(), snapshot.getElectionVotedFor(), snapshot.getServerConfiguration());
        }

        // Create a replicated log with the snapshot information.
        // The replicated log can be used later on to retrieve this snapshot
        // when we need to install it on a peer.

        context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context));
        context.setLastApplied(snapshot.getLastAppliedIndex());
        context.setCommitIndex(snapshot.getLastAppliedIndex());
        context.getTermInformation().update(snapshot.getElectionTerm(), snapshot.getElectionVotedFor());

        Stopwatch timer = Stopwatch.createStarted();

        // Apply the snapshot to the actor's state.
        cohort.applyRecoverySnapshot(snapshot.getState());

        if (snapshot.getServerConfiguration() != null) {
            context.updatePeerIds(snapshot.getServerConfiguration());

            if (isMigratedSerializable(snapshot.getServerConfiguration())) {
                hasMigratedDataRecovered = true;
            }
        }

        timer.stop();
        log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
                context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
                replicatedLog().getSnapshotTerm(), replicatedLog().size());
    }

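    /**
     * Appends a recovered journal log entry to the in-memory replicated log, tracking server configuration
     * payloads and migrated payloads along the way. When persistence is disabled the entry is not appended;
     * instead, recovered data (other than PersistentPayload entries) is flagged so a cleanup snapshot is
     * saved once recovery completes.
     */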
    private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
        if (log.isDebugEnabled()) {
            log.debug("{}: Received ReplicatedLogEntry for recovery: index: {}, size: {}", context.getId(),
                    logEntry.getIndex(), logEntry.size());
        }

        if (isServerConfigurationPayload(logEntry)) {
            context.updatePeerIds((ServerConfigurationPayload) logEntry.getData());
        }

        if (isMigratedPayload(logEntry)) {
            hasMigratedDataRecovered = true;
        }

        if (context.getPersistenceProvider().isRecoveryApplicable()) {
            replicatedLog().append(logEntry);
        } else if (!isPersistentPayload(logEntry)) {
            dataRecoveredWithPersistenceDisabled = true;
        }
    }

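    /**
     * Applies recovered log entries up to the given index to the cohort in batches and advances the
     * last-applied and commit indexes accordingly. When persistence is disabled the entries are skipped and
     * only flagged for the post-recovery cleanup snapshot.
     */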
    private void onRecoveredApplyLogEntries(long toIndex) {
        if (!context.getPersistenceProvider().isRecoveryApplicable()) {
            dataRecoveredWithPersistenceDisabled = true;
            return;
        }

        long lastUnappliedIndex = context.getLastApplied() + 1;

        if (log.isDebugEnabled()) {
            // It can happen that lastUnappliedIndex > toIndex if the ApplyJournalEntries message is in the
            // persistent journal but the entries themselves were already applied and recovered via the snapshot.
            log.debug("{}: Received apply journal entries for recovery, applying to state: {} to {}",
                    context.getId(), lastUnappliedIndex, toIndex);
        }

        long lastApplied = lastUnappliedIndex - 1;
        for (long i = lastUnappliedIndex; i <= toIndex; i++) {
            ReplicatedLogEntry logEntry = replicatedLog().get(i);
            if (logEntry != null) {
                lastApplied++;
                batchRecoveredLogEntry(logEntry);
            } else {
                // Shouldn't happen but cover it anyway.
                log.error("{}: Log entry not found for index {}", context.getId(), i);
                break;
            }
        }

        context.setLastApplied(lastApplied);
        context.setCommitIndex(lastApplied);
    }

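    /**
     * Removes entries from the in-memory replicated log starting at the recovered DeleteEntries index, or
     * flags the recovered data for cleanup when persistence is disabled.
     */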
    private void onDeleteEntries(DeleteEntries deleteEntries) {
        if (context.getPersistenceProvider().isRecoveryApplicable()) {
            replicatedLog().removeFrom(deleteEntries.getFromIndex());
        } else {
            dataRecoveredWithPersistenceDisabled = true;
        }
    }

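    /**
     * Adds a recovered log entry's payload to the cohort's current recovery batch, starting a new batch if
     * needed and applying the batch once it reaches the configured journal recovery batch size. Server
     * configuration payloads are not forwarded to the cohort.
     */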
    private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
        initRecoveryTimer();

        int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
        if (!isServerConfigurationPayload(logEntry)) {
            if (currentRecoveryBatchCount == 0) {
                cohort.startLogRecoveryBatch(batchSize);
            }

            cohort.appendRecoveredLogEntry(logEntry.getData());

            if (++currentRecoveryBatchCount >= batchSize) {
                endCurrentLogRecoveryBatch();
            }
        }
    }

    private void endCurrentLogRecoveryBatch() {
        cohort.applyCurrentLogRecoveryBatch();
        currentRecoveryBatchCount = 0;
    }

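    /**
     * Finishes recovery: applies any partial log recovery batch and logs recovery statistics. If data was
     * recovered while persistence is disabled, or migrated messages were recovered, a snapshot is persisted
     * or captured so stale messages can be removed from the akka journal. Otherwise a restore snapshot
     * provided by the cohort may be applied.
     */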
    private void onRecoveryCompletedMessage(PersistentDataProvider persistentProvider) {
        if (currentRecoveryBatchCount > 0) {
            endCurrentLogRecoveryBatch();
        }

        String recoveryTime = "";
        if (recoveryTimer != null) {
            recoveryTimer.stop();
            recoveryTime = " in " + recoveryTimer.toString();
            recoveryTimer = null;
        }

        log.info("Recovery completed{} - Switching actor to Follower - Persistence Id = {} "
                + "Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, journal-size = {}",
                recoveryTime, context.getId(), replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
                replicatedLog().getSnapshotTerm(), replicatedLog().size());

        if (dataRecoveredWithPersistenceDisabled
                || (hasMigratedDataRecovered && !context.getPersistenceProvider().isRecoveryApplicable())) {
            if (hasMigratedDataRecovered) {
                log.info("{}: Saving snapshot after recovery due to migrated messages", context.getId());
            } else {
                log.info("{}: Saving snapshot after recovery due to data persistence disabled", context.getId());
            }

            // Either data persistence is disabled and we recovered some data entries (i.e. we must have just
            // transitioned to disabled or a persistence backup was restored), or we recovered migrated
            // messages. Either way, we persist a snapshot and delete all the messages from the akka journal
            // to clean out unwanted messages.

            Snapshot snapshot = Snapshot.create(new byte[0], Collections.<ReplicatedLogEntry>emptyList(), -1, -1, -1, -1,
                    context.getTermInformation().getCurrentTerm(), context.getTermInformation().getVotedFor(),
                    context.getPeerServerInfo(true));

            persistentProvider.saveSnapshot(snapshot);

            persistentProvider.deleteMessages(persistentProvider.getLastSequenceNumber());
        } else if (hasMigratedDataRecovered) {
            log.info("{}: Snapshot capture initiated after recovery due to migrated messages", context.getId());

            context.getSnapshotManager().capture(replicatedLog().last(), -1);
        } else {
            possiblyRestoreFromSnapshot();
        }
    }

    private static boolean isServerConfigurationPayload(ReplicatedLogEntry repLogEntry) {
        return repLogEntry.getData() instanceof ServerConfigurationPayload;
    }

    private static boolean isPersistentPayload(ReplicatedLogEntry repLogEntry) {
        return repLogEntry.getData() instanceof PersistentPayload;
    }

    private static boolean isMigratedPayload(ReplicatedLogEntry repLogEntry) {
        return isMigratedSerializable(repLogEntry.getData());
    }

    private static boolean isMigratedSerializable(Object message) {
        return message instanceof MigratedSerializable && ((MigratedSerializable) message).isMigrated();
    }
}