BUG-5280: move DataTreeCandidate serialization to its own class
controller.git: opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
/*
 * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;

/**
 * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
 * and journal log entry batch is de-serialized and applied to its own write transaction instance
 * in parallel on a thread pool for faster recovery time. However, the transactions are committed
 * to the data store in the order the corresponding snapshot or log batch is received, to preserve
 * data store integrity.
 *
 * @author Thomas Pantelis
 */
class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
    private final ShardDataTree store;
    private final String shardName;
    private final Logger log;
    private final SchemaContext schemaContext;
    private PruningDataTreeModification transaction;
    private int size;
    private final byte[] restoreFromSnapshot;

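    /**
     * Constructs a coordinator for the given shard. The restoreFromSnapshot bytes are simply held
     * and handed back from {@link #getRestoreFromSnapshot()}.
     */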
    ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
            String shardName, Logger log) {
        this.store = Preconditions.checkNotNull(store);
        this.restoreFromSnapshot = restoreFromSnapshot;
        this.shardName = shardName;
        this.log = log;
        this.schemaContext = schemaContext;
    }

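    /**
     * Opens a new batch by creating a fresh pruning modification against the shard's data tree and
     * resetting the batch size counter.
     */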
    @Override
    public void startLogRecoveryBatch(int maxBatchSize) {
        log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
        transaction = new PruningDataTreeModification(store.newModification(), store.getDataTree(), schemaContext);
        size = 0;
    }

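    /**
     * Applies one recovered journal entry to the current batch. Only {@link DataTreeCandidatePayload}
     * entries are understood; any other payload type is logged as an error and skipped.
     */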
    @Override
    public void appendRecoveredLogEntry(Payload payload) {
        Preconditions.checkState(transaction != null,
                "call startLogRecoveryBatch before calling appendRecoveredLogEntry");

        try {
            if (payload instanceof DataTreeCandidatePayload) {
                DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
                size++;
            } else {
                log.error("{}: Unknown payload {} received during recovery", shardName, payload);
            }
        } catch (IOException e) {
            log.error("{}: Error extracting DataTreeCandidate from payload", shardName, e);
        }
    }

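    /**
     * Commits the modification that resulted from pruning to the shard data tree.
     */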
    private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
        store.commit(tx.getResultingModification());
    }

    /**
     * Applies the current batched log entries to the data store.
     */
    @Override
    public void applyCurrentLogRecoveryBatch() {
        Preconditions.checkState(transaction != null,
                "call startLogRecoveryBatch before calling applyCurrentLogRecoveryBatch");

        log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
        try {
            commitTransaction(transaction);
        } catch (Exception e) {
            File file = new File(System.getProperty("karaf.data", "."),
                    "failed-recovery-batch-" + shardName + ".out");
            DataTreeModificationOutput.toFile(file, transaction.getResultingModification());
            throw new RuntimeException(String.format(
                    "%s: Failed to apply recovery batch. Modification data was written to file %s",
                    shardName, file), e);
        }
        transaction = null;
    }

    /**
     * Applies a recovered snapshot to the data store.
     *
     * @param snapshotBytes the serialized snapshot
     */
    @Override
    public void applyRecoverySnapshot(final byte[] snapshotBytes) {
        log.debug("{}: Applying recovered snapshot", shardName);

        final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
        final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(),
                store.getDataTree(), schemaContext);

        // The snapshot is the serialized root of the data tree, so it is written back at the root path.
        tx.write(YangInstanceIdentifier.EMPTY, node);
        try {
            commitTransaction(tx);
        } catch (Exception e) {
            File file = new File(System.getProperty("karaf.data", "."),
                    "failed-recovery-snapshot-" + shardName + ".xml");
            NormalizedNodeXMLOutput.toFile(file, node);
            throw new RuntimeException(String.format(
                    "%s: Failed to apply recovery snapshot. Node data was written to file %s",
                    shardName, file), e);
        }
    }

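    /**
     * Returns the serialized snapshot, if any, that was supplied at construction time for restoring
     * this shard.
     */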
    @Override
    public byte[] getRestoreFromSnapshot() {
        return restoreFromSnapshot;
    }
}
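For context, here is a minimal, illustrative sketch of how a recovering Raft actor might drive a RaftActorRecoveryCohort such as this class: a snapshot (if present) is applied first, then journal entries are replayed in bounded batches. The RecoveryDriverSketch class, its replayJournal method and its parameters are hypothetical names introduced only for this example; only the cohort methods defined above are assumed.

import java.util.List;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;

// Hypothetical driver, not part of this patch: shows the expected call order on the cohort.
final class RecoveryDriverSketch {
    private final RaftActorRecoveryCohort cohort;

    RecoveryDriverSketch(RaftActorRecoveryCohort cohort) {
        this.cohort = cohort;
    }

    void replayJournal(byte[] snapshotBytes, List<Payload> journalEntries, int maxBatchSize) {
        if (snapshotBytes != null) {
            // A recovered snapshot, when present, is applied before any journal entries.
            cohort.applyRecoverySnapshot(snapshotBytes);
        }

        if (journalEntries.isEmpty()) {
            return;
        }

        // Journal entries are replayed in bounded batches: open a batch, append entries,
        // and commit the batch before starting the next one.
        cohort.startLogRecoveryBatch(maxBatchSize);
        int appended = 0;
        for (Payload entry : journalEntries) {
            if (appended == maxBatchSize) {
                cohort.applyCurrentLogRecoveryBatch();
                cohort.startLogRecoveryBatch(maxBatchSize);
                appended = 0;
            }
            cohort.appendRecoveredLogEntry(entry);
            appended++;
        }
        // Commit whatever remains in the final, partially filled batch.
        cohort.applyCurrentLogRecoveryBatch();
    }
}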