Bug 4564: Implement datastore restore from backup file
[controller.git] opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
/*
 * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import com.google.common.base.Preconditions;
import java.io.IOException;
import java.net.URI;
import java.util.Set;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.node.utils.transformer.NormalizedNodePruner;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;

/**
 * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
 * and journal log entry batch is de-serialized and applied to its own write transaction
 * (a pruning data tree modification). The transactions are committed to the data store in the
 * order the corresponding snapshot or log batch is received, preserving data store integrity.
 *
 * @author Thomas Pantelis
 */
class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
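    // Identifier of the data tree root (an empty YangInstanceIdentifier), used when applying snapshots.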
    private static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
    private final ShardDataTree store;
    private final String shardName;
    private final Logger log;
    private final Set<URI> validNamespaces;
    private PruningDataTreeModification transaction;
    private int size;
    private final byte[] restoreFromSnapshot;

    ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
            String shardName, Logger log) {
        this.store = Preconditions.checkNotNull(store);
        this.restoreFromSnapshot = restoreFromSnapshot;
        this.shardName = shardName;
        this.log = log;
        this.validNamespaces = NormalizedNodePruner.namespaces(schemaContext);
    }

    @Override
    public void startLogRecoveryBatch(int maxBatchSize) {
        log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
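        // Each batch is accumulated in its own pruning modification; nodes belonging to
        // namespaces not present in the current schema are pruned instead of failing recovery.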
        transaction = new PruningDataTreeModification(store.newModification(), validNamespaces);
        size = 0;
    }

    @Override
    public void appendRecoveredLogEntry(Payload payload) {
        Preconditions.checkState(transaction != null, "call startLogRecoveryBatch before calling appendRecoveredLogEntry");

        try {
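            // Replay the recovered entry. Besides the current DataTreeCandidatePayload format,
            // several legacy serialized modification payload formats are still supported.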
            if (payload instanceof DataTreeCandidatePayload) {
                DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
                size++;
            } else if (payload instanceof ModificationPayload) {
                MutableCompositeModification.fromSerializable(
                    ((ModificationPayload) payload).getModification()).apply(transaction);
                size++;
            } else if (payload instanceof CompositeModificationPayload) {
                MutableCompositeModification.fromSerializable(
                    ((CompositeModificationPayload) payload).getModification()).apply(transaction);
                size++;
            } else if (payload instanceof CompositeModificationByteStringPayload) {
                MutableCompositeModification.fromSerializable(
                        ((CompositeModificationByteStringPayload) payload).getModification()).apply(transaction);
                size++;
            } else {
                log.error("{}: Unknown payload {} received during recovery", shardName, payload);
            }
        } catch (IOException | ClassNotFoundException e) {
            log.error("{}: Error extracting ModificationPayload", shardName, e);
        }
    }

    private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
        store.commit(tx.getDelegate());
    }

    /**
     * Applies the current batched log entries to the data store.
     */
    @Override
    public void applyCurrentLogRecoveryBatch() {
        Preconditions.checkState(transaction != null, "call startLogRecoveryBatch before calling applyCurrentLogRecoveryBatch");

        log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
        try {
            commitTransaction(transaction);
        } catch (DataValidationFailedException e) {
            log.error("{}: Failed to apply recovery batch", shardName, e);
        }
        transaction = null;
    }

    /**
     * Applies a recovered snapshot to the data store.
     *
     * @param snapshotBytes the serialized snapshot
     */
    @Override
    public void applyRecoverySnapshot(final byte[] snapshotBytes) {
        log.debug("{}: Applying recovered snapshot", shardName);

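        // The snapshot is a single serialized NormalizedNode covering the entire data tree:
        // de-serialize it and write it at the root, pruning nodes from unknown namespaces.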
        final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
        final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(), validNamespaces);
        tx.write(ROOT, node);
        try {
            commitTransaction(tx);
        } catch (DataValidationFailedException e) {
            log.error("{}: Failed to apply recovery snapshot", shardName, e);
        }
    }

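    // Exposes the serialized backup contents supplied at construction (may be null) so the
    // owning RaftActor can restore the shard's state from a backup file (Bug 4564).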
    @Override
    public byte[] getRestoreFromSnapshot() {
        return restoreFromSnapshot;
    }
}
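For reference, a minimal usage sketch (not part of this change) showing the order in which the recovery cohort callbacks above are expected to be driven. The ShardRecoveryExample class, the backup file path, the shard name and the store/schemaContext/recoveredEntry/snapshotBytes arguments are hypothetical placeholders; only the ShardRecoveryCoordinator calls themselves come from the class above.

package org.opendaylight.controller.cluster.datastore;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.LoggerFactory;

// Hypothetical driver, for illustration only: mirrors how the RaftActorRecoveryCohort
// callbacks implemented by ShardRecoveryCoordinator are expected to be invoked in sequence.
final class ShardRecoveryExample {
    static void replay(ShardDataTree store, SchemaContext schemaContext, Payload recoveredEntry,
            byte[] snapshotBytes) throws IOException {
        // Backup bytes read from a hypothetical backup file (Bug 4564); passing null means
        // no restore from backup was requested.
        byte[] restoreBytes = Files.readAllBytes(Paths.get("backup/odl-datastore.bin"));

        ShardRecoveryCoordinator coordinator = new ShardRecoveryCoordinator(store, schemaContext,
                restoreBytes, "inventory", LoggerFactory.getLogger(ShardRecoveryExample.class));

        // Journal replay: a batch is opened, filled with recovered entries and committed,
        // strictly in the order the entries were received.
        coordinator.startLogRecoveryBatch(1000);
        coordinator.appendRecoveredLogEntry(recoveredEntry);
        coordinator.applyCurrentLogRecoveryBatch();

        // Snapshot replay: the serialized NormalizedNode blob is written at the data tree root.
        coordinator.applyRecoverySnapshot(snapshotBytes);

        // After recovery, the backup bytes supplied above are retrievable for the restore step.
        byte[] toRestore = coordinator.getRestoreFromSnapshot();
        assert toRestore == restoreBytes;
    }
}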