Remove use of {String,UUID}Identifier
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / main / java / org / opendaylight / controller / cluster / datastore / ShardRecoveryCoordinator.java
1 /*
2  * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8 package org.opendaylight.controller.cluster.datastore;
9
10 import com.google.common.base.Preconditions;
11 import java.io.IOException;
12 import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
13 import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
14 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
15 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
16 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
17 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
18 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
19 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
20 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
21 import org.slf4j.Logger;
22
23 /**
24  * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
25  * and journal log entry batch is de-serialized and applied to its own write transaction
26  * instance. The transactions are committed to the data store in the order the corresponding
27  * snapshot or log batch are received to preserve data store integrity.
28  *
29  * @author Thomas Pantelis
30  */
32 class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
33     private final ShardDataTree store;
34     private final String shardName;
35     private final Logger log;
36     private final SchemaContext schemaContext;
37     private PruningDataTreeModification transaction;
38     private int size;
39     private final byte[] restoreFromSnapshot;
40
41     ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
42             String shardName, Logger log) {
43         this.store = Preconditions.checkNotNull(store);
44         this.restoreFromSnapshot = restoreFromSnapshot;
45         this.shardName = shardName;
46         this.log = log;
47         this.schemaContext = schemaContext;
48     }
49
50     @Override
51     public void startLogRecoveryBatch(int maxBatchSize) {
52         log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
53         transaction = new PruningDataTreeModification(store.newModification(), store.getDataTree(), schemaContext);
54         size = 0;
55     }
56
57     @Override
58     public void appendRecoveredLogEntry(Payload payload) {
59         Preconditions.checkState(transaction != null, "call startLogRecovery before calling appendRecoveredLogEntry");
60
61         try {
62             if (payload instanceof DataTreeCandidatePayload) {
63                 DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
64                 size++;
65             } else {
66                 log.error("{}: Unknown payload {} received during recovery", shardName, payload);
67             }
68         } catch (IOException e) {
69             log.error("{}: Error extracting ModificationPayload", shardName, e);
70         }
71     }
72
73     private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
74         store.commit(tx.getResultingModification());
75     }
76
77     /**
78      * Applies the current batched log entries to the data store.
79      */
80     @Override
81     public void applyCurrentLogRecoveryBatch() {
82         Preconditions.checkState(transaction != null, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
83
84         log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
85         try {
86             commitTransaction(transaction);
87         } catch (DataValidationFailedException e) {
88             log.error("{}: Failed to apply recovery batch", shardName, e);
89         }
90         transaction = null;
91     }
92
93     /**
94      * Applies a recovered snapshot to the data store.
95      *
96      * @param snapshotBytes the serialized snapshot
97      */
98     @Override
99     public void applyRecoverySnapshot(final byte[] snapshotBytes) {
100         log.debug("{}: Applying recovered snapshot", shardName);
101
102         final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
103         final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(),
104                 store.getDataTree(), schemaContext);
105         tx.write(YangInstanceIdentifier.EMPTY, node);
106         try {
107             commitTransaction(tx);
108         } catch (DataValidationFailedException e) {
109             log.error("{}: Failed to apply recovery snapshot", shardName, e);
110         }
111     }
112
113     @Override
114     public byte[] getRestoreFromSnapshot() {
115         return restoreFromSnapshot;
116     }
117 }