/*
 * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
/**
 * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
 * and journal log entry batch are de-serialized and applied to their own write transaction
 * instance in parallel on a thread pool for faster recovery time. However the transactions are
 * committed to the data store in the order the corresponding snapshot or log batch are received
 * to preserve data store integrity.
 *
 * @author Thomas Pantelis
 */
35 class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
36 private final ShardDataTree store;
37 private final String shardName;
38 private final Logger log;
39 private final SchemaContext schemaContext;
40 private PruningDataTreeModification transaction;
42 private final byte[] restoreFromSnapshot;
44 ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
45 String shardName, Logger log) {
46 this.store = Preconditions.checkNotNull(store);
47 this.restoreFromSnapshot = restoreFromSnapshot;
48 this.shardName = shardName;
50 this.schemaContext = schemaContext;
54 public void startLogRecoveryBatch(int maxBatchSize) {
55 log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
56 transaction = new PruningDataTreeModification(store.newModification(), store.getDataTree(), schemaContext);
61 public void appendRecoveredLogEntry(Payload payload) {
62 Preconditions.checkState(transaction != null, "call startLogRecovery before calling appendRecoveredLogEntry");
65 if (payload instanceof DataTreeCandidatePayload) {
66 DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
69 log.error("{}: Unknown payload {} received during recovery", shardName, payload);
71 } catch (IOException e) {
72 log.error("{}: Error extracting ModificationPayload", shardName, e);
76 private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
77 store.commit(tx.getResultingModification());
81 * Applies the current batched log entries to the data store.
84 public void applyCurrentLogRecoveryBatch() {
85 Preconditions.checkState(transaction != null, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
87 log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
89 commitTransaction(transaction);
90 } catch (Exception e) {
91 File file = new File(System.getProperty("karaf.data", "."),
92 "failed-recovery-batch-" + shardName + ".out");
93 DataTreeModificationOutput.toFile(file, transaction.getResultingModification());
94 throw new RuntimeException(String.format(
95 "%s: Failed to apply recovery batch. Modification data was written to file %s",
102 * Applies a recovered snapshot to the data store.
104 * @param snapshotBytes the serialized snapshot
107 public void applyRecoverySnapshot(final byte[] snapshotBytes) {
108 log.debug("{}: Applying recovered snapshot", shardName);
110 final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
111 final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(),
112 store.getDataTree(), schemaContext);
113 tx.write(YangInstanceIdentifier.EMPTY, node);
115 commitTransaction(tx);
116 } catch (Exception e) {
117 File file = new File(System.getProperty("karaf.data", "."),
118 "failed-recovery-snapshot-" + shardName + ".xml");
119 NormalizedNodeXMLOutput.toFile(file, node);
120 throw new RuntimeException(String.format(
121 "%s: Failed to apply recovery snapshot. Node data was written to file %s",
122 shardName, file), e);
127 public byte[] getRestoreFromSnapshot() {
128 return restoreFromSnapshot;