/*
 * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.controller.cluster.datastore;
10 import com.google.common.base.Preconditions;
11 import com.google.common.base.Throwables;
13 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
14 import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
15 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
16 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
17 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
18 import org.slf4j.Logger;
/**
 * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
 * and journal log entry batch are de-serialized and applied to their own write transaction
 * instance in parallel on a thread pool for faster recovery time. However the transactions are
 * committed to the data store in the order the corresponding snapshot or log batch are received
 * to preserve data store integrity.
 *
 * @author Thomas Pantelis
 */
29 class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
30 private final ShardDataTree store;
31 private final String shardName;
32 private final Logger log;
33 private final byte[] restoreFromSnapshot;
/**
 * Constructs a recovery coordinator for a single shard.
 *
 * @param store the shard data tree to apply recovered entries to, must not be null
 * @param restoreFromSnapshot optional serialized snapshot to restore from, may be null
 * @param shardName the shard's name, used in log messages, must not be null
 * @param log the owning shard actor's logger, must not be null
 */
ShardRecoveryCoordinator(final ShardDataTree store, final byte[] restoreFromSnapshot,
        final String shardName, final Logger log) {
    this.store = Preconditions.checkNotNull(store);
    this.shardName = Preconditions.checkNotNull(shardName);
    this.log = Preconditions.checkNotNull(log);

    // Intentionally nullable: a null means "no snapshot to restore from".
    this.restoreFromSnapshot = restoreFromSnapshot;
}
46 public void startLogRecoveryBatch(final int maxBatchSize) {
47 log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
52 public void appendRecoveredLogEntry(final Payload payload) {
53 Preconditions.checkState(open, "call startLogRecovery before calling appendRecoveredLogEntry");
56 store.applyRecoveryPayload(payload);
57 } catch (Exception e) {
58 log.error("{}: failed to apply payload {}", shardName, payload, e);
59 throw new IllegalStateException(String.format("%s: Failed to apply recovery payload %s",
60 shardName, payload), e);
65 * Applies the current batched log entries to the data store.
68 public void applyCurrentLogRecoveryBatch() {
69 Preconditions.checkState(open, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
73 private File writeRoot(final String kind, final NormalizedNode<?, ?> node) {
74 final File file = new File(System.getProperty("karaf.data", "."),
75 "failed-recovery-" + kind + "-" + shardName + ".xml");
76 NormalizedNodeXMLOutput.toFile(file, node);
81 * Applies a recovered snapshot to the data store.
83 * @param snapshotBytes the serialized snapshot
86 public void applyRecoverySnapshot(final byte[] snapshotBytes) {
87 log.debug("{}: Applying recovered snapshot", shardName);
89 final ShardDataTreeSnapshot snapshot;
91 snapshot = ShardDataTreeSnapshot.deserialize(snapshotBytes);
92 } catch (Exception e) {
93 log.error("{}: failed to deserialize snapshot", shardName, e);
94 throw Throwables.propagate(e);
98 store.applyRecoverySnapshot(snapshot);
99 } catch (Exception e) {
100 final File f = writeRoot("snapshot", snapshot.getRootNode().orElse(null));
101 throw new IllegalStateException(String.format(
102 "%s: Failed to apply recovery snapshot %s. Node data was written to file %s",
103 shardName, snapshot, f), e);
108 public byte[] getRestoreFromSnapshot() {
109 return restoreFromSnapshot;