/*
 * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.io.File;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
/**
 * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
 * and journal log entry batch are de-serialized and applied to their own write transaction
 * instance in parallel on a thread pool for faster recovery time. However the transactions are
 * committed to the data store in the order the corresponding snapshot or log batch are received
 * to preserve data store integrity.
 *
 * @author Thomas Pantelis
 */
29 class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
30 private final ShardDataTree store;
31 private final String shardName;
32 private final Logger log;
33 private final byte[] restoreFromSnapshot;
37 ShardRecoveryCoordinator(final ShardDataTree store, final byte[] restoreFromSnapshot, final String shardName,
39 this.store = Preconditions.checkNotNull(store);
40 this.shardName = Preconditions.checkNotNull(shardName);
41 this.log = Preconditions.checkNotNull(log);
43 this.restoreFromSnapshot = restoreFromSnapshot;
47 public void startLogRecoveryBatch(final int maxBatchSize) {
48 log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
53 @SuppressWarnings("checkstyle:IllegalCatch")
54 public void appendRecoveredLogEntry(final Payload payload) {
55 Preconditions.checkState(open, "call startLogRecovery before calling appendRecoveredLogEntry");
58 store.applyRecoveryPayload(payload);
59 } catch (Exception e) {
60 log.error("{}: failed to apply payload {}", shardName, payload, e);
61 throw new IllegalStateException(String.format("%s: Failed to apply recovery payload %s",
62 shardName, payload), e);
67 * Applies the current batched log entries to the data store.
70 public void applyCurrentLogRecoveryBatch() {
71 Preconditions.checkState(open, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
75 private File writeRoot(final String kind, final NormalizedNode<?, ?> node) {
76 final File file = new File(System.getProperty("karaf.data", "."),
77 "failed-recovery-" + kind + "-" + shardName + ".xml");
78 NormalizedNodeXMLOutput.toFile(file, node);
83 * Applies a recovered snapshot to the data store.
85 * @param snapshotBytes the serialized snapshot
88 @SuppressWarnings("checkstyle:IllegalCatch")
89 public void applyRecoverySnapshot(final byte[] snapshotBytes) {
90 log.debug("{}: Applying recovered snapshot", shardName);
92 final ShardDataTreeSnapshot snapshot;
94 snapshot = ShardDataTreeSnapshot.deserialize(snapshotBytes);
95 } catch (Exception e) {
96 log.error("{}: failed to deserialize snapshot", shardName, e);
97 throw Throwables.propagate(e);
101 store.applyRecoverySnapshot(snapshot);
102 } catch (Exception e) {
103 final File f = writeRoot("snapshot", snapshot.getRootNode().orElse(null));
104 throw new IllegalStateException(String.format(
105 "%s: Failed to apply recovery snapshot %s. Node data was written to file %s",
106 shardName, snapshot, f), e);
111 public byte[] getRestoreFromSnapshot() {
112 return restoreFromSnapshot;