import com.google.common.base.Preconditions;
import java.io.File;
-import java.io.IOException;
-import java.util.Map.Entry;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateSupplier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
-import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
-import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
/**
- *
+ * Coordinates persistence recovery of journal log entries and snapshots for a shard.
+ *
 * @author Thomas Pantelis
 */
-class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
+abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
+    /**
+     * Variant used when there is no snapshot to restore from: getRestoreFromSnapshot() returns null.
+     */
+    private static final class Simple extends ShardRecoveryCoordinator {
+        Simple(final ShardDataTree store, final String shardName, final Logger log) {
+            super(store, shardName, log);
+        }
+
+        @Override
+        public Snapshot getRestoreFromSnapshot() {
+            return null;
+        }
+    }
+
+    /**
+     * Variant carrying a non-null Snapshot which is handed back from getRestoreFromSnapshot().
+     */
+    private static final class WithSnapshot extends ShardRecoveryCoordinator {
+        private final Snapshot restoreFromSnapshot;
+
+        WithSnapshot(final ShardDataTree store, final String shardName, final Logger log, final Snapshot snapshot) {
+            super(store, shardName, log);
+            this.restoreFromSnapshot = Preconditions.checkNotNull(snapshot);
+        }
+
+        @Override
+        public Snapshot getRestoreFromSnapshot() {
+            return restoreFromSnapshot;
+        }
+    }
+
    private final ShardDataTree store;
    private final String shardName;
    private final Logger log;
-    private final SchemaContext schemaContext;
-    private PruningDataTreeModification transaction;
-    private int size;
-    private final byte[] restoreFromSnapshot;
-    ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
-            String shardName, Logger log) {
+    // True between startLogRecoveryBatch() and applyCurrentLogRecoveryBatch()
+    private boolean open;
+
+    ShardRecoveryCoordinator(final ShardDataTree store, final String shardName, final Logger log) {
        this.store = Preconditions.checkNotNull(store);
-        this.restoreFromSnapshot = restoreFromSnapshot;
-        this.shardName = shardName;
-        this.log = log;
-        this.schemaContext = schemaContext;
+        this.shardName = Preconditions.checkNotNull(shardName);
+        this.log = Preconditions.checkNotNull(log);
+    }
+
+    /**
+     * Creates a coordinator with no restore-from snapshot.
+     */
+    static ShardRecoveryCoordinator create(final ShardDataTree store, final String shardName, final Logger log) {
+        return new Simple(store, shardName, log);
+    }
+
+    /**
+     * Creates a coordinator that reports the given snapshot from getRestoreFromSnapshot().
+     */
+    static ShardRecoveryCoordinator forSnapshot(final ShardDataTree store, final String shardName, final Logger log,
+            final Snapshot snapshot) {
+        return new WithSnapshot(store, shardName, log, snapshot);
    }
    @Override
-    public void startLogRecoveryBatch(int maxBatchSize) {
+    public void startLogRecoveryBatch(final int maxBatchSize) {
        log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
-        transaction = new PruningDataTreeModification(store.newModification(), store.getDataTree(), schemaContext);
-        size = 0;
+        // Entries are applied eagerly in appendRecoveredLogEntry() now, so just open the batch
+        open = true;
    }
    @Override
-    public void appendRecoveredLogEntry(Payload payload) {
-        Preconditions.checkState(transaction != null, "call startLogRecovery before calling appendRecoveredLogEntry");
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    public void appendRecoveredLogEntry(final Payload payload) {
+        Preconditions.checkState(open, "call startLogRecovery before calling appendRecoveredLogEntry");
        try {
-            if (payload instanceof DataTreeCandidateSupplier) {
-                final Entry<Optional<TransactionIdentifier>, DataTreeCandidate> e =
-                        ((DataTreeCandidateSupplier)payload).getCandidate();
-
-                DataTreeCandidates.applyToModification(transaction, e.getValue());
-                size++;
-
-                if (e.getKey().isPresent()) {
-                    // FIXME: BUG-5280: propagate transaction state
-                }
-            } else {
-                log.error("{}: Unknown payload {} received during recovery", shardName, payload);
-            }
-        } catch (IOException e) {
-            log.error("{}: Error extracting payload", shardName, e);
+            // Payload interpretation now lives in ShardDataTree
+            store.applyRecoveryPayload(payload);
+        } catch (Exception e) {
+            // Broad catch is deliberate (see @SuppressWarnings): any failure must abort recovery
+            log.error("{}: failed to apply payload {}", shardName, payload, e);
+            throw new IllegalStateException(String.format("%s: Failed to apply recovery payload %s",
+                shardName, payload), e);
        }
    }
-    private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
-        store.commit(tx.getResultingModification());
-    }
-
    /**
     * Applies the current batched log entries to the data store.
     */
    @Override
    public void applyCurrentLogRecoveryBatch() {
-        Preconditions.checkState(transaction != null, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
+        Preconditions.checkState(open, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
+        // Entries were already applied in appendRecoveredLogEntry(); just close the batch
+        open = false;
+    }

-        log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
-        try {
-            commitTransaction(transaction);
-        } catch (Exception e) {
-            File file = new File(System.getProperty("karaf.data", "."),
-                    "failed-recovery-batch-" + shardName + ".out");
-            DataTreeModificationOutput.toFile(file, transaction.getResultingModification());
-            throw new RuntimeException(String.format(
-                    "%s: Failed to apply recovery batch. Modification data was written to file %s",
-                    shardName, file), e);
-        }
-        transaction = null;
+    /**
+     * Writes the given root node to an XML dump file under the karaf.data directory
+     * for post-mortem analysis of a failed recovery.
+     */
+    private File writeRoot(final String kind, final NormalizedNode<?, ?> node) {
+        final File file = new File(System.getProperty("karaf.data", "."),
+            "failed-recovery-" + kind + "-" + shardName + ".xml");
+        NormalizedNodeXMLOutput.toFile(file, node);
+        return file;
    }
/**
* Applies a recovered snapshot to the data store.
*
- * @param snapshotBytes the serialized snapshot
+ * @param snapshotState the serialized snapshot
*/
@Override
- public void applyRecoverySnapshot(final byte[] snapshotBytes) {
+ @SuppressWarnings("checkstyle:IllegalCatch")
+ public void applyRecoverySnapshot(final Snapshot.State snapshotState) {
+ if (!(snapshotState instanceof ShardSnapshotState)) {
+ log.debug("{}: applyRecoverySnapshot ignoring snapshot: {}", shardName, snapshotState);
+ }
+
log.debug("{}: Applying recovered snapshot", shardName);
- final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
- final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(),
- store.getDataTree(), schemaContext);
- tx.write(YangInstanceIdentifier.EMPTY, node);
+ ShardDataTreeSnapshot shardSnapshot = ((ShardSnapshotState)snapshotState).getSnapshot();
try {
- commitTransaction(tx);
+ store.applyRecoverySnapshot(shardSnapshot);
} catch (Exception e) {
- File file = new File(System.getProperty("karaf.data", "."),
- "failed-recovery-snapshot-" + shardName + ".xml");
- NormalizedNodeXMLOutput.toFile(file, node);
- throw new RuntimeException(String.format(
- "%s: Failed to apply recovery snapshot. Node data was written to file %s",
- shardName, file), e);
+ final File f = writeRoot("snapshot", shardSnapshot.getRootNode().orElse(null));
+ throw new IllegalStateException(String.format(
+ "%s: Failed to apply recovery snapshot %s. Node data was written to file %s",
+ shardName, shardSnapshot, f), e);
}
}
-
- @Override
- public byte[] getRestoreFromSnapshot() {
- return restoreFromSnapshot;
- }
}