X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FShardRecoveryCoordinator.java;h=82a6b720f07d05303991235619515641af02acd6;hp=797641978d2cd47cc7eed57c12e77e2334cb943c;hb=288a70d15252b3c5fafd202fe7935563f05da9c8;hpb=f16ea6ce80e6c0a47eae0cc1cf9d404c57d7880d

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
index 797641978d..82a6b720f0 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
@@ -7,9 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import com.google.common.base.Preconditions;
 import java.io.IOException;
+import java.net.URI;
+import java.util.Set;
 import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.node.utils.transformer.NormalizedNodePruner;
+import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
 import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
@@ -17,10 +22,9 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Compos
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 
 /**
@@ -34,27 +38,34 @@ import org.slf4j.Logger;
  */
 class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
     private static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
-    private final DataTree store;
+    private final ShardDataTree store;
     private final String shardName;
     private final Logger log;
-    private DataTreeModification transaction;
+    private final Set<URI> validNamespaces;
+    private PruningDataTreeModification transaction;
     private int size;
+    private final byte[] restoreFromSnapshot;
 
-    ShardRecoveryCoordinator(ShardDataTree store, String shardName, Logger log) {
-        this.store = store.getDataTree();
+    ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
+            String shardName, Logger log) {
+        this.store = Preconditions.checkNotNull(store);
+        this.restoreFromSnapshot = restoreFromSnapshot;
         this.shardName = shardName;
         this.log = log;
+        this.validNamespaces = NormalizedNodePruner.namespaces(schemaContext);
     }
 
     @Override
     public void startLogRecoveryBatch(int maxBatchSize) {
         log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
-        transaction = store.takeSnapshot().newModification();
+        transaction = new PruningDataTreeModification(store.newModification(), validNamespaces);
         size = 0;
     }
 
     @Override
     public void appendRecoveredLogEntry(Payload payload) {
+        Preconditions.checkState(transaction != null, "call startLogRecovery before calling appendRecoveredLogEntry");
+
         try {
             if (payload instanceof DataTreeCandidatePayload) {
                 DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
@@ -79,10 +90,8 @@ class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
         }
     }
 
-    private void commitTransaction(DataTreeModification tx) throws DataValidationFailedException {
-        tx.ready();
-        store.validate(tx);
-        store.commit(store.prepare(tx));
+    private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
+        store.commit(tx.getDelegate());
     }
 
     /**
@@ -90,6 +99,8 @@ class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
      */
     @Override
     public void applyCurrentLogRecoveryBatch() {
+        Preconditions.checkState(transaction != null, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
+
         log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
         try {
             commitTransaction(transaction);
@@ -109,7 +120,7 @@ class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
         log.debug("{}: Applying recovered snapshot", shardName);
 
         final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-        final DataTreeModification tx = store.takeSnapshot().newModification();
+        final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(), validNamespaces);
         tx.write(ROOT, node);
         try {
             commitTransaction(tx);
@@ -117,4 +128,9 @@ class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
             log.error("{}: Failed to apply recovery snapshot", shardName, e);
         }
     }
+
+    @Override
+    public byte[] getRestoreFromSnapshot() {
+        return restoreFromSnapshot;
+    }
 }
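Context for the patch above (illustrative, not part of the commit): recovery now routes both journal replay and snapshot application through a PruningDataTreeModification built from NormalizedNodePruner.namespaces(schemaContext), which suggests that recovered data under a namespace the current SchemaContext no longer declares is pruned rather than aborting recovery. The stand-alone sketch below uses only plain JDK types and hypothetical namespace values to illustrate that namespace-pruning idea.

    import java.net.URI;
    import java.util.List;
    import java.util.Set;

    // Illustrative sketch only -- not part of the patch. It mimics the pruning idea:
    // recovered data tagged with a namespace that the current schema no longer
    // declares is skipped rather than failing recovery.
    public final class NamespacePruningSketch {
        public static void main(String[] args) {
            // Namespaces the current schema still knows about (hypothetical values).
            Set<URI> validNamespaces = Set.of(URI.create("urn:example:cars"));

            // Namespaces seen in recovered journal data (hypothetical values).
            List<URI> recoveredNamespaces = List.of(
                    URI.create("urn:example:cars"),       // still valid -> replayed
                    URI.create("urn:example:obsolete"));  // unknown     -> pruned

            recoveredNamespaces.stream()
                    .filter(validNamespaces::contains)
                    .forEach(ns -> System.out.println("replaying data under " + ns));
        }
    }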