X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FShardRecoveryCoordinator.java;h=dae3383a2e10eb78a83798b2a5dd52de7000f891;hp=01a124b6977c801e3f273c57341efe91d97c52b2;hb=93e6f3bfc003d4ce2d968761dff963615a0b799d;hpb=ebaf3d71465066033d5882c61cdd2ec63b29d980

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
index 01a124b697..dae3383a2e 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
@@ -7,21 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
 import java.io.IOException;
-import java.util.List;
-import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
-import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
 import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 
 /**
@@ -31,56 +27,51 @@ import org.slf4j.Logger;
  * committed to the data store in the order the corresponding snapshot or log batch are received
  * to preserve data store integrity.
  *
- * @author Thomas Panetelis
+ * @author Thomas Pantelis
 */
 class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
-
-    private final InMemoryDOMDataStore store;
-    private List<ModificationPayload> currentLogRecoveryBatch;
+    private final ShardDataTree store;
     private final String shardName;
     private final Logger log;
-
-    ShardRecoveryCoordinator(InMemoryDOMDataStore store, String shardName, Logger log) {
-        this.store = store;
+    private final SchemaContext schemaContext;
+    private PruningDataTreeModification transaction;
+    private int size;
+    private final byte[] restoreFromSnapshot;
+
+    ShardRecoveryCoordinator(ShardDataTree store, SchemaContext schemaContext, byte[] restoreFromSnapshot,
+            String shardName, Logger log) {
+        this.store = Preconditions.checkNotNull(store);
+        this.restoreFromSnapshot = restoreFromSnapshot;
         this.shardName = shardName;
         this.log = log;
+        this.schemaContext = schemaContext;
     }
 
     @Override
     public void startLogRecoveryBatch(int maxBatchSize) {
-        currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
-
         log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
+        transaction = new PruningDataTreeModification(store.newModification(), store.getDataTree(), schemaContext);
+        size = 0;
     }
 
     @Override
     public void appendRecoveredLogEntry(Payload payload) {
+        Preconditions.checkState(transaction != null, "call startLogRecovery before calling appendRecoveredLogEntry");
+
         try {
-            if(payload instanceof ModificationPayload) {
-                currentLogRecoveryBatch.add((ModificationPayload) payload);
-            } else if (payload instanceof CompositeModificationPayload) {
-                currentLogRecoveryBatch.add(new ModificationPayload(MutableCompositeModification.fromSerializable(
-                        ((CompositeModificationPayload) payload).getModification())));
-            } else if (payload instanceof CompositeModificationByteStringPayload) {
-                currentLogRecoveryBatch.add(new ModificationPayload(MutableCompositeModification.fromSerializable(
-                        ((CompositeModificationByteStringPayload) payload).getModification())));
+            if (payload instanceof DataTreeCandidatePayload) {
+                DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
+                size++;
             } else {
                 log.error("{}: Unknown payload {} received during recovery", shardName, payload);
             }
         } catch (IOException e) {
            log.error("{}: Error extracting ModificationPayload", shardName, e);
         }
-
     }
 
-    private void commitTransaction(DOMStoreWriteTransaction transaction) {
-        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
-        try {
-            commitCohort.preCommit().get();
-            commitCohort.commit().get();
-        } catch (Exception e) {
-            log.error("{}: Failed to commit Tx on recovery", shardName, e);
-        }
+    private void commitTransaction(PruningDataTreeModification tx) throws DataValidationFailedException {
+        store.commit(tx.getResultingModification());
     }
 
     /**
@@ -88,20 +79,15 @@ class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
      */
     @Override
     public void applyCurrentLogRecoveryBatch() {
-        log.debug("{}: Applying current log recovery batch with size {}", shardName, currentLogRecoveryBatch.size());
+        Preconditions.checkState(transaction != null, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
 
-        DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
-        for(ModificationPayload payload: currentLogRecoveryBatch) {
-            try {
-                MutableCompositeModification.fromSerializable(payload.getModification()).apply(writeTx);
-            } catch (Exception e) {
-                log.error("{}: Error extracting ModificationPayload", shardName, e);
-            }
+        log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
+        try {
+            commitTransaction(transaction);
+        } catch (DataValidationFailedException e) {
+            log.error("{}: Failed to apply recovery batch", shardName, e);
         }
-
-        commitTransaction(writeTx);
-
-        currentLogRecoveryBatch = null;
+        transaction = null;
     }
 
     /**
@@ -111,14 +97,21 @@ class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
      */
     @Override
     public void applyRecoverySnapshot(final byte[] snapshotBytes) {
-        log.debug("{}: Applyng recovered sbapshot", shardName);
-
-        DOMStoreWriteTransaction writeTx = store.newWriteOnlyTransaction();
-
-        NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+        log.debug("{}: Applying recovered snapshot", shardName);
 
-        writeTx.write(YangInstanceIdentifier.builder().build(), node);
+        final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+        final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(),
+                store.getDataTree(), schemaContext);
+        tx.write(YangInstanceIdentifier.EMPTY, node);
+        try {
+            commitTransaction(tx);
+        } catch (DataValidationFailedException e) {
+            log.error("{}: Failed to apply recovery snapshot", shardName, e);
+        }
+    }
 
-        commitTransaction(writeTx);
+    @Override
+    public byte[] getRestoreFromSnapshot() {
+        return restoreFromSnapshot;
     }
 }
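
Read as a RaftActorRecoveryCohort implementation, the class above is driven by the Raft recovery machinery in a fixed order: a recovered snapshot, if present, is applied as a single pruning transaction, and each recovered journal batch is opened with startLogRecoveryBatch, filled via appendRecoveredLogEntry, and committed by applyCurrentLogRecoveryBatch. The sketch below only illustrates that calling order; the RecoveryReplaySketch class, the replayRecovery helper, and the way batches are passed in are hypothetical and not part of this change — only the cohort methods shown in the diff above are taken from the patch.

// Illustrative sketch only: shows the order in which a recovery driver is expected
// to invoke the RaftActorRecoveryCohort methods implemented by ShardRecoveryCoordinator
// in this patch. The class name, method name and batch layout here are hypothetical.
import java.util.List;

import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;

final class RecoveryReplaySketch {
    static void replayRecovery(final RaftActorRecoveryCohort cohort, final byte[] snapshotBytes,
            final List<List<Payload>> journalBatches) {
        if (snapshotBytes != null) {
            // A recovered snapshot becomes a single pruning transaction committed up front.
            cohort.applyRecoverySnapshot(snapshotBytes);
        }

        for (List<Payload> batch : journalBatches) {
            // Each journal batch is accumulated into one modification...
            cohort.startLogRecoveryBatch(batch.size());
            for (Payload payload : batch) {
                cohort.appendRecoveredLogEntry(payload);
            }
            // ...and committed in arrival order, preserving data store integrity.
            cohort.applyCurrentLogRecoveryBatch();
        }
    }
}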