import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
+import java.util.Map.Entry;
+import java.util.Optional;
+import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateSupplier;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
/**
// Data tree being recovered; also the source of the current schema context (see startLogRecoveryBatch).
private final ShardDataTree store;
// Shard name, used only to tag log messages.
private final String shardName;
private final Logger log;
// Removed by this patch: the schema context is now obtained from the store instead of being injected.
- private final SchemaContext schemaContext;
// Pruning modification accumulating the current recovery batch; created in startLogRecoveryBatch().
private PruningDataTreeModification transaction;
// Number of log entries applied to the current batch.
private int size;
// Serialized snapshot to restore from; not null-checked in the constructor, so presumably optional — TODO confirm with callers.
private final byte[] restoreFromSnapshot;
/**
 * Constructs a recovery coordinator for the given shard.
 *
 * @param store the ShardDataTree being recovered; must not be null
 * @param restoreFromSnapshot serialized snapshot bytes to restore from; not
 *        null-checked here, so presumably optional — TODO confirm with callers
 * @param shardName name of the shard, used in log messages; must not be null
 * @param log logger used to report recovery progress; must not be null
 */
ShardRecoveryCoordinator(ShardDataTree store, byte[] restoreFromSnapshot, String shardName, Logger log) {
    this.log = Preconditions.checkNotNull(log);
    this.shardName = Preconditions.checkNotNull(shardName);
    this.store = Preconditions.checkNotNull(store);
    this.restoreFromSnapshot = restoreFromSnapshot;
}
/**
 * Begins a new recovery batch: resets the applied-entry counter and opens a fresh
 * pruning modification against the store's current data tree and schema context.
 *
 * @param maxBatchSize maximum number of entries expected in this batch (logged only)
 */
@Override
public void startLogRecoveryBatch(int maxBatchSize) {
    log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
    size = 0;
    transaction = new PruningDataTreeModification(store.newModification(), store.getDataTree(),
            store.getSchemaContext());
}
// Guard: a batch must have been opened first. The message previously referred to a
// non-existent "startLogRecovery" method; the actual method is startLogRecoveryBatch.
Preconditions.checkState(transaction != null,
        "call startLogRecoveryBatch before calling appendRecoveredLogEntry");
try {
    if (payload instanceof DataTreeCandidateSupplier) {
        final Entry<Optional<TransactionIdentifier>, DataTreeCandidate> entry =
                ((DataTreeCandidateSupplier) payload).getCandidate();

        // Apply the recovered candidate to the in-progress pruning modification
        // and count it toward the current batch.
        DataTreeCandidates.applyToModification(transaction, entry.getValue());
        size++;

        if (entry.getKey().isPresent()) {
            // FIXME: BUG-5280: propagate transaction state
        }
    } else {
        // Unrecognized payload types are logged and skipped rather than failing recovery.
        log.error("{}: Unknown payload {} received during recovery", shardName, payload);
    }
} catch (IOException e) {
    log.error("{}: Error extracting payload", shardName, e);
}
}
final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
final PruningDataTreeModification tx = new PruningDataTreeModification(store.newModification(),
- store.getDataTree(), schemaContext);
+ store.getDataTree(), store.getSchemaContext());
tx.write(YangInstanceIdentifier.EMPTY, node);
try {
commitTransaction(tx);