*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
* and journal log entry batch is de-serialized and applied to the data store in the order the
* corresponding snapshot or log batch are received to preserve data store integrity.
*
- * @author Thomas Panetelis
+ * @author Thomas Pantelis
*/
-class ShardRecoveryCoordinator {
-
- private static final int TIME_OUT = 10;
-
- private static final Logger LOG = LoggerFactory.getLogger(ShardRecoveryCoordinator.class);
-
- private final List<DOMStoreWriteTransaction> resultingTxList = Lists.newArrayList();
- private final SchemaContext schemaContext;
+class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
+ private static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
+ private final DataTree store;
private final String shardName;
- private final ExecutorService executor;
+ private final Logger log;
+ private DataTreeModification transaction;
+ private int size;
- ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext) {
- this.schemaContext = schemaContext;
+ ShardRecoveryCoordinator(ShardDataTree store, String shardName, Logger log) {
+ this.store = store.getDataTree();
this.shardName = shardName;
-
- executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("ShardRecovery-" + shardName + "-%d").build());
- }
-
- /**
- * Submits a batch of journal log entries.
- *
- * @param logEntries the serialized journal log entries
- * @param resultingTx the write Tx to which to apply the entries
- */
- void submit(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
- LogRecoveryTask task = new LogRecoveryTask(logEntries, resultingTx);
- resultingTxList.add(resultingTx);
- executor.execute(task);
+ this.log = log;
}
- /**
- * Submits a snapshot.
- *
- * @param snapshotBytes the serialized snapshot
- * @param resultingTx the write Tx to which to apply the entries
- */
- void submit(byte[] snapshotBytes, DOMStoreWriteTransaction resultingTx) {
- SnapshotRecoveryTask task = new SnapshotRecoveryTask(snapshotBytes, resultingTx);
- resultingTxList.add(resultingTx);
- executor.execute(task);
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
+ transaction = store.takeSnapshot().newModification();
+ size = 0;
}
- Collection<DOMStoreWriteTransaction> getTransactions() {
- // Shutdown the executor and wait for task completion.
- executor.shutdown();
-
+ @Override
+ public void appendRecoveredLogEntry(Payload payload) {
try {
- if(executor.awaitTermination(TIME_OUT, TimeUnit.MINUTES)) {
- return resultingTxList;
+ if (payload instanceof DataTreeCandidatePayload) {
+ DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
+ size++;
+ } else if (payload instanceof ModificationPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((ModificationPayload) payload).getModification()).apply(transaction);
+ size++;
+ } else if (payload instanceof CompositeModificationPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((CompositeModificationPayload) payload).getModification()).apply(transaction);
+ size++;
+ } else if (payload instanceof CompositeModificationByteStringPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((CompositeModificationByteStringPayload) payload).getModification()).apply(transaction);
+ size++;
} else {
- LOG.error("Recovery for shard {} timed out after {} minutes", shardName, TIME_OUT);
+ log.error("{}: Unknown payload {} received during recovery", shardName, payload);
}
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
+ } catch (IOException | ClassNotFoundException e) {
+ log.error("{}: Error extracting ModificationPayload", shardName, e);
}
-
- return Collections.emptyList();
}
- private static abstract class ShardRecoveryTask implements Runnable {
-
- final DOMStoreWriteTransaction resultingTx;
-
- ShardRecoveryTask(DOMStoreWriteTransaction resultingTx) {
- this.resultingTx = resultingTx;
- }
+ private void commitTransaction(DataTreeModification tx) throws DataValidationFailedException {
+ tx.ready();
+ store.validate(tx);
+ store.commit(store.prepare(tx));
}
- private class LogRecoveryTask extends ShardRecoveryTask {
-
- private final List<Object> logEntries;
-
- LogRecoveryTask(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
- super(resultingTx);
- this.logEntries = logEntries;
- }
-
- @Override
- public void run() {
- for(int i = 0; i < logEntries.size(); i++) {
- MutableCompositeModification.fromSerializable(
- logEntries.get(i)).apply(resultingTx);
- // Null out to GC quicker.
- logEntries.set(i, null);
- }
+ /**
+ * Applies the current batched log entries to the data store.
+ */
+ @Override
+ public void applyCurrentLogRecoveryBatch() {
+ log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
+ try {
+ commitTransaction(transaction);
+ } catch (DataValidationFailedException e) {
+ log.error("{}: Failed to apply recovery batch", shardName, e);
}
+ transaction = null;
}
- private class SnapshotRecoveryTask extends ShardRecoveryTask {
-
- private final byte[] snapshotBytes;
-
- SnapshotRecoveryTask(byte[] snapshotBytes, DOMStoreWriteTransaction resultingTx) {
- super(resultingTx);
- this.snapshotBytes = snapshotBytes;
- }
-
- @Override
- public void run() {
- NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-
- // delete everything first
- resultingTx.delete(YangInstanceIdentifier.builder().build());
+ /**
+ * Applies a recovered snapshot to the data store.
+ *
+ * @param snapshotBytes the serialized snapshot
+ */
+ @Override
+ public void applyRecoverySnapshot(final byte[] snapshotBytes) {
+ log.debug("{}: Applying recovered snapshot", shardName);
- // Add everything from the remote node back
- resultingTx.write(YangInstanceIdentifier.builder().build(), node);
+ final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+ final DataTreeModification tx = store.takeSnapshot().newModification();
+ tx.write(ROOT, node);
+ try {
+ commitTransaction(tx);
+ } catch (DataValidationFailedException e) {
+ log.error("{}: Failed to apply recovery snapshot", shardName, e);
}
}
}