X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-akka-raft%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fraft%2FReplicatedLogImpl.java;h=6167aac6d2ad71da7225c388ce4bb68bbb139070;hp=c32839c490eb0b53d69192b2c58f4a141fe422f0;hb=7cb260aeb0738104e3bee8a086de9e2e5f77b7e0;hpb=608760751ce7fcf4e84e86a8b33d43bc1d9984d6

diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
index c32839c490..6167aac6d2 100644
--- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
+++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
@@ -7,120 +7,126 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
-import akka.japi.Procedure;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collections;
 import java.util.List;
-import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
-import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import java.util.function.Consumer;
+import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 
 /**
  * Implementation of ReplicatedLog used by the RaftActor.
  */
-class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
+final class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
     private static final int DATA_SIZE_DIVIDER = 5;
-    private long dataSizeSinceLastSnapshot = 0L;
 
     private final RaftActorContext context;
-    private final RaftActorBehavior currentBehavior;
+    private long dataSizeSinceLastSnapshot = 0L;
 
-    private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
-        @Override
-        public void apply(DeleteEntries notUsed) {
-        }
-    };
+    private ReplicatedLogImpl(final long snapshotIndex, final long snapshotTerm,
+            final List<ReplicatedLogEntry> unAppliedEntries,
+            final RaftActorContext context) {
+        super(snapshotIndex, snapshotTerm, unAppliedEntries, context.getId());
+        this.context = requireNonNull(context);
+    }
 
-    static ReplicatedLog newInstance(Snapshot snapshot, RaftActorContext context,
-            RaftActorBehavior currentBehavior) {
+    static ReplicatedLog newInstance(final Snapshot snapshot, final RaftActorContext context) {
         return new ReplicatedLogImpl(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
-                snapshot.getUnAppliedEntries(), context, currentBehavior);
+                snapshot.getUnAppliedEntries(), context);
     }
 
-    static ReplicatedLog newInstance(RaftActorContext context, RaftActorBehavior currentBehavior) {
-        return new ReplicatedLogImpl(-1L, -1L, Collections.emptyList(), context,
-                currentBehavior);
+    static ReplicatedLog newInstance(final RaftActorContext context) {
+        return new ReplicatedLogImpl(-1L, -1L, Collections.emptyList(), context);
     }
 
-    private ReplicatedLogImpl(long snapshotIndex, long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries,
-            RaftActorContext context, RaftActorBehavior currentBehavior) {
-        super(snapshotIndex, snapshotTerm, unAppliedEntries);
-        this.context = context;
-        this.currentBehavior = currentBehavior;
+    @Override
+    public boolean removeFromAndPersist(final long logEntryIndex) {
+        long adjustedIndex = removeFrom(logEntryIndex);
+        if (adjustedIndex >= 0) {
+            context.getPersistenceProvider().persist(new DeleteEntries(logEntryIndex), NoopProcedure.instance());
+            return true;
+        }
+
+        return false;
    }
 
     @Override
-    public void removeFromAndPersist(long logEntryIndex) {
-        // FIXME: Maybe this should be done after the command is saved
-        long adjustedIndex = removeFrom(logEntryIndex);
-        if(adjustedIndex >= 0) {
-            context.getPersistenceProvider().persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+    public boolean shouldCaptureSnapshot(final long logIndex) {
+        final ConfigParams config = context.getConfigParams();
+        if ((logIndex + 1) % config.getSnapshotBatchCount() == 0) {
+            return true;
         }
+
+        final long absoluteThreshold = config.getSnapshotDataThreshold();
+        final long dataThreshold = absoluteThreshold != 0 ? absoluteThreshold * ConfigParams.MEGABYTE
+                : context.getTotalMemory() * config.getSnapshotDataThresholdPercentage() / 100;
+        return getDataSizeForSnapshotCheck() > dataThreshold;
     }
 
     @Override
-    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
-        appendAndPersist(replicatedLogEntry, null);
+    public void captureSnapshotIfReady(final ReplicatedLogEntry replicatedLogEntry) {
+        if (shouldCaptureSnapshot(replicatedLogEntry.getIndex())) {
+            boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
+                    context.getCurrentBehavior().getReplicatedToAllIndex());
+            if (started && !context.hasFollowers()) {
+                dataSizeSinceLastSnapshot = 0;
+            }
+        }
+    }
+
+    private long getDataSizeForSnapshotCheck() {
+        if (!context.hasFollowers()) {
+            // When we do not have followers we do not maintain an in-memory log
+            // due to this the journalSize will never become anything close to the
+            // snapshot batch count. In fact will mostly be 1.
+            // Similarly since the journal's dataSize depends on the entries in the
+            // journal the journal's dataSize will never reach a value close to the
+            // memory threshold.
+            // By maintaining the dataSize outside the journal we are tracking essentially
+            // what we have written to the disk however since we no longer are in
+            // need of doing a snapshot just for the sake of freeing up memory we adjust
+            // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
+            // as if we were maintaining a real snapshot
+            return dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
+        } else {
+            return dataSize();
+        }
     }
 
     @Override
-    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
-            final Procedure<ReplicatedLogEntry> callback) {
+    public boolean appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
+            final Consumer<ReplicatedLogEntry> callback, final boolean doAsync) {
 
-        if(context.getLogger().isDebugEnabled()) {
-            context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
+        context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
+
+        if (!append(replicatedLogEntry)) {
+            return false;
         }
 
-        // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
-        append(replicatedLogEntry);
-
-        // When persisting events with persist it is guaranteed that the
-        // persistent actor will not receive further commands between the
-        // persist call and the execution(s) of the associated event
-        // handler. This also holds for multiple persist calls in context
-        // of a single command.
-        context.getPersistenceProvider().persist(replicatedLogEntry,
-            new Procedure<ReplicatedLogEntry>() {
-                @Override
-                public void apply(ReplicatedLogEntry evt) throws Exception {
-                    int logEntrySize = replicatedLogEntry.size();
-
-                    long dataSizeForCheck = dataSize();
-
-                    dataSizeSinceLastSnapshot += logEntrySize;
-
-                    if (!context.hasFollowers()) {
-                        // When we do not have followers we do not maintain an in-memory log
-                        // due to this the journalSize will never become anything close to the
-                        // snapshot batch count. In fact will mostly be 1.
-                        // Similarly since the journal's dataSize depends on the entries in the
-                        // journal the journal's dataSize will never reach a value close to the
-                        // memory threshold.
-                        // By maintaining the dataSize outside the journal we are tracking essentially
-                        // what we have written to the disk however since we no longer are in
-                        // need of doing a snapshot just for the sake of freeing up memory we adjust
-                        // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
-                        // as if we were maintaining a real snapshot
-                        dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
-                    }
-                    long journalSize = replicatedLogEntry.getIndex() + 1;
-                    long dataThreshold = context.getTotalMemory() *
-                            context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
-
-                    if ((journalSize % context.getConfigParams().getSnapshotBatchCount() == 0
-                            || dataSizeForCheck > dataThreshold)) {
-
-                        boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
-                                currentBehavior.getReplicatedToAllIndex());
-
-                        if(started){
-                            dataSizeSinceLastSnapshot = 0;
-                        }
-                    }
-
-                    if (callback != null){
-                        callback.apply(replicatedLogEntry);
-                    }
-                }
-            }
-        );
+        if (doAsync) {
+            context.getPersistenceProvider().persistAsync(replicatedLogEntry,
+                entry -> persistCallback(entry, callback));
+        } else {
+            context.getPersistenceProvider().persist(replicatedLogEntry, entry -> syncPersistCallback(entry, callback));
+        }
+
+        return true;
+    }
+
+    private void persistCallback(final ReplicatedLogEntry persistedLogEntry,
+            final Consumer<ReplicatedLogEntry> callback) {
+        context.getExecutor().execute(() -> syncPersistCallback(persistedLogEntry, callback));
+    }
+
+    private void syncPersistCallback(final ReplicatedLogEntry persistedLogEntry,
+            final Consumer<ReplicatedLogEntry> callback) {
+        context.getLogger().debug("{}: persist complete {}", context.getId(), persistedLogEntry);
+
+        dataSizeSinceLastSnapshot += persistedLogEntry.size();
+
+        if (callback != null) {
+            callback.accept(persistedLogEntry);
+        }
     }
-}
\ No newline at end of file
+}
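
Note on the snapshot trigger introduced above: shouldCaptureSnapshot fires either when the log index completes a batch of snapshotBatchCount entries or when the tracked data size crosses a threshold, where a non-zero snapshotDataThreshold (in megabytes) takes precedence over the percentage-of-total-memory check, and getDataSizeForSnapshotCheck divides the bytes persisted since the last snapshot by DATA_SIZE_DIVIDER when the actor has no followers. The sketch below restates that decision as a standalone class; the names (SnapshotPolicy, shouldCapture, the constructor parameters) are hypothetical stand-ins for the values the patch reads from ConfigParams and RaftActorContext, and it assumes ConfigParams.MEGABYTE is the usual 1024 * 1024.

/**
 * Illustrative sketch of the snapshot trigger above; names are hypothetical,
 * not the ReplicatedLogImpl or ConfigParams API.
 */
final class SnapshotPolicy {
    private static final int DATA_SIZE_DIVIDER = 5;
    private static final long MEGABYTE = 1024 * 1024; // assumed value of ConfigParams.MEGABYTE

    private final long snapshotBatchCount;  // stands in for ConfigParams.getSnapshotBatchCount()
    private final long thresholdMb;         // stands in for ConfigParams.getSnapshotDataThreshold(), 0 = not set
    private final int thresholdPercent;     // stands in for ConfigParams.getSnapshotDataThresholdPercentage()
    private final long totalMemory;         // stands in for RaftActorContext.getTotalMemory()

    SnapshotPolicy(final long snapshotBatchCount, final long thresholdMb, final int thresholdPercent,
            final long totalMemory) {
        this.snapshotBatchCount = snapshotBatchCount;
        this.thresholdMb = thresholdMb;
        this.thresholdPercent = thresholdPercent;
        this.totalMemory = totalMemory;
    }

    boolean shouldCapture(final long logIndex, final long journalDataSize,
            final long dataSizeSinceLastSnapshot, final boolean hasFollowers) {
        // Every snapshotBatchCount entries, capture regardless of data size.
        if ((logIndex + 1) % snapshotBatchCount == 0) {
            return true;
        }

        // An absolute threshold in megabytes, when configured, wins over the percentage-of-memory check.
        final long dataThreshold = thresholdMb != 0 ? thresholdMb * MEGABYTE
                : totalMemory * thresholdPercent / 100;

        // Without followers the in-memory journal stays tiny, so the bytes written since the last
        // snapshot are used instead, scaled down so snapshots are not taken more often than before.
        final long dataSizeForCheck = hasFollowers ? journalDataSize
                : dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
        return dataSizeForCheck > dataThreshold;
    }

    public static void main(final String[] args) {
        final SnapshotPolicy policy = new SnapshotPolicy(20_000, 0, 12, Runtime.getRuntime().totalMemory());
        // A 1 KiB journal at index 41 triggers neither the batch check nor the memory check: prints false.
        System.out.println(policy.shouldCapture(41, 1024, 1024, true));
        // Index 19_999 completes a batch of 20_000 entries, so a capture is triggered: prints true.
        System.out.println(policy.shouldCapture(19_999, 1024, 1024, true));
    }
}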
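
A second point worth noting is how the new appendAndPersist completes: the synchronous path invokes syncPersistCallback directly from the persist callback, while the asynchronous path goes through persistCallback, which re-dispatches onto context.getExecutor(), presumably so the dataSizeSinceLastSnapshot bookkeeping and the caller's Consumer always run from the actor's executor. The sketch below shows that dispatch pattern in isolation; PersistDispatchSketch, its persist method, and the single-threaded executor are hypothetical stand-ins for the persistence provider and actor executor, not the sal-akka-raft API.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

/** Hypothetical sketch of the sync/async completion dispatch used by appendAndPersist above. */
final class PersistDispatchSketch {
    private final ExecutorService actorExecutor = Executors.newSingleThreadExecutor();
    private long dataSizeSinceLastSnapshot;

    void persist(final String entry, final Consumer<String> callback, final boolean doAsync) {
        if (doAsync) {
            // Completion may arrive on another thread, so hop back to the single-threaded executor.
            actorExecutor.execute(() -> complete(entry, callback));
        } else {
            // Synchronous persistence already runs in the right context; call straight through.
            complete(entry, callback);
        }
    }

    private void complete(final String entry, final Consumer<String> callback) {
        // Bookkeeping is only ever touched from one thread, mirroring syncPersistCallback.
        dataSizeSinceLastSnapshot += entry.length();
        if (callback != null) {
            callback.accept(entry);
        }
    }

    public static void main(final String[] args) throws InterruptedException {
        final PersistDispatchSketch sketch = new PersistDispatchSketch();
        sketch.persist("entry-1", e -> System.out.println("sync complete: " + e), false);
        sketch.persist("entry-2", e -> System.out.println("async complete: " + e), true);
        sketch.actorExecutor.shutdown();
        sketch.actorExecutor.awaitTermination(1, TimeUnit.SECONDS);
    }
}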