/*
 * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.akka.segjournal;

import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.AbstractActor;
import akka.actor.Props;
import akka.persistence.AtomicWrite;
import akka.persistence.PersistentRepr;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.base.MoreObjects;
import io.atomix.storage.StorageLevel;
import io.atomix.storage.journal.Indexed;
import io.atomix.storage.journal.SegmentedJournal;
import io.atomix.storage.journal.SegmentedJournalReader;
import io.atomix.storage.journal.SegmentedJournalWriter;
import io.atomix.utils.serializer.Namespace;
import java.io.File;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.reporting.MetricsReporter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
import scala.concurrent.Promise;
import scala.jdk.javaapi.CollectionConverters;

/**
 * This actor handles a single PersistentActor's journal. The journal is split into two {@link SegmentedJournal}s:
 * <ul>
 *     <li>A memory-mapped data journal, containing actual data entries</li>
 *     <li>A simple file journal, tracking the sequence number of the last deleted entry</li>
 * </ul>
 *
 * <p>
 * This is a conscious design decision to minimize the amount of data that is being stored in the data journal while
 * speeding up normal operations. Since the SegmentedJournal is an append-only linear log and Akka requires the ability
 * to delete persistence entries, we need the ability to mark a subset of a SegmentedJournal as deleted. While we could
 * treat such delete requests as normal events, this leads to a mismatch between SegmentedJournal indices (as exposed by
 * {@link Indexed}) and Akka sequence numbers -- requiring us to potentially perform costly deserialization to find the
 * index corresponding to a particular sequence number, or maintain moderately-complex logic and data structures to
 * perform that mapping in sub-linear time complexity.
 *
 * <p>
 * The split-file approach allows us to treat sequence numbers and indices as equivalent, without maintaining any
 * explicit mapping information. The only additional information we need to maintain is the last deleted sequence
 * number.
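 *
 * <p>
 * A typical interaction, sketched here for illustration only (the {@code journalActor} reference is assumed to have
 * been created via {@link #props}; the promise is completed by this actor):
 * <pre>
 *     AsyncMessage&lt;Long&gt; request = SegmentedJournalActor.readHighestSequenceNr(0);
 *     journalActor.tell(request, ActorRef.noSender());
 *     // observe the result via request.promise.future()
 * </pre>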
 *
 * @author Robert Varga
 */
final class SegmentedJournalActor extends AbstractActor {
    abstract static class AsyncMessage<T> {
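        // Completed by this actor once the request has been processed; callers observe the result
        // via promise.future()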
        final Promise<T> promise = Promise.apply();
    }

    private static final class ReadHighestSequenceNr extends AsyncMessage<Long> {
        private final long fromSequenceNr;

        ReadHighestSequenceNr(final long fromSequenceNr) {
            this.fromSequenceNr = fromSequenceNr;
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this).add("fromSequenceNr", fromSequenceNr).toString();
        }
    }

    private static final class ReplayMessages extends AsyncMessage<Void> {
        private final long fromSequenceNr;
        private final long toSequenceNr;
        private final long max;
        private final Consumer<PersistentRepr> replayCallback;

        ReplayMessages(final long fromSequenceNr,
                final long toSequenceNr, final long max, final Consumer<PersistentRepr> replayCallback) {
            this.fromSequenceNr = fromSequenceNr;
            this.toSequenceNr = toSequenceNr;
            this.max = max;
            this.replayCallback = requireNonNull(replayCallback);
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this).add("fromSequenceNr", fromSequenceNr)
                    .add("toSequenceNr", toSequenceNr).add("max", max).toString();
        }
    }

    static final class WriteMessages {
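        // The two lists are kept index-aligned: results.get(i) carries the outcome of requests.get(i)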
        private final List<AtomicWrite> requests = new ArrayList<>();
        private final List<Promise<Optional<Exception>>> results = new ArrayList<>();

        Future<Optional<Exception>> add(final AtomicWrite write) {
            final Promise<Optional<Exception>> promise = Promise.apply();
            requests.add(write);
            results.add(promise);
            return promise.future();
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this).add("requests", requests).toString();
        }
    }

    private static final class DeleteMessagesTo extends AsyncMessage<Void> {
        final long toSequenceNr;

        DeleteMessagesTo(final long toSequenceNr) {
            this.toSequenceNr = toSequenceNr;
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this).add("toSequenceNr", toSequenceNr).toString();
        }
    }

    private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournalActor.class);
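    // The delete journal stores only Long sequence numbers, hence a dedicated namespace and a small segment size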
    private static final Namespace DELETE_NAMESPACE = Namespace.builder().register(Long.class).build();
    private static final int DELETE_SEGMENT_SIZE = 64 * 1024;

    private final String persistenceId;
    private final StorageLevel storage;
    private final int maxSegmentSize;
    private final int maxEntrySize;
    private final File directory;

    // Tracks the time it took us to write a batch of messages
    private Timer batchWriteTime;
    // Tracks the number of individual messages written
    private Meter messageWriteCount;
    // Tracks the size distribution of messages
    private Histogram messageSize;

    private SegmentedJournal<DataJournalEntry> dataJournal;
    private SegmentedJournal<Long> deleteJournal;
    private long lastDelete;

    // Tracks largest message size we have observed either during recovery or during write
    private int largestObservedSize;

    SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
            final int maxEntrySize, final int maxSegmentSize) {
        this.persistenceId = requireNonNull(persistenceId);
        this.directory = requireNonNull(directory);
        this.storage = requireNonNull(storage);
        this.maxEntrySize = maxEntrySize;
        this.maxSegmentSize = maxSegmentSize;
    }

    static Props props(final String persistenceId, final File directory, final StorageLevel storage,
            final int maxEntrySize, final int maxSegmentSize) {
        return Props.create(SegmentedJournalActor.class, requireNonNull(persistenceId), directory, storage,
            maxEntrySize, maxSegmentSize);
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
                .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
                .match(ReplayMessages.class, this::handleReplayMessages)
                .match(WriteMessages.class, this::handleWriteMessages)
                .matchAny(this::handleUnknown)
                .build();
    }

    @Override
    public void preStart() throws Exception {
        LOG.debug("{}: actor starting", persistenceId);
        super.preStart();

        final MetricRegistry registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
        final String actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName();

        batchWriteTime = registry.timer(MetricRegistry.name(actorName, "batchWriteTime"));
        messageWriteCount = registry.meter(MetricRegistry.name(actorName, "messageWriteCount"));
        messageSize = registry.histogram(MetricRegistry.name(actorName, "messageSize"));
    }

    @Override
    public void postStop() throws Exception {
        LOG.debug("{}: actor stopping", persistenceId);
        if (dataJournal != null) {
            dataJournal.close();
            LOG.debug("{}: data journal closed", persistenceId);
            dataJournal = null;
        }
        if (deleteJournal != null) {
            deleteJournal.close();
            LOG.debug("{}: delete journal closed", persistenceId);
            deleteJournal = null;
        }
        LOG.debug("{}: actor stopped", persistenceId);
        super.postStop();
    }

    static AsyncMessage<Void> deleteMessagesTo(final long toSequenceNr) {
        return new DeleteMessagesTo(toSequenceNr);
    }

    static AsyncMessage<Long> readHighestSequenceNr(final long fromSequenceNr) {
        return new ReadHighestSequenceNr(fromSequenceNr);
    }

    static AsyncMessage<Void> replayMessages(final long fromSequenceNr, final long toSequenceNr, final long max,
            final Consumer<PersistentRepr> replayCallback) {
        return new ReplayMessages(fromSequenceNr, toSequenceNr, max, replayCallback);
    }

    private void handleDeleteMessagesTo(final DeleteMessagesTo message) {
        ensureOpen();

        LOG.debug("{}: delete messages {}", persistenceId, message);
        final long to = Long.min(dataJournal.writer().getLastIndex(), message.toSequenceNr);
        LOG.debug("{}: adjusted delete to {}", persistenceId, to);

        if (lastDelete < to) {
            LOG.debug("{}: deleting entries up to {}", persistenceId, to);

            lastDelete = to;
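            // Persist the new marker in the delete journal first, so that a restart sees the delete
            // even if the compaction below does not run to completion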
            final SegmentedJournalWriter<Long> deleteWriter = deleteJournal.writer();
            final Indexed<Long> entry = deleteWriter.append(lastDelete);
            deleteWriter.commit(entry.index());
            dataJournal.writer().commit(lastDelete);
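
            // Compaction releases whole segments below the given index in each journal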
            LOG.debug("{}: compaction started", persistenceId);
            dataJournal.compact(lastDelete + 1);
            deleteJournal.compact(entry.index());
            LOG.debug("{}: compaction finished", persistenceId);
        } else {
            LOG.debug("{}: entries up to {} already deleted", persistenceId, lastDelete);
        }

        message.promise.success(null);
    }

    private void handleReadHighestSequenceNr(final ReadHighestSequenceNr message) {
        LOG.debug("{}: looking for highest sequence on {}", persistenceId, message);
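        // A missing directory means nothing was ever persisted; report 0 without creating journal files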
        final Long sequence;
        if (directory.isDirectory()) {
            ensureOpen();
            sequence = dataJournal.writer().getLastIndex();
        } else {
            sequence = 0L;
        }

        LOG.debug("{}: highest sequence is {}", persistenceId, sequence);
        message.promise.success(sequence);
    }

    @SuppressWarnings("checkstyle:illegalCatch")
    private void handleReplayMessages(final ReplayMessages message) {
        LOG.debug("{}: replaying messages {}", persistenceId, message);
        ensureOpen();

        final long from = Long.max(lastDelete + 1, message.fromSequenceNr);
        LOG.debug("{}: adjusted fromSequenceNr to {}", persistenceId, from);
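
        // Sequence numbers correspond 1:1 to journal indices, so the reader can seek straight to 'from'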
        try (SegmentedJournalReader<DataJournalEntry> reader = dataJournal.openReader(from)) {
            int count = 0;
            while (reader.hasNext() && count < message.max) {
                final Indexed<DataJournalEntry> next = reader.next();
                if (next.index() > message.toSequenceNr) {
                    break;
                }

                LOG.trace("{}: replay {}", persistenceId, next);
                updateLargestSize(next.size());
                final DataJournalEntry entry = next.entry();
                verify(entry instanceof FromPersistence, "Unexpected entry %s", entry);

                final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index());
                LOG.debug("{}: replaying {}", persistenceId, repr);
                message.replayCallback.accept(repr);
                count++;
            }
            LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
        } catch (Exception e) {
            LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
            message.promise.failure(e);
            return;
        }

        message.promise.success(null);
    }

    @SuppressWarnings("checkstyle:illegalCatch")
    private void handleWriteMessages(final WriteMessages message) {
        ensureOpen();

        final SegmentedJournalWriter<DataJournalEntry> writer = dataJournal.writer();
        final long startTicks = System.nanoTime();
        final int count = message.requests.size();
        final long start = writer.getLastIndex();
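
        // Each AtomicWrite is all-or-nothing: on failure, truncate back to the mark taken before it,
        // discarding partially-written entries, and continue with the next request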
        for (int i = 0; i < count; ++i) {
            final long mark = writer.getLastIndex();
            try {
                writeRequest(writer, message.requests.get(i));
            } catch (Exception e) {
                LOG.warn("{}: failed to write out request", persistenceId, e);
                message.results.get(i).success(Optional.of(e));
                writer.truncate(mark);
                continue;
            }

            message.results.get(i).success(Optional.empty());
        }

        writer.flush();
        batchWriteTime.update(System.nanoTime() - startTicks, TimeUnit.NANOSECONDS);
        messageWriteCount.mark(writer.getLastIndex() - start);
    }

    private void writeRequest(final SegmentedJournalWriter<DataJournalEntry> writer, final AtomicWrite request) {
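        // Every payload in the write must be Serializable; a single offending payload fails the whole AtomicWrite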
        for (PersistentRepr repr : CollectionConverters.asJava(request.payload())) {
            final Object payload = repr.payload();
            if (!(payload instanceof Serializable)) {
                throw new UnsupportedOperationException("Non-serializable payload encountered " + payload.getClass());
            }

            final int size = writer.append(new ToPersistence(repr)).size();
            messageSize.update(size);
            updateLargestSize(size);
        }
    }

    private void handleUnknown(final Object message) {
        LOG.error("{}: Received unknown message {}", persistenceId, message);
    }

    private void updateLargestSize(final int size) {
        if (size > largestObservedSize) {
            largestObservedSize = size;
        }
    }
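
    // Opens both journals on first use; subsequent calls only verify that they are open together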
    private void ensureOpen() {
        if (dataJournal != null) {
            verifyNotNull(deleteJournal);
            return;
        }

        deleteJournal = SegmentedJournal.<Long>builder().withDirectory(directory).withName("delete")
                .withNamespace(DELETE_NAMESPACE).withMaxSegmentSize(DELETE_SEGMENT_SIZE).build();
        final Indexed<Long> lastEntry = deleteJournal.writer().getLastEntry();
        lastDelete = lastEntry == null ? 0 : lastEntry.entry();

        dataJournal = SegmentedJournal.<DataJournalEntry>builder()
                .withStorageLevel(storage).withDirectory(directory).withName("data")
                .withNamespace(Namespace.builder()
                    .register(new DataJournalEntrySerializer(context().system()),
                        FromPersistence.class, ToPersistence.class)
                    .build())
                .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
                .build();
        final SegmentedJournalWriter<DataJournalEntry> writer = dataJournal.writer();
        writer.commit(lastDelete);
        LOG.debug("{}: journal open with last index {}, deleted to {}", persistenceId, writer.getLastIndex(),
            lastDelete);
    }
}