2 * Copyright (c) 2019, 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
8 package org.opendaylight.controller.akka.segjournal;
import static com.google.common.base.Verify.verify;

import akka.actor.ActorSystem;
import akka.persistence.AtomicWrite;
import akka.persistence.PersistentRepr;
import com.codahale.metrics.Histogram;
import io.atomix.storage.StorageLevel;
import io.atomix.storage.journal.Indexed;
import io.atomix.storage.journal.SegmentedJournal;
import io.atomix.storage.journal.SegmentedJournalReader;
import io.atomix.storage.journal.SegmentedJournalWriter;
import io.atomix.utils.serializer.Namespace;
import java.io.File;
import java.io.Serializable;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.jdk.javaapi.CollectionConverters;
33 * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
35 * @author Robert Varga
final class DataJournalV0 extends DataJournal {
    private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);

    // Backing segmented journal; in this version every akka journal entry maps to exactly one
    // segmented-file entry, so akka sequence numbers and journal indices coincide.
    private final SegmentedJournal<DataJournalEntry> entries;
42 DataJournalV0(final String persistenceId, final Histogram messageSize, final ActorSystem system,
43 final StorageLevel storage, final File directory, final int maxEntrySize, final int maxSegmentSize) {
44 super(persistenceId, messageSize);
45 entries = SegmentedJournal.<DataJournalEntry>builder()
46 .withStorageLevel(storage).withDirectory(directory).withName("data")
47 .withNamespace(Namespace.builder()
48 .register(new DataJournalEntrySerializer(system), FromPersistence.class, ToPersistence.class)
50 .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
55 long lastWrittenSequenceNr() {
56 return entries.writer().getLastIndex();
60 void deleteTo(final long sequenceNr) {
61 entries.writer().commit(sequenceNr);
65 void compactTo(final long sequenceNr) {
66 entries.compact(sequenceNr + 1);
75 @SuppressWarnings("checkstyle:illegalCatch")
76 void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
77 try (SegmentedJournalReader<DataJournalEntry> reader = entries.openReader(fromSequenceNr)) {
79 while (reader.hasNext() && count < message.max) {
80 final Indexed<DataJournalEntry> next = reader.next();
81 if (next.index() > message.toSequenceNr) {
85 LOG.trace("{}: replay {}", persistenceId, next);
86 updateLargestSize(next.size());
87 final DataJournalEntry entry = next.entry();
88 verify(entry instanceof FromPersistence, "Unexpected entry %s", entry);
90 final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index());
91 LOG.debug("{}: replaying {}", persistenceId, repr);
92 message.replayCallback.accept(repr);
95 LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
96 } catch (Exception e) {
97 LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
98 message.promise.failure(e);
100 message.promise.success(null);
105 @SuppressWarnings("checkstyle:illegalCatch")
106 void handleWriteMessages(final WriteMessages message) {
107 final int count = message.size();
108 final SegmentedJournalWriter<DataJournalEntry> writer = entries.writer();
110 for (int i = 0; i < count; ++i) {
111 final long mark = writer.getLastIndex();
112 final AtomicWrite request = message.getRequest(i);
114 for (PersistentRepr repr : CollectionConverters.asJava(request.payload())) {
115 final Object payload = repr.payload();
116 if (!(payload instanceof Serializable)) {
117 throw new UnsupportedOperationException("Non-serializable payload encountered "
118 + payload.getClass());
121 recordMessageSize(writer.append(new ToPersistence(repr)).size());
123 } catch (Exception e) {
124 LOG.warn("{}: failed to write out request", persistenceId, e);
125 message.setFailure(i, e);
126 writer.truncate(mark);
130 message.setSuccess(i);