2 * Copyright (c) 2019, 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
8 package org.opendaylight.controller.akka.segjournal;
10 import akka.actor.ActorSystem;
11 import akka.persistence.PersistentRepr;
12 import com.codahale.metrics.Histogram;
13 import com.google.common.base.VerifyException;
14 import io.atomix.storage.journal.JournalSerdes;
15 import io.atomix.storage.journal.SegmentedJournal;
16 import io.atomix.storage.journal.SegmentedJournalReader;
17 import io.atomix.storage.journal.SegmentedJournalWriter;
18 import io.atomix.storage.journal.StorageLevel;
20 import java.io.Serializable;
21 import java.util.List;
22 import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
23 import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
24 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
25 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
26 import org.slf4j.Logger;
27 import org.slf4j.LoggerFactory;
28 import scala.jdk.javaapi.CollectionConverters;
31 * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
33 final class DataJournalV0 extends DataJournal {
34 private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);
    // Backing Atomix segmented journal holding one file entry per journal entry
    // (the "V0" mapping this class implements).
36 private final SegmentedJournal<DataJournalEntry> entries;
    /**
     * Create a new data journal backed by a SegmentedJournal named "data" in {@code directory}.
     *
     * @param persistenceId akka persistence id this journal serves (passed to superclass)
     * @param messageSize histogram used by the superclass to record serialized message sizes
     * @param system actor system handed to {@link DataJournalEntrySerializer} for (de)serialization
     * @param storage storage level (disk/mapped/memory) for the segmented journal
     * @param directory directory in which the journal segment files live
     * @param maxEntrySize maximum size of a single journal entry, in bytes
     * @param maxSegmentSize maximum size of a single segment file, in bytes
     */
38 DataJournalV0(final String persistenceId, final Histogram messageSize, final ActorSystem system,
39 final StorageLevel storage, final File directory, final int maxEntrySize, final int maxSegmentSize) {
40 super(persistenceId, messageSize);
41 entries = SegmentedJournal.<DataJournalEntry>builder()
42 .withStorageLevel(storage).withDirectory(directory).withName("data")
    // Serdes knows both directions: ToPersistence (write path) and FromPersistence (replay path).
43 .withNamespace(JournalSerdes.builder()
44 .register(new DataJournalEntrySerializer(system), FromPersistence.class, ToPersistence.class)
46 .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
    // Sequence numbers map 1:1 to journal indices in V0, so the writer's last
    // index is exactly the last written sequence number.
51 long lastWrittenSequenceNr() {
52 return entries.writer().getLastIndex();
    // "Delete" is implemented as a writer commit: entries up to and including
    // sequenceNr are marked safe to discard, but physical removal only happens
    // in compactTo(). NOTE(review): relies on V0's 1:1 sequenceNr-to-index mapping.
56 void deleteTo(final long sequenceNr) {
57 entries.writer().commit(sequenceNr);
    // Physically reclaim storage for entries up to and including sequenceNr.
    // SegmentedJournal.compact() takes an exclusive lower bound, hence the +1.
61 void compactTo(final long sequenceNr) {
62 entries.compact(sequenceNr + 1);
    /**
     * Replay persisted messages starting at {@code fromSequenceNr}, completing the request's
     * promise with success or the failure cause.
     *
     * <p>Broad {@code catch (Exception)} is deliberate (checkstyle suppressed): any failure
     * during replay — I/O, deserialization, callback — must be funneled into the promise
     * rather than escape the actor.
     */
71 @SuppressWarnings("checkstyle:illegalCatch")
72 void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
    // try-with-resources guarantees the reader is closed even if replay fails.
73 try (var reader = entries.openReader(fromSequenceNr)) {
74 handleReplayMessages(reader, message);
75 } catch (Exception e) {
76 LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
77 message.promise.failure(e);
    // NOTE(review): the success completion below must be unreachable after failure(e)
    // above (a promise cannot complete twice) — presumably an early return/else sits
    // between them in the full source; confirm against the complete file.
79 message.promise.success(null);
    /**
     * Replay loop: stream entries from {@code reader} into the request's callback, stopping
     * when either {@code message.max} entries have been delivered or an entry's index passes
     * {@code message.toSequenceNr}.
     *
     * @param reader open journal reader positioned at the first entry to replay
     * @param message replay request carrying bounds and the per-message callback
     * @throws VerifyException if a journal entry is not a {@link FromPersistence} — that would
     *         mean the journal holds data this V0 format does not understand
     */
83 private void handleReplayMessages(final SegmentedJournalReader<DataJournalEntry> reader,
84 final ReplayMessages message) {
86 while (reader.hasNext() && count < message.max) {
87 final var next = reader.next();
    // Past the requested upper bound: stop replaying (loop exit is elided from this view).
88 if (next.index() > message.toSequenceNr) {
92 LOG.trace("{}: replay {}", persistenceId, next);
    // Track the largest serialized entry seen, used for sizing diagnostics.
93 updateLargestSize(next.size());
94 final var entry = next.entry();
95 if (entry instanceof FromPersistence fromPersistence) {
    // Rebuild the akka PersistentRepr from the stored payload; in V0 the journal
    // index doubles as the persistence sequence number.
96 final var repr = fromPersistence.toRepr(persistenceId, next.index());
97 LOG.debug("{}: replaying {}", persistenceId, repr);
98 message.replayCallback.accept(repr);
    // Any other entry type means the journal is corrupt or from a newer format.
101 throw new VerifyException("Unexpected entry " + entry);
104 LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
    /**
     * Write a batch of requests to the journal, recording per-request success/failure on
     * {@code message}. Returns the number of bytes written (accumulated in the elided
     * {@code bytes} local and returned past this view).
     *
     * <p>Broad {@code catch (Exception)} is deliberate (checkstyle suppressed): a failed
     * request must not abort the batch — it is recorded and the writer is rolled back.
     */
108 @SuppressWarnings("checkstyle:illegalCatch")
109 long handleWriteMessages(final WriteMessages message) {
110 final int count = message.size();
111 final var writer = entries.writer();
114 for (int i = 0; i < count; ++i) {
    // Remember the writer position so a failed request can be rolled back cleanly.
115 final long mark = writer.getLastIndex();
116 final var request = message.getRequest(i);
    // AtomicWrite.payload() is a Scala Seq; convert to a Java List for iteration.
118 final var reprs = CollectionConverters.asJava(request.payload());
119 LOG.trace("{}: append {}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), mark);
121 bytes += writePayload(writer, reprs);
122 } catch (Exception e) {
123 LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count, mark, e);
124 message.setFailure(i, e);
    // Undo any partial writes from this request so the journal stays consistent;
    // subsequent requests in the batch continue from the reverted position.
125 writer.truncate(mark);
129 message.setSuccess(i);
    /**
     * Append each {@link PersistentRepr} in {@code reprs} to the journal as a
     * {@link ToPersistence} entry, recording each entry's serialized size.
     *
     * @param writer journal writer to append to
     * @param reprs messages from a single write request
     * @return total bytes written (accumulated in a local elided from this view)
     * @throws UnsupportedOperationException if a payload does not implement
     *         {@link Serializable} — fail fast before touching the journal
     */
135 private long writePayload(final SegmentedJournalWriter<DataJournalEntry> writer, final List<PersistentRepr> reprs) {
137 for (var repr : reprs) {
138 final Object payload = repr.payload();
    // Reject non-serializable payloads up front with a clear message instead of
    // letting the serializer fail deeper in the stack.
139 if (!(payload instanceof Serializable)) {
140 throw new UnsupportedOperationException("Non-serializable payload encountered "
141 + payload.getClass());
144 LOG.trace("{}: starting append of {}", persistenceId, payload);
145 final var entry = writer.append(new ToPersistence(repr));
146 final int size = entry.size();
147 LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index());
    // Feed the message-size histogram maintained by the superclass.
148 recordMessageSize(size);