/*
 * Copyright (c) 2019, 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.akka.segjournal;

import akka.actor.ActorSystem;
import akka.persistence.PersistentRepr;
import com.codahale.metrics.Histogram;
import com.google.common.base.VerifyException;
import io.atomix.storage.journal.JournalReader;
import io.atomix.storage.journal.JournalSerdes;
import io.atomix.storage.journal.JournalWriter;
import io.atomix.storage.journal.SegmentedByteBufJournal;
import io.atomix.storage.journal.SegmentedJournal;
import io.atomix.storage.journal.StorageLevel;
import java.io.File;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.jdk.javaapi.CollectionConverters;

/**
 * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
 */
final class DataJournalV0 extends DataJournal {
    private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);

    private final SegmentedJournal<DataJournalEntry> entries;

    DataJournalV0(final String persistenceId, final Histogram messageSize, final ActorSystem system,
            final StorageLevel storage, final File directory, final int maxEntrySize, final int maxSegmentSize) {
        super(persistenceId, messageSize);

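        // DataJournalEntrySerdes maps journal entries to and from bytes, delegating payload serialization
        // to the supplied actor system's serialization machinery.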
        final var serdes = JournalSerdes.builder()
            .register(new DataJournalEntrySerdes(system), FromPersistence.class, ToPersistence.class)
            .build();

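        // Two layers: SegmentedByteBufJournal owns the on-disk segment files, while the serdes-derived
        // read/write mappers convert between raw bytes and DataJournalEntry instances.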
        entries = new SegmentedJournal<>(SegmentedByteBufJournal.builder()
            .withDirectory(directory)
            .withName("data")
            .withStorageLevel(storage)
            .withMaxEntrySize(maxEntrySize)
            .withMaxSegmentSize(maxSegmentSize)
            .build(), serdes.toReadMapper(), serdes.toWriteMapper());
    }

    @Override
    long lastWrittenSequenceNr() {
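        // Sequence numbers and journal indexes are identical in this version, so the journal's last index
        // is also the last written sequence number.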
        return entries.lastIndex();
    }

    @Override
    void deleteTo(final long sequenceNr) {
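        // Deletion is logical: the writer's commit index is advanced here, while the underlying storage is
        // only reclaimed later through compactTo().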
        entries.writer().commit(sequenceNr);
    }

    @Override
    void compactTo(final long sequenceNr) {
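        // compact() appears to retain the entry at the supplied index, hence sequenceNr + 1 so that the
        // entry at sequenceNr itself becomes reclaimable.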
        entries.compact(sequenceNr + 1);
    }

    @Override
    void close() {
        flush();
        entries.close();
    }

    @Override
    void flush() {
        entries.writer().flush();
    }

    @Override
    @SuppressWarnings("checkstyle:illegalCatch")
    void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
        try (var reader = entries.openReader(fromSequenceNr)) {
            handleReplayMessages(reader, message);
            message.promise.success(null);
        } catch (Exception e) {
            LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
            message.promise.failure(e);
        }
    }

    private void handleReplayMessages(final JournalReader<DataJournalEntry> reader, final ReplayMessages message) {
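        // Entries are pulled one at a time; tryNext() yields null once the reader is exhausted, ending the
        // replay even if the requested range extends further.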
        int count = 0;
        while (count < message.max && reader.getNextIndex() <= message.toSequenceNr) {
            final var repr = reader.tryNext((index, entry, size) -> {
                LOG.trace("{}: replay index={} entry={}", persistenceId, index, entry);
                updateLargestSize(size);
                if (entry instanceof FromPersistence fromPersistence) {
                    return fromPersistence.toRepr(persistenceId, index);
                }
                throw new VerifyException("Unexpected entry " + entry);
            });

            if (repr == null) {
                break;
            }

            LOG.debug("{}: replaying {}", persistenceId, repr);
            message.replayCallback.accept(repr);
            count++;
        }
        LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
    }

    @Override
    @SuppressWarnings("checkstyle:illegalCatch")
    WrittenMessages handleWriteMessages(final WriteMessages message) {
        final int count = message.size();
        final var responses = new ArrayList<>();
        final var writer = entries.writer();
        long writtenBytes = 0;

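        // One response slot per request: null on success, the caught exception on failure. A failed request
        // is rolled back via writer.reset() so subsequent requests start from a consistent index.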
        for (int i = 0; i < count; ++i) {
            final long prevNextIndex = writer.getNextIndex();
            final var request = message.getRequest(i);

            final var reprs = CollectionConverters.asJava(request.payload());
            LOG.trace("{}: append {}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), prevNextIndex);
            try {
                writtenBytes += writePayload(writer, reprs);
            } catch (Exception e) {
                LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count,
                    prevNextIndex, e);
                responses.add(e);
                writer.reset(prevNextIndex);
                continue;
            }
            responses.add(null);
        }

        return new WrittenMessages(message, responses, writtenBytes);
    }

    private long writePayload(final JournalWriter<DataJournalEntry> writer, final List<PersistentRepr> reprs) {
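        // Only Serializable payloads are supported: a non-serializable payload fails the entire request and
        // any entries already appended for it are reverted by the caller.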
        long bytes = 0;
        for (var repr : reprs) {
            final Object payload = repr.payload();
            if (!(payload instanceof Serializable)) {
                throw new UnsupportedOperationException("Non-serializable payload encountered "
                        + payload.getClass());
            }

            LOG.trace("{}: starting append of {}", persistenceId, payload);
            final var entry = writer.append(new ToPersistence(repr));
            final int size = entry.size();
            LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index());
            recordMessageSize(size);
            bytes += size;
        }
        return bytes;
    }
}