Update DataJournal interface
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java
/*
 * Copyright (c) 2019, 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.akka.segjournal;

import static com.google.common.base.Verify.verify;

import akka.actor.ActorSystem;
import akka.persistence.AtomicWrite;
import akka.persistence.PersistentRepr;
import com.codahale.metrics.Histogram;
import io.atomix.storage.StorageLevel;
import io.atomix.storage.journal.Indexed;
import io.atomix.storage.journal.SegmentedJournal;
import io.atomix.storage.journal.SegmentedJournalReader;
import io.atomix.storage.journal.SegmentedJournalWriter;
import io.atomix.utils.serializer.Namespace;
import java.io.File;
import java.io.Serializable;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.jdk.javaapi.CollectionConverters;

/**
 * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
 *
 * @author Robert Varga
 */
final class DataJournalV0 extends DataJournal {
    private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);

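    // Backing Atomix segmented journal which holds the actual entry data on disk.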
    private final SegmentedJournal<DataJournalEntry> entries;

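    // Sets up the backing journal: storage level, directory, entry/segment size limits, and a
    // serializer namespace covering the two DataJournalEntry shapes used on write and read.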
    DataJournalV0(final String persistenceId, final Histogram messageSize, final ActorSystem system,
            final StorageLevel storage, final File directory, final int maxEntrySize, final int maxSegmentSize) {
        super(persistenceId, messageSize);
        entries = SegmentedJournal.<DataJournalEntry>builder()
                .withStorageLevel(storage).withDirectory(directory).withName("data")
                .withNamespace(Namespace.builder()
                    .register(new DataJournalEntrySerializer(system), FromPersistence.class, ToPersistence.class)
                    .build())
                .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
                .build();
    }

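    // In V0 journal indices and Akka sequence numbers coincide, so the writer's last index is also
    // the last written sequence number.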
    @Override
    long lastWrittenSequenceNr() {
        return entries.writer().getLastIndex();
    }

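    // Deletion is logical: committing the writer releases entries up to sequenceNr, while the disk
    // space is only reclaimed by a subsequent compactTo().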
    @Override
    void deleteTo(final long sequenceNr) {
        entries.writer().commit(sequenceNr);
    }

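    // The +1 accounts for compact() taking the first index to retain, making sequenceNr itself
    // eligible for removal (whole segments only).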
    @Override
    void compactTo(final long sequenceNr) {
        entries.compact(sequenceNr + 1);
    }

    @Override
    void close() {
        entries.close();
    }

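    // Replays stored entries to message.replayCallback, starting at fromSequenceNr and stopping at
    // message.toSequenceNr or after message.max entries, whichever comes first.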
    @Override
    @SuppressWarnings("checkstyle:illegalCatch")
    void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
        try (SegmentedJournalReader<DataJournalEntry> reader = entries.openReader(fromSequenceNr)) {
            int count = 0;
            while (reader.hasNext() && count < message.max) {
                final Indexed<DataJournalEntry> next = reader.next();
                if (next.index() > message.toSequenceNr) {
                    break;
                }

                LOG.trace("{}: replay {}", persistenceId, next);
                updateLargestSize(next.size());
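                // Entries are written as ToPersistence and must deserialize as FromPersistence;
                // anything else indicates a corrupted or incompatible journal.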
                final DataJournalEntry entry = next.entry();
                verify(entry instanceof FromPersistence, "Unexpected entry %s", entry);

                final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index());
                LOG.debug("{}: replaying {}", persistenceId, repr);
                message.replayCallback.accept(repr);
                count++;
            }
            LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
            message.promise.success(null);
        } catch (Exception e) {
            LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
            message.promise.failure(e);
        }
    }

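    // Appends each AtomicWrite in the batch. On failure the writer is truncated back to the index
    // captured before the attempt, so every AtomicWrite is applied completely or not at all.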
    @Override
    @SuppressWarnings("checkstyle:illegalCatch")
    void handleWriteMessages(final WriteMessages message) {
        final int count = message.size();
        final SegmentedJournalWriter<DataJournalEntry> writer = entries.writer();

        for (int i = 0; i < count; ++i) {
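            // Remember the writer position so a failed AtomicWrite can be truncated away.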
            final long mark = writer.getLastIndex();
            final AtomicWrite request = message.getRequest(i);
            try {
                for (PersistentRepr repr : CollectionConverters.asJava(request.payload())) {
                    final Object payload = repr.payload();
                    if (!(payload instanceof Serializable)) {
                        throw new UnsupportedOperationException("Non-serializable payload encountered "
                                + payload.getClass());
                    }

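                    // Append the entry and feed its serialized size into the message-size histogram.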
                    recordMessageSize(writer.append(new ToPersistence(repr)).size());
                }
            } catch (Exception e) {
                LOG.warn("{}: failed to write out request", persistenceId, e);
                message.setFailure(i, e);
                writer.truncate(mark);
                continue;
            }

            message.setSuccess(i);
        }
        writer.flush();
    }
}