/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.controller.cluster.raft.utils;
import akka.dispatch.Futures;
import akka.persistence.AtomicWrite;
import akka.persistence.PersistentImpl;
import akka.persistence.PersistentRepr;
import akka.persistence.journal.japi.AsyncWriteJournal;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import org.apache.commons.lang.SerializationUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Option;
import scala.concurrent.Future;
import scala.jdk.javaapi.CollectionConverters;
/**
 * An akka AsyncWriteJournal implementation that stores data in memory. This is intended for testing.
 *
 * @author Thomas Pantelis
 */
39 public class InMemoryJournal extends AsyncWriteJournal {
41 private static class WriteMessagesComplete {
42 final CountDownLatch latch;
43 final Class<?> ofType;
45 WriteMessagesComplete(final int count, final Class<?> ofType) {
46 this.latch = new CountDownLatch(count);
51 static final Logger LOG = LoggerFactory.getLogger(InMemoryJournal.class);
53 private static final Map<String, Map<Long, Object>> JOURNALS = new ConcurrentHashMap<>();
55 private static final Map<String, CountDownLatch> DELETE_MESSAGES_COMPLETE_LATCHES = new ConcurrentHashMap<>();
57 private static final Map<String, WriteMessagesComplete> WRITE_MESSAGES_COMPLETE = new ConcurrentHashMap<>();
59 private static final Map<String, CountDownLatch> BLOCK_READ_MESSAGES_LATCHES = new ConcurrentHashMap<>();
61 private static Object deserialize(final Object data) {
62 return data instanceof byte[] ? SerializationUtils.deserialize((byte[])data) : data;
65 public static void addEntry(final String persistenceId, final long sequenceNr, final Object data) {
66 Map<Long, Object> journal = JOURNALS.computeIfAbsent(persistenceId, k -> new LinkedHashMap<>());
68 synchronized (journal) {
69 journal.put(sequenceNr, data instanceof Serializable
70 ? SerializationUtils.serialize((Serializable) data) : data);
74 public static void clear() {
76 DELETE_MESSAGES_COMPLETE_LATCHES.clear();
77 WRITE_MESSAGES_COMPLETE.clear();
78 BLOCK_READ_MESSAGES_LATCHES.clear();
81 @SuppressWarnings("unchecked")
82 public static <T> List<T> get(final String persistenceId, final Class<T> type) {
83 Map<Long, Object> journalMap = JOURNALS.get(persistenceId);
84 if (journalMap == null) {
85 return Collections.<T>emptyList();
88 synchronized (journalMap) {
89 List<T> journal = new ArrayList<>(journalMap.size());
90 for (Object entry: journalMap.values()) {
91 Object data = deserialize(entry);
92 if (type.isInstance(data)) {
93 journal.add((T) data);
101 public static Map<Long, Object> get(final String persistenceId) {
102 Map<Long, Object> journalMap = JOURNALS.get(persistenceId);
103 return journalMap != null ? journalMap : Collections.<Long, Object>emptyMap();
106 public static void dumpJournal(final String persistenceId) {
107 StringBuilder builder = new StringBuilder(String.format("Journal log for %s:", persistenceId));
108 Map<Long, Object> journalMap = JOURNALS.get(persistenceId);
109 if (journalMap != null) {
110 synchronized (journalMap) {
111 for (Map.Entry<Long, Object> e: journalMap.entrySet()) {
112 builder.append("\n ").append(e.getKey()).append(" = ").append(deserialize(e.getValue()));
117 LOG.info(builder.toString());
120 public static void waitForDeleteMessagesComplete(final String persistenceId) {
121 if (!Uninterruptibles.awaitUninterruptibly(DELETE_MESSAGES_COMPLETE_LATCHES.get(persistenceId),
122 5, TimeUnit.SECONDS)) {
123 throw new AssertionError("Delete messages did not complete");
127 public static void waitForWriteMessagesComplete(final String persistenceId) {
128 if (!Uninterruptibles.awaitUninterruptibly(WRITE_MESSAGES_COMPLETE.get(persistenceId).latch,
129 5, TimeUnit.SECONDS)) {
130 throw new AssertionError("Journal write messages did not complete");
134 public static void addDeleteMessagesCompleteLatch(final String persistenceId) {
135 DELETE_MESSAGES_COMPLETE_LATCHES.put(persistenceId, new CountDownLatch(1));
138 public static void addWriteMessagesCompleteLatch(final String persistenceId, final int count) {
139 WRITE_MESSAGES_COMPLETE.put(persistenceId, new WriteMessagesComplete(count, null));
142 public static void addWriteMessagesCompleteLatch(final String persistenceId, final int count,
143 final Class<?> ofType) {
144 WRITE_MESSAGES_COMPLETE.put(persistenceId, new WriteMessagesComplete(count, ofType));
147 public static void addBlockReadMessagesLatch(final String persistenceId, final CountDownLatch latch) {
148 BLOCK_READ_MESSAGES_LATCHES.put(persistenceId, latch);
152 public Future<Void> doAsyncReplayMessages(final String persistenceId, final long fromSequenceNr,
153 final long toSequenceNr, final long max, final Consumer<PersistentRepr> replayCallback) {
154 LOG.trace("doAsyncReplayMessages for {}: fromSequenceNr: {}, toSequenceNr: {}", persistenceId,
155 fromSequenceNr,toSequenceNr);
156 return Futures.future(() -> {
157 CountDownLatch blockLatch = BLOCK_READ_MESSAGES_LATCHES.remove(persistenceId);
158 if (blockLatch != null) {
159 Uninterruptibles.awaitUninterruptibly(blockLatch);
162 Map<Long, Object> journal = JOURNALS.get(persistenceId);
163 if (journal == null) {
167 synchronized (journal) {
169 for (Map.Entry<Long,Object> entry : journal.entrySet()) {
170 if (++count <= max && entry.getKey() >= fromSequenceNr && entry.getKey() <= toSequenceNr) {
171 PersistentRepr persistentMessage =
172 new PersistentImpl(deserialize(entry.getValue()), entry.getKey(), persistenceId,
173 null, false, null, null, 0, Option.empty());
174 replayCallback.accept(persistentMessage);
180 }, context().dispatcher());
184 public Future<Long> doAsyncReadHighestSequenceNr(final String persistenceId, final long fromSequenceNr) {
185 LOG.trace("doAsyncReadHighestSequenceNr for {}: fromSequenceNr: {}", persistenceId, fromSequenceNr);
187 // Akka calls this during recovery.
188 Map<Long, Object> journal = JOURNALS.get(persistenceId);
189 if (journal == null) {
190 return Futures.successful(fromSequenceNr);
193 synchronized (journal) {
195 for (Long seqNr : journal.keySet()) {
196 if (seqNr.longValue() >= fromSequenceNr && seqNr.longValue() > highest) {
197 highest = seqNr.longValue();
201 return Futures.successful(highest);
206 public Future<Iterable<Optional<Exception>>> doAsyncWriteMessages(final Iterable<AtomicWrite> messages) {
207 return Futures.future(() -> {
208 for (AtomicWrite write : messages) {
209 for (PersistentRepr repr : CollectionConverters.asJava(write.payload())) {
210 LOG.trace("doAsyncWriteMessages: id: {}: seqNr: {}, payload: {}", repr.persistenceId(),
211 repr.sequenceNr(), repr.payload());
213 addEntry(repr.persistenceId(), repr.sequenceNr(), repr.payload());
215 WriteMessagesComplete complete = WRITE_MESSAGES_COMPLETE.get(repr.persistenceId());
216 if (complete != null) {
217 if (complete.ofType == null || complete.ofType.equals(repr.payload().getClass())) {
218 complete.latch.countDown();
224 return Collections.emptyList();
225 }, context().dispatcher());
229 public Future<Void> doAsyncDeleteMessagesTo(final String persistenceId, final long toSequenceNr) {
230 LOG.trace("doAsyncDeleteMessagesTo: {}", toSequenceNr);
231 Map<Long, Object> journal = JOURNALS.get(persistenceId);
232 if (journal != null) {
233 synchronized (journal) {
234 journal.keySet().removeIf(num -> num <= toSequenceNr);
238 CountDownLatch latch = DELETE_MESSAGES_COMPLETE_LATCHES.get(persistenceId);
243 return Futures.successful(null);