Improve SegmentedJournalActor logging 25/100825/2
author    Robert Varga <robert.varga@pantheon.tech>
          Tue, 26 Apr 2022 19:22:58 +0000 (21:22 +0200)
committer Robert Varga <robert.varga@pantheon.tech>
          Tue, 26 Apr 2022 19:40:44 +0000 (21:40 +0200)
Log timing around journal open and around each write message. Also include
the number of bytes written in each message, so the elapsed time can be
correlated with the amount of data written.

JIRA: CONTROLLER-2043
Change-Id: I27bb6bc6edf0baf4ec0b3ec0d07f6bfd1adf867e
Signed-off-by: Robert Varga <robert.varga@pantheon.tech>
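
For readers unfamiliar with the pattern, the following is a minimal sketch of
the timing and byte-accounting approach this change applies. JournalWriter and
writeBatch are hypothetical stand-ins for illustration only, not the actual
DataJournal API; only the Guava Stopwatch and SLF4J usage mirror the change.

    import com.google.common.base.Stopwatch;
    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class TimedWriteSketch {
        private static final Logger LOG = LoggerFactory.getLogger(TimedWriteSketch.class);

        // Hypothetical sink standing in for the segmented journal writer:
        // returns the number of bytes a single payload occupied on disk.
        interface JournalWriter {
            long write(byte[] payload);
        }

        static void writeBatch(final String persistenceId, final JournalWriter writer,
                final List<byte[]> payloads) {
            final Stopwatch sw = Stopwatch.createStarted();
            long bytes = 0;
            for (byte[] payload : payloads) {
                // Accumulate per-entry sizes so the final log line can correlate
                // elapsed time with the amount of data written.
                bytes += writer.write(payload);
            }
            sw.stop();

            // A stopped Stopwatch renders as a human-readable duration, e.g. "3.2 ms".
            LOG.debug("{}: write of {} bytes completed in {}", persistenceId, bytes, sw);
        }
    }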
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java

index 678749b1c16cf9a4ddd9d0a052d763b70541ab19..9ba9394429ff82f831e1283c8020bf8c62c25dab 100644 (file)
@@ -80,6 +80,7 @@ abstract class DataJournal {
      * Handle a request to store some messages.
      *
      * @param message Request message
+     * @return number of bytes written
      */
-    abstract void handleWriteMessages(@NonNull WriteMessages message);
+    abstract long handleWriteMessages(@NonNull WriteMessages message);
 }
index d422ab14c73281b7c4ad25b71c0406ebeaab5092..3203d6617110674d08c86e79edc72b6b6db48393 100644 (file)
@@ -104,9 +104,10 @@ final class DataJournalV0 extends DataJournal {
 
     @Override
     @SuppressWarnings("checkstyle:illegalCatch")
-    void handleWriteMessages(final WriteMessages message) {
+    long handleWriteMessages(final WriteMessages message) {
         final int count = message.size();
         final SegmentedJournalWriter<DataJournalEntry> writer = entries.writer();
+        long bytes = 0;
 
         for (int i = 0; i < count; ++i) {
             final long mark = writer.getLastIndex();
@@ -115,22 +116,9 @@ final class DataJournalV0 extends DataJournal {
             final List<PersistentRepr> reprs = CollectionConverters.asJava(request.payload());
             LOG.trace("{}: append {}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), mark);
             try {
-                for (PersistentRepr repr : reprs) {
-                    final Object payload = repr.payload();
-                    if (!(payload instanceof Serializable)) {
-                        throw new UnsupportedOperationException("Non-serializable payload encountered "
-                                + payload.getClass());
-                    }
-
-                    LOG.trace("{}: starting append of {}", persistenceId, payload);
-                    final Indexed<ToPersistence> entry = writer.append(new ToPersistence(repr));
-                    final int size = entry.size();
-                    LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size,
-                        entry.index());
-                    recordMessageSize(size);
-                }
+                bytes += writePayload(writer, reprs);
             } catch (Exception e) {
-                LOG.warn("{}: failed to write out request", persistenceId, e);
+                LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count, mark, e);
                 message.setFailure(i, e);
                 writer.truncate(mark);
                 continue;
@@ -139,5 +127,25 @@ final class DataJournalV0 extends DataJournal {
             message.setSuccess(i);
         }
         writer.flush();
+        return bytes;
+    }
+
+    private long writePayload(final SegmentedJournalWriter<DataJournalEntry> writer, final List<PersistentRepr> reprs) {
+        long bytes = 0;
+        for (PersistentRepr repr : reprs) {
+            final Object payload = repr.payload();
+            if (!(payload instanceof Serializable)) {
+                throw new UnsupportedOperationException("Non-serializable payload encountered "
+                        + payload.getClass());
+            }
+
+            LOG.trace("{}: starting append of {}", persistenceId, payload);
+            final Indexed<ToPersistence> entry = writer.append(new ToPersistence(repr));
+            final int size = entry.size();
+            LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index());
+            recordMessageSize(size);
+            bytes += size;
+        }
+        return bytes;
     }
 }
index e5c5b7807b0b9ea016cdc4aa0835f49daf5ff8dd..d440cbed1c9e73e967cfbd0509755d8d21b585df 100644 (file)
@@ -19,6 +19,7 @@ import com.codahale.metrics.Meter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
 import com.google.common.base.MoreObjects;
+import com.google.common.base.Stopwatch;
 import io.atomix.storage.StorageLevel;
 import io.atomix.storage.journal.Indexed;
 import io.atomix.storage.journal.SegmentedJournal;
@@ -289,13 +290,16 @@ final class SegmentedJournalActor extends AbstractActor {
     private void handleWriteMessages(final WriteMessages message) {
         ensureOpen();
 
-        final long startTicks = System.nanoTime();
+        final Stopwatch sw = Stopwatch.createStarted();
         final long start = dataJournal.lastWrittenSequenceNr();
+        final long bytes = dataJournal.handleWriteMessages(message);
+        sw.stop();
 
-        dataJournal.handleWriteMessages(message);
-
-        batchWriteTime.update(System.nanoTime() - startTicks, TimeUnit.NANOSECONDS);
+        batchWriteTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
         messageWriteCount.mark(dataJournal.lastWrittenSequenceNr() - start);
+
+        // log message after statistics are updated
+        LOG.debug("{}: write of {} bytes completed in {}", persistenceId, bytes, sw);
     }
 
     private void handleUnknown(final Object message) {
@@ -308,6 +312,7 @@ final class SegmentedJournalActor extends AbstractActor {
             return;
         }
 
+        final Stopwatch sw = Stopwatch.createStarted();
         deleteJournal = SegmentedJournal.<Long>builder().withDirectory(directory).withName("delete")
                 .withNamespace(DELETE_NAMESPACE).withMaxSegmentSize(DELETE_SEGMENT_SIZE).build();
         final Indexed<Long> lastEntry = deleteJournal.writer().getLastEntry();
@@ -316,7 +321,7 @@ final class SegmentedJournalActor extends AbstractActor {
         dataJournal = new DataJournalV0(persistenceId, messageSize, context().system(), storage, directory,
             maxEntrySize, maxSegmentSize);
         dataJournal.deleteTo(lastDelete);
-        LOG.debug("{}: journal open with last index {}, deleted to {}", persistenceId,
+        LOG.debug("{}: journal open in {} with last index {}, deleted to {}", persistenceId, sw,
             dataJournal.lastWrittenSequenceNr(), lastDelete);
     }
 }