Use memory-mapped segmented journal by default
[controller.git] opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
index eb4903e5527852d1f8d9286f0c69b6519147a634..ed6c7b9d64887970a8ad7837cb00d7d3079c9953 100644
@@ -2,8 +2,8 @@
 odl-cluster-data {
   bounded-mailbox {
     mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
-    mailbox-capacity = 1000
-    mailbox-push-timeout-time = 100ms
+    mailbox-capacity = 5000
+    mailbox-push-timeout-time = 10ms
   }
 
   metric-capture-enabled = true
@@ -28,22 +28,53 @@ odl-cluster-data {
     throughput = 1
   }
 
+  serialization-dispatcher {
+    type = Dispatcher
+    executor = "fork-join-executor"
+    fork-join-executor {
+      # Min number of threads to cap factor-based parallelism number to
+      parallelism-min = 2
+      # Parallelism (threads) = ceil(available processors * factor)
+      parallelism-factor = 2.0
+      # Max number of threads to cap factor-based parallelism number to
+      parallelism-max = 15
+    }
+    throughput = 1
+  }
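+
+  # Worked example for the fork-join sizing above (illustrative): on an 8-core host,
+  # pool size = min(max(ceil(8 * 2.0), parallelism-min), parallelism-max)
+  #           = min(max(16, 2), 15) = 15 threads.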
+
+  shard-dispatcher {
+    type = Dispatcher
+    executor = "default-executor"
+
+    # We use a ControlAwareMailbox so that raft messages that implement ControlMessage
+    # are given higher priority.
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.UnboundedDequeBasedControlAwareMailbox"
+  }
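+
+  # Illustration (an assumption, not part of this change): a message is treated as
+  # high-priority simply by implementing the akka.dispatch.ControlMessage marker
+  # interface, e.g. in Java:
+  #   public class MyRaftMessage implements ControlMessage { ... }
+  # The control-aware mailbox then dequeues such messages ahead of ordinary ones.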
+
   akka {
     loglevel = "INFO"
     loggers = ["akka.event.slf4j.Slf4jLogger"]
     logger-startup-timeout = 300s
 
+    # JFR requires boot delegation, which we do not have by default
+    java-flight-recorder {
+      enabled = false
+    }
+
     actor {
+      warn-about-java-serializer-usage = off
       provider = "akka.cluster.ClusterActorRefProvider"
       serializers {
         java = "akka.serialization.JavaSerializer"
         proto = "akka.remote.serialization.ProtobufSerializer"
         readylocal = "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransactionSerializer"
+        simpleReplicatedLogEntry = "org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntrySerializer"
       }
 
       serialization-bindings {
         "com.google.protobuf.Message" = proto
         "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction" = readylocal
+        "org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry" = simpleReplicatedLogEntry
       }
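+
+      # Note: Akka selects the most specific configured binding for a message's class,
+      # so SimpleReplicatedLogEntry instances use the dedicated serializer above rather
+      # than falling back to the generic Java serializer.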
 
       default-dispatcher {
@@ -60,26 +91,94 @@ odl-cluster-data {
     }
     remote {
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
+      # Disable passive connections, as we are seeing issues
+      # with read-only associations
+      use-passive-connections = off
+
+      classic.netty.tcp {
         maximum-frame-size = 419430400
         send-buffer-size = 52428800
         receive-buffer-size = 52428800
       }
+
+      artery {
+        enabled = on
+        transport = tcp
+
+        advanced {
+          maximum-frame-size = 400 MiB
+          #maximum-large-frame-size = 2 MiB
+        }
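+
+        # Note (deployment assumption): the node's bind address is expected to come
+        # from the member-specific configuration, e.g.
+        #   canonical.hostname = "127.0.0.1"
+        #   canonical.port = 2550
+        # rather than from this factory default.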
+      }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
-
       seed-node-timeout = 12s
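+
+      # Note (deployment assumption): seed nodes are likewise supplied per member. With
+      # Artery enabled, remote addresses use the "akka://" scheme rather than the
+      # classic "akka.tcp://", e.g.
+      #   seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550"]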
 
-      auto-down-unreachable-after = 30s
+      # Per the Akka Cluster documentation
+      # (http://doc.akka.io/docs/akka/snapshot/java/cluster-usage.html):
+      # Akka recommends against using the auto-down feature of Akka Cluster in production.
+      # Avoiding it is crucial for correct behavior if you use Cluster Singleton or
+      # Cluster Sharding, especially together with Akka Persistence.
+
+      allow-weakly-up-members = on
+
+      use-dispatcher = cluster-dispatcher
+
+      failure-detector.acceptable-heartbeat-pause = 3 s
+    }
+
+    persistence {
+      journal {
+        # The following activates the default segmented file journal. Each persistent actor
+        # is stored in a separate directory, with multiple segment files. Segments are removed
+        # when they are no longer required.
+        #
+        plugin = akka.persistence.journal.segmented-file
 
-      roles = [
-        "member-1"
-      ]
+        segmented-file {
+          class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
+          # Root directory for segmented journal storage
+          root-directory = "segmented-journal"
+          # Maximum size of a single entry in the segmented journal
+          max-entry-size = 16M
+          # Maximum size of a segment
+          max-segment-size = 128M
+          # Map each segment into memory. Defaults to true, use false to keep a heap-based
+          # buffer instead.
+          memory-mapped = true
+        }
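+
+        # Sizing note: with memory-mapped = true, each live segment occupies up to
+        # max-segment-size (128 MiB here) of virtual address space outside the JVM
+        # heap; memory-mapped = false trades that for on-heap buffer copies.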
+      }
+
+      # Journal configuration for shards that have persistence turned off. They still need a
+      # journal plugin configured, since they occasionally store entries in the journal, but
+      # the larger segment sizes used above would be wasteful.
+      non-persistent {
+        journal {
+          class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
+          # Root directory for segmented journal storage
+          root-directory = "segmented-journal"
+          # Maximum size of a single entry in the segmented journal
+          max-entry-size = 512K
+          # Maximum size of a segment
+          max-segment-size = 1M
+          # Map each segment into memory. Note that while this can improve performance,
+          # it will also place additional burden on system resources.
+          memory-mapped = false
+        }
+      }
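+
+      # Selection note (an assumption about usage): an actor opts into this plugin by
+      # overriding Akka Persistence's journalPluginId to point at this configuration
+      # path instead of the default plugin chosen above.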
 
+      snapshot-store.local.class = "org.opendaylight.controller.cluster.persistence.LocalSnapshotStore"
+      snapshot-store.plugin = akka.persistence.snapshot-store.local
+    }
+  }
+
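+  # Dedicated dispatcher for cluster housekeeping (selected via use-dispatcher above),
+  # keeping gossip and heartbeat processing off the shared default dispatcher.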
+  cluster-dispatcher {
+    type = "Dispatcher"
+    executor = "fork-join-executor"
+    fork-join-executor {
+      parallelism-min = 2
+      parallelism-max = 4
     }
   }
 }