throughput = 1
}
+ serialization-dispatcher {
+ type = Dispatcher
+ executor = "fork-join-executor"
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 15
+ }
+ throughput = 1
+ }
+
+ shard-dispatcher {
+ type = Dispatcher
+ executor = "default-executor"
+
+ # We use a ControlAwareMailbox so that raft messages that implement ControlMessage
+ # are given higher priority.
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.UnboundedDequeBasedControlAwareMailbox"
+ }
+
akka {
loglevel = "INFO"
loggers = ["akka.event.slf4j.Slf4jLogger"]
java = "akka.serialization.JavaSerializer"
proto = "akka.remote.serialization.ProtobufSerializer"
readylocal = "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransactionSerializer"
+ simpleReplicatedLogEntry = "org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntrySerializer"
}
serialization-bindings {
"com.google.protobuf.Message" = proto
"org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction" = readylocal
+ "org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry" = simpleReplicatedLogEntry
}
default-dispatcher {
}
remote {
log-remote-lifecycle-events = off
+ # Disable passive connections, as we are seeing issues
+ # with read-only associations
+ use-passive-connections = off
netty.tcp {
maximum-frame-size = 419430400
artery {
advanced {
- maximum-frame-size = 1 GiB
- maximum-large-frame-size = 1 GiB
+ #maximum-frame-size = 256 KiB
+ #maximum-large-frame-size = 2 MiB
}
}
}
cluster {
seed-node-timeout = 12s
-
+
# Following is an excerpt from Akka Cluster Documentation
# link - http://doc.akka.io/docs/akka/snapshot/java/cluster-usage.html
# Warning - Akka recommends against using the auto-down feature of Akka Cluster in production.
- # This is crucial for correct behavior if you use Cluster Singleton or Cluster Sharding,
+ # This is crucial for correct behavior if you use Cluster Singleton or Cluster Sharding,
# especially together with Akka Persistence.
#auto-down-unreachable-after = 30s
allow-weakly-up-members = on
+
+ use-dispatcher = cluster-dispatcher
+
+ failure-detector.acceptable-heartbeat-pause = 3 s
}
persistence {
- journal.plugin = akka.persistence.journal.leveldb
+ journal {
+ # The following activates the default segmented file journal. Each persistent actor
+ # is stored in a separate directory, with multiple segment files. Segments are removed
+ # when they are no longer required.
+ #
+ plugin = akka.persistence.journal.segmented-file
+
+ segmented-file {
+ class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
+ # Root directory for segmented journal storage
+ root-directory = "segmented-journal"
+ # Maximum size of a single entry in the segmented journal
+ max-entry-size = 16M
+ # Maximum size of a segment
+ max-segment-size = 128M
+ # Map each segment into memory. Note that while this can improve performance,
+ # it will also place additional burden on system resources.
+ memory-mapped = false
+ }
+ }
+
+ snapshot-store.local.class = "org.opendaylight.controller.cluster.persistence.LocalSnapshotStore"
snapshot-store.plugin = akka.persistence.snapshot-store.local
}
}
+
+ cluster-dispatcher {
+ type = "Dispatcher"
+ executor = "fork-join-executor"
+ fork-join-executor {
+ parallelism-min = 2
+ parallelism-max = 4
+ }
+ }
}