From b47ffc2df37ef67559274068d651c86aa666cbc3 Mon Sep 17 00:00:00 2001
From: Robert Varga
Date: Fri, 16 Dec 2016 11:23:28 +0100
Subject: [PATCH] Use Akka artery for remote transport

This transport was introduced in Akka 2.4.11 and is supposed to be
faster than the TCP transport. Enable it globally so we can evaluate it.

Change-Id: I25234f82ac056700e8b56abaeb452c53ec5b9dbd
Signed-off-by: Robert Varga
Signed-off-by: Tom Pantelis
---
 .../example/ExampleRoleChangeListener.java    |  2 +-
 .../src/main/resources/application.conf       | 20 ++---
 .../src/main/resources/application.conf       | 79 -------------------
 .../src/main/resources/initial/akka.conf      |  9 ++-
 .../main/resources/initial/factory-akka.conf  | 17 ++--
 .../sal-distributed-datastore/client.conf     | 36 ---------
 .../sal-distributed-datastore/server.conf     | 37 ---------
 .../src/main/resources/application.conf       | 71 -----------------
 .../src/main/resources/module-shards.conf     | 25 ------
 .../src/main/resources/modules.conf           |  7 --
 .../DataTreeCohortIntegrationTest.java        |  2 +-
 .../DistributedDataStoreIntegrationTest.java  |  2 +-
 ...butedDataStoreRemotingIntegrationTest.java |  8 +-
 .../cluster/datastore/MemberNode.java         | 37 +++++++--
 .../datastore/PrefixShardCreationTest.java    |  6 +-
 ...ributedEntityOwnershipIntegrationTest.java | 15 ++--
 .../shardmanager/ShardManagerTest.java        | 42 +++++-----
 .../datastore/utils/ActorContextTest.java     | 20 ++---
 .../datastore/utils/MockClusterWrapper.java   |  2 +-
 .../src/test/resources/application.conf       | 55 +++++++++++--
 .../src/main/resources/member-2.conf          | 14 ++--
 .../src/main/resources/member-3.conf          | 14 ++--
 .../src/main/resources/application.conf       | 64 ---------------
 .../src/test/resources/application.conf       | 46 ++++++-----
 24 files changed, 196 insertions(+), 434 deletions(-)
 delete mode 100644 opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf
 delete mode 100644 opendaylight/md-sal/sal-distributed-datastore/client.conf
 delete mode 100644 opendaylight/md-sal/sal-distributed-datastore/server.conf
 delete mode 100644 opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf
 delete mode 100644 opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf
 delete mode 100644 opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf
 delete mode 100644 opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf

diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleRoleChangeListener.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleRoleChangeListener.java
index f6930e8b09..3336ad9b3b 100644
--- a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleRoleChangeListener.java
+++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleRoleChangeListener.java
@@ -35,7 +35,7 @@ import scala.concurrent.duration.FiniteDuration;
  */
 public class ExampleRoleChangeListener extends AbstractUntypedActor implements AutoCloseable{
     // the akka url should be set to the notifiers actor-system and domain.
-    private static final String NOTIFIER_AKKA_URL = "akka.tcp://raft-test@127.0.0.1:2550/user/";
+    private static final String NOTIFIER_AKKA_URL = "akka://raft-test@127.0.0.1:2550/user/";
 
     private Map notifierRegistrationStatus = new HashMap<>();
     private Cancellable registrationSchedule = null;
 
diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/resources/application.conf b/opendaylight/md-sal/sal-akka-raft-example/src/main/resources/application.conf
index b2132b88b0..02dd4a1240 100644
--- a/opendaylight/md-sal/sal-akka-raft-example/src/main/resources/application.conf
+++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/resources/application.conf
@@ -45,12 +45,13 @@ raft-test {
     }
 
     remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-      }
-    }
+      log-remote-lifecycle-events = off
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2550
+      }
+    }
   }
 }
 
@@ -65,9 +66,10 @@ raft-test-listener {
 
     remote {
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2554
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2554
       }
     }
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf b/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf
deleted file mode 100644
index b2132b88b0..0000000000
--- a/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf
+++ /dev/null
@@ -1,79 +0,0 @@
-akka {
-
-  loglevel = "DEBUG"
-
-  actor {
-    # enable to test serialization only.
-    # serialize-messages = on
-
-    serializers {
-      java = "akka.serialization.JavaSerializer"
-      proto = "akka.remote.serialization.ProtobufSerializer"
-    }
-
-    serialization-bindings {
-      "org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry" = java
-      "org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener" = java
-      "com.google.protobuf.Message" = proto
-      "com.google.protobuf.GeneratedMessage" = proto
-    }
-  }
-}
-
-raft-test {
-  akka {
-
-    loglevel = "DEBUG"
-
-    actor {
-      # enable to test serialization only.
-      # serialize-messages = on
-
-      provider = "akka.remote.RemoteActorRefProvider"
-
-      serializers {
-        java = "akka.serialization.JavaSerializer"
-        proto = "akka.remote.serialization.ProtobufSerializer"
-      }
-
-      serialization-bindings {
-        "org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry" = java
-        "org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener" = java
-        "com.google.protobuf.Message" = proto
-        "com.google.protobuf.GeneratedMessage" = proto
-      }
-    }
-
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-      }
-    }
-  }
-}
-
-raft-test-listener {
-
-  akka {
-    loglevel = "DEBUG"
-
-    actor {
-      provider = "akka.remote.RemoteActorRefProvider"
-    }
-
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2554
-      }
-    }
-
-    member-id = "member-1"
-  }
-}
-
-
diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
index 852171285c..31e54601dc 100644
--- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
+++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
@@ -2,14 +2,15 @@ odl-cluster-data {
   akka {
 
     remote {
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2550
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
+      seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550"]
 
       roles = [
         "member-1"
diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
index 10660fdb5a..af200deedc 100644
--- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
+++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
@@ -61,18 +61,22 @@ odl-cluster-data {
     }
     remote {
       log-remote-lifecycle-events = off
+
       netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
         maximum-frame-size = 419430400
         send-buffer-size = 52428800
         receive-buffer-size = 52428800
       }
+
+      artery {
+        advanced {
+          maximum-frame-size = 1 GiB
+          maximum-large-frame-size = 1 GiB
+        }
+      }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
-      seed-node-timeout = 12s
 
       # Following is an excerpt from Akka Cluster Documentation
@@ -84,11 +88,6 @@ odl-cluster-data {
       #auto-down-unreachable-after = 30s
 
       allow-weakly-up-members = on
-
-      roles = [
-        "member-1"
-      ]
-
     }
 
     persistence {
diff --git a/opendaylight/md-sal/sal-distributed-datastore/client.conf b/opendaylight/md-sal/sal-distributed-datastore/client.conf
deleted file mode 100644
index 90bfb4c3e1..0000000000
--- a/opendaylight/md-sal/sal-distributed-datastore/client.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-ODLCluster{
-  akka {
-    actor {
-      serialize-messages = on
-
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-        java = "akka.serialization.JavaSerializer"
-        proto = "akka.remote.serialization.ProtobufSerializer"
-      }
-
-      serialization-bindings {
-        "com.google.protobuf.Message" = proto
-        "com.google.protobuf.GeneratedMessage" = proto
-        "com.google.protobuf.GeneratedMessage$GeneratedExtension" = proto
-        "com.google.protobuf.FieldSet" = proto
-      }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2552
-        maximum-frame-size = 2097152
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/server.conf b/opendaylight/md-sal/sal-distributed-datastore/server.conf
deleted file mode 100644
index 6209adfc17..0000000000
--- a/opendaylight/md-sal/sal-distributed-datastore/server.conf
+++ /dev/null
@@ -1,37 +0,0 @@
-
-ODLCluster{
-  akka {
-    actor {
-      serialize-messages = on
-
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-        java = "akka.serialization.JavaSerializer"
-        proto = "akka.remote.serialization.ProtobufSerializer"
-      }
-
-      serialization-bindings {
-        "com.google.protobuf.Message" = proto
-        "com.google.protobuf.GeneratedMessage" = proto
-        "com.google.protobuf.GeneratedMessage$GeneratedExtension" = proto
-        "com.google.protobuf.FieldSet" = proto
-      }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-        maximum-frame-size = 2097152
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf
deleted file mode 100644
index 2449976a25..0000000000
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf
+++ /dev/null
@@ -1,71 +0,0 @@
-
-odl-cluster-data {
-  bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
-    mailbox-capacity = 1000
-    mailbox-push-timeout-time = 100ms
-  }
-
-  metric-capture-enabled = true
-
-  akka {
-    loggers = ["akka.event.slf4j.Slf4jLogger"]
-    cluster {
-      roles = [
-        "member-1"
-      ]
-    }
-    actor {
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-        java = "akka.serialization.JavaSerializer"
-        proto = "akka.remote.serialization.ProtobufSerializer"
-        readylocal = "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransactionSerializer"
-      }
-
-      serialization-bindings {
-        "com.google.protobuf.Message" = proto
-        "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction" = readylocal
-      }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-        maximum-frame-size = 419430400
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
-
-odl-cluster-rpc {
-  akka {
-    loggers = ["akka.event.slf4j.Slf4jLogger"]
-    actor {
-      provider = "akka.cluster.ClusterActorRefProvider"
-
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2551
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf
deleted file mode 100644
index 60dd7754d0..0000000000
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-module-shards = [
-  {
-    name = "default"
-    shards = [
-      {
-        name="default",
-        replicas = [
-          "member-1",
-        ]
-      }
-    ]
-  },
-  {
-    name = "inventory"
-    shards = [
-      {
-        name="inventory"
-        replicas = [
-          "member-1",
-        ]
-      }
-    ]
-  }
-
-]
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf
deleted file mode 100644
index e820703eeb..0000000000
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-modules = [
-  {
-    name = "inventory"
-    namespace = "urn:opendaylight:inventory"
-    shard-strategy = "module"
-  }
-]
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
index 301f65f3a0..0352db6f76 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
@@ -64,7 +64,7 @@ public class DataTreeCohortIntegrationTest {
     @BeforeClass
     public static void setUpClass() throws IOException {
         system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
-        final Address member1Address = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
+        final Address member1Address = AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");
         Cluster.get(system).join(member1Address);
     }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
index 9b97104d97..cb2284a30c 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
@@ -97,7 +97,7 @@ public class DistributedDataStoreIntegrationTest {
     @BeforeClass
     public static void setUpClass() throws IOException {
         system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
-        Address member1Address = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
+        Address member1Address = AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");
         Cluster.get(system).join(member1Address);
     }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
index 7474690bca..ff44f01402 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
@@ -103,9 +103,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     private static final String[] CARS = {"cars"};
 
     private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse(
-            "akka.tcp://cluster-test@127.0.0.1:2558");
+            "akka://cluster-test@127.0.0.1:2558");
     private static final Address MEMBER_2_ADDRESS = AddressFromURIString.parse(
-            "akka.tcp://cluster-test@127.0.0.1:2559");
+            "akka://cluster-test@127.0.0.1:2559");
 
     private static final String MODULE_SHARDS_CARS_ONLY_1_2 = "module-shards-cars-member-1-and-2.conf";
     private static final String MODULE_SHARDS_CARS_PEOPLE_1_2 = "module-shards-member1-and-2.conf";
@@ -806,7 +806,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         String testName = "testLeadershipTransferOnShutdown";
         initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE);
 
-        IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System, followerDatastoreContextBuilder);
+        IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
+                DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(100));
         try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupDistributedDataStore(testName,
                 MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
 
@@ -912,6 +913,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test(expected = AskTimeoutException.class)
     public void testTransactionWithShardLeaderNotResponding() throws Exception {
+        followerDatastoreContextBuilder.shardElectionTimeoutFactor(50);
         initDatastoresWithCars("testTransactionWithShardLeaderNotResponding");
 
         // Do an initial read to get the primary shard info cached.
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
index bb2ce19dee..e6ea971247 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
@@ -12,7 +12,6 @@ import static org.junit.Assert.fail;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
-import akka.actor.Address;
 import akka.actor.AddressFromURIString;
 import akka.cluster.Cluster;
 import akka.cluster.ClusterEvent.CurrentClusterState;
@@ -23,6 +22,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Uninterruptibles;
+import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
 import java.util.List;
 import java.util.Set;
@@ -34,6 +34,7 @@ import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.Duration;
@@ -47,7 +48,7 @@ import scala.concurrent.duration.Duration;
  * @author Thomas Pantelis
 */
 public class MemberNode {
-    static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
+    private static final String MEMBER_1_ADDRESS = "akka://cluster-test@127.0.0.1:2558";
 
     private IntegrationTestKit kit;
     private AbstractDataStore configDataStore;
@@ -110,6 +111,7 @@ public class MemberNode {
         fail("Member " + member + " is now down");
     }
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     public void cleanup() {
         if (!cleanedUp) {
             cleanedUp = true;
@@ -120,7 +122,11 @@ public class MemberNode {
                 operDataStore.close();
             }
 
-            IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+            try {
+                IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+            } catch (RuntimeException e) {
+                LoggerFactory.getLogger(MemberNode.class).warn("Failed to shutdown actor system", e);
+            }
         }
     }
 
@@ -179,6 +185,7 @@ public class MemberNode {
         private final List members;
         private String moduleShardsConfig;
         private String akkaConfig;
+        private boolean useAkkaArtery = true;
         private String[] waitForshardLeader = new String[0];
         private String testName;
         private SchemaContext schemaContext;
@@ -210,6 +217,16 @@ public class MemberNode {
             return this;
         }
 
+        /**
+         * Specifies whether or not to use akka artery for remoting. Default is true.
+         *
+         * @return this Builder
+         */
+        public Builder useAkkaArtery(final boolean newUseAkkaArtery) {
+            this.useAkkaArtery = newUseAkkaArtery;
+            return this;
+        }
+
         /**
         * Specifies the name of the test that is appended to the data store names. This is required.
         *
@@ -272,8 +289,18 @@ public class MemberNode {
             MemberNode node = new MemberNode();
             node.datastoreContextBuilder = datastoreContextBuilder;
 
-            ActorSystem system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig(akkaConfig));
-            Cluster.get(system).join(MEMBER_1_ADDRESS);
+            Config baseConfig = ConfigFactory.load();
+            Config config;
+            if (useAkkaArtery) {
+                config = baseConfig.getConfig(akkaConfig);
+            } else {
+                config = baseConfig.getConfig(akkaConfig + "-without-artery")
+                        .withFallback(baseConfig.getConfig(akkaConfig));
+            }
+
+            ActorSystem system = ActorSystem.create("cluster-test", config);
+            String member1Address = useAkkaArtery ? MEMBER_1_ADDRESS : MEMBER_1_ADDRESS.replace("akka", "akka.tcp");
+            Cluster.get(system).join(AddressFromURIString.parse(member1Address));
 
             node.kit = new IntegrationTestKit(system, datastoreContextBuilder);
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/PrefixShardCreationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/PrefixShardCreationTest.java
index 2a3431f67f..c87b2f4369 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/PrefixShardCreationTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/PrefixShardCreationTest.java
@@ -100,7 +100,7 @@ public class PrefixShardCreationTest extends AbstractShardManagerTest {
 
         // Create ACtorSystem for member-1
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final TestActorRef shardManager1 = TestActorRef.create(system1,
                 newTestShardMgrBuilder(new ConfigurationImpl(new EmptyModuleShardConfigProvider()))
@@ -113,7 +113,7 @@ public class PrefixShardCreationTest extends AbstractShardManagerTest {
 
         final ActorSystem system2 = newActorSystem("Member2");
 
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final TestActorRef shardManager2 = TestActorRef.create(system2,
                 newTestShardMgrBuilder()
@@ -186,4 +186,4 @@ public class PrefixShardCreationTest extends AbstractShardManagerTest {
         actorSystems.add(system);
         return system;
     }
-}
\ No newline at end of file
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java
index 6ff4ad25ec..b4ad323c49 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java
@@ -348,23 +348,28 @@ public class DistributedEntityOwnershipIntegrationTest {
                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
 
         String name = "test";
-        final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
 
-        final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
-        final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
-        final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4").testName(name)
+        final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
-        final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5").testName(name)
+        final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java
index 521a0b86bd..dc640d2c92 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java
@@ -634,7 +634,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         // Create an ActorSystem ShardManager actor for member-1.
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final TestActorRef shardManager1 = TestActorRef.create(system1,
                 newTestShardMgrBuilderWithMockShardActor().cluster(
@@ -645,7 +645,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         final ActorSystem system2 = newActorSystem("Member2");
 
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final ActorRef mockShardActor2 = newMockShardActor(system2, "astronauts", "member-2");
 
@@ -684,7 +684,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         shardManager2.underlyingActor().verifyFindPrimary();
 
-        Cluster.get(system2).down(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).down(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         shardManager1.underlyingActor().waitForMemberRemoved();
 
@@ -705,7 +705,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Create an ActorSystem ShardManager actor for member-1.
final ActorSystem system1 = newActorSystem("Member1"); - Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); final ActorRef mockShardActor1 = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1"); @@ -718,7 +718,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { final ActorSystem system2 = newActorSystem("Member2"); - Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); final ActorRef mockShardActor2 = newMockShardActor(system2, Shard.DEFAULT_NAME, "member-2"); @@ -758,7 +758,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { assertTrue("Unexpected primary path " + path, path.contains("member-2-shard-default-config")); shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2", - "akka.tcp://cluster-test@127.0.0.1:2558"), getRef()); + "akka://cluster-test@127.0.0.1:2558"), getRef()); shardManager1.underlyingActor().waitForUnreachableMember(); @@ -767,7 +767,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { MessageCollectorActor.clearMessages(mockShardActor1); shardManager1.tell( - MockClusterWrapper.createMemberRemoved("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"), + MockClusterWrapper.createMemberRemoved("member-2", "akka://cluster-test@127.0.0.1:2558"), getRef()); MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerDown.class); @@ -777,7 +777,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { expectMsgClass(duration("5 seconds"), NoShardLeaderException.class); shardManager1.tell( - MockClusterWrapper.createReachableMember("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"), + MockClusterWrapper.createReachableMember("member-2", "akka://cluster-test@127.0.0.1:2558"), getRef()); shardManager1.underlyingActor().waitForReachableMember(); @@ -793,7 +793,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { assertTrue("Unexpected primary path " + path1, path1.contains("member-2-shard-default-config")); shardManager1.tell( - MockClusterWrapper.createMemberUp("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"), + MockClusterWrapper.createMemberUp("member-2", "akka://cluster-test@127.0.0.1:2558"), getRef()); MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerUp.class); @@ -801,13 +801,13 @@ public class ShardManagerTest extends AbstractShardManagerTest { // Test FindPrimary wait succeeds after reachable member event. shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2", - "akka.tcp://cluster-test@127.0.0.1:2558"), getRef()); + "akka://cluster-test@127.0.0.1:2558"), getRef()); shardManager1.underlyingActor().waitForUnreachableMember(); shardManager1.tell(new FindPrimary("default", true), getRef()); shardManager1.tell( - MockClusterWrapper.createReachableMember("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"), + MockClusterWrapper.createReachableMember("member-2", "akka://cluster-test@127.0.0.1:2558"), getRef()); RemotePrimaryShardFound found2 = expectMsgClass(duration("5 seconds"), RemotePrimaryShardFound.class); @@ -827,7 +827,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { // Create an ActorSystem ShardManager actor for member-1. 
final ActorSystem system1 = newActorSystem("Member1"); - Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); final ActorRef mockShardActor1 = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1"); @@ -842,7 +842,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { final ActorSystem system2 = newActorSystem("Member2"); - Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); final ActorRef mockShardActor2 = newMockShardActor(system2, Shard.DEFAULT_NAME, "member-2"); @@ -885,7 +885,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { system1.actorSelection(mockShardActor1.path()), DataStoreVersions.CURRENT_VERSION)); shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2", - "akka.tcp://cluster-test@127.0.0.1:2558"), getRef()); + "akka://cluster-test@127.0.0.1:2558"), getRef()); shardManager1.underlyingActor().waitForUnreachableMember(); @@ -1449,7 +1449,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { // Create an ActorSystem ShardManager actor for member-1. final ActorSystem system1 = newActorSystem("Member1"); - Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); ActorRef mockDefaultShardActor = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1"); final TestActorRef newReplicaShardManager = TestActorRef.create(system1, newTestShardMgrBuilder(mockConfig).shardActor(mockDefaultShardActor) @@ -1459,7 +1459,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { // Create an ActorSystem ShardManager actor for member-2. final ActorSystem system2 = newActorSystem("Member2"); - Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); String memberId2 = "member-2-shard-astronauts-" + shardMrgIDSuffix; String name = ShardIdentifier.create("astronauts", MEMBER_2, "config").toString(); @@ -1680,7 +1680,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { newReplicaShardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef()); MockClusterWrapper.sendMemberUp(newReplicaShardManager, "member-2", - AddressFromURIString.parse("akka.tcp://non-existent@127.0.0.1:5").toString()); + AddressFromURIString.parse("akka://non-existent@127.0.0.1:5").toString()); newReplicaShardManager.tell(new AddShardReplica("astronauts"), getRef()); Status.Failure resp = expectMsgClass(duration("5 seconds"), Status.Failure.class); @@ -1748,7 +1748,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { // Create an ActorSystem ShardManager actor for member-1. 
final ActorSystem system1 = newActorSystem("Member1"); - Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); ActorRef mockDefaultShardActor = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1"); final TestActorRef newReplicaShardManager = TestActorRef.create(system1, @@ -1758,7 +1758,7 @@ public class ShardManagerTest extends AbstractShardManagerTest { // Create an ActorSystem ShardManager actor for member-2. final ActorSystem system2 = newActorSystem("Member2"); - Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558")); + Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558")); String name = ShardIdentifier.create("default", MEMBER_2, shardMrgIDSuffix).toString(); String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix; @@ -1774,11 +1774,11 @@ public class ShardManagerTest extends AbstractShardManagerTest { shardManagerID); // Because mockShardLeaderActor is created at the top level of the actor system it has an address like so, - // akka.tcp://cluster-test@127.0.0.1:2559/user/member-2-shard-default-config1 + // akka://cluster-test@127.0.0.1:2559/user/member-2-shard-default-config1 // However when a shard manager has a local shard which is a follower and a leader that is remote it will // try to compute an address for the remote shard leader using the ShardPeerAddressResolver. This address will // look like so, - // akka.tcp://cluster-test@127.0.0.1:2559/user/shardmanager-config1/member-2-shard-default-config1 + // akka://cluster-test@127.0.0.1:2559/user/shardmanager-config1/member-2-shard-default-config1 // In this specific case if we did a FindPrimary for shard default from member-1 we would come up // with the address of an actor which does not exist, therefore any message sent to that actor would go to // dead letters. diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorContextTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorContextTest.java index 78a00fb00e..cb0864ffaf 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorContextTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorContextTest.java @@ -256,13 +256,13 @@ public class ActorContextTest extends AbstractActorTest { assertEquals(true, actorContext.isPathLocal("akka://test/user/token2/token3/$a")); // self address of remote format,but Tx path local format. - clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550)); + clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550)); actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class)); assertEquals(true, actorContext.isPathLocal( "akka://system/user/shardmanager/shard/transaction")); // self address of local format,but Tx path remote format. 
- clusterWrapper.setSelfAddress(new Address("akka.tcp", "system")); + clusterWrapper.setSelfAddress(new Address("akka", "system")); actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class)); assertEquals(false, actorContext.isPathLocal( "akka://system@127.0.0.1:2550/user/shardmanager/shard/transaction")); @@ -273,24 +273,24 @@ public class ActorContextTest extends AbstractActorTest { assertEquals(true, actorContext.isPathLocal("akka://test1/user/$a")); //ip and port same - clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550)); + clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550)); actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class)); - assertEquals(true, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2550/")); + assertEquals(true, actorContext.isPathLocal("akka://system@127.0.0.1:2550/")); // forward-slash missing in address - clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550)); + clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550)); actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class)); - assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2550")); + assertEquals(false, actorContext.isPathLocal("akka://system@127.0.0.1:2550")); //ips differ - clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550)); + clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550)); actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class)); - assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.1.0.1:2550/")); + assertEquals(false, actorContext.isPathLocal("akka://system@127.1.0.1:2550/")); //ports differ - clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550)); + clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550)); actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class)); - assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2551/")); + assertEquals(false, actorContext.isPathLocal("akka://system@127.0.0.1:2551/")); } @Test diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockClusterWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockClusterWrapper.java index 0b9371698f..9822559d5d 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockClusterWrapper.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockClusterWrapper.java @@ -22,7 +22,7 @@ import scala.collection.JavaConversions; public class MockClusterWrapper implements ClusterWrapper { - private Address selfAddress = new Address("akka.tcp", "test", "127.0.0.1", 2550); + private Address selfAddress = new Address("akka", "test", "127.0.0.1", 2550); private final MemberName currentMemberName; public MockClusterWrapper() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf index f0cf18932f..bc9a86bf89 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf @@ -73,6 +73,12 @@ Member1 { } remote { log-remote-lifecycle-events = off + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2558 + } + netty.tcp { hostname = "127.0.0.1" port = 2558 @@ -80,7 +86,6 @@ Member1 { } cluster { - auto-down-unreachable-after = 100s retry-unsuccessful-join-after = 100ms roles = [ @@ -126,6 +131,12 @@ Member2 { } remote { log-remote-lifecycle-events = off + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2559 + } + netty.tcp { hostname = "127.0.0.1" port = 2559 @@ -133,7 +144,6 @@ Member2 { } cluster { - auto-down-unreachable-after = 100s retry-unsuccessful-join-after = 100ms roles = [ @@ -181,6 +191,12 @@ Member3 { } remote { log-remote-lifecycle-events = off + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2557 + } + netty.tcp { hostname = "127.0.0.1" port = 2557 @@ -188,7 +204,6 @@ Member3 { } cluster { - auto-down-unreachable-after = 100s retry-unsuccessful-join-after = 100ms roles = [ @@ -236,6 +251,12 @@ Member4 { } remote { log-remote-lifecycle-events = off + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2560 + } + netty.tcp { hostname = "127.0.0.1" port = 2560 @@ -243,7 +264,6 @@ Member4 { } cluster { - auto-down-unreachable-after = 100s retry-unsuccessful-join-after = 100ms roles = [ @@ -291,6 +311,12 @@ Member5 { } remote { log-remote-lifecycle-events = off + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2561 + } + netty.tcp { hostname = "127.0.0.1" port = 2561 @@ -298,7 +324,6 @@ Member5 { } cluster { - auto-down-unreachable-after = 100s retry-unsuccessful-join-after = 100ms roles = [ @@ -307,3 +332,23 @@ Member5 { } } } + +Member1-without-artery { + akka.remote.artery.enabled = off +} + +Member2-without-artery { + akka.remote.artery.enabled = off +} + +Member3-without-artery { + akka.remote.artery.enabled = off +} + +Member4-without-artery { + akka.remote.artery.enabled = off +} + +Member5-without-artery { + akka.remote.artery.enabled = off +} diff --git a/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-2.conf b/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-2.conf index 38aa4c5174..28a35ecb4c 100644 --- a/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-2.conf +++ b/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-2.conf @@ -26,17 +26,15 @@ odl-cluster-data { } remote { log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2553 - maximum-frame-size = 419430400 - send-buffer-size = 52428800 - receive-buffer-size = 52428800 + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2553 } } cluster { - seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550", "akka.tcp://opendaylight-cluster-data@127.0.0.1:2553"] + seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2553"] auto-down-unreachable-after = 10s @@ -46,4 +44,4 @@ odl-cluster-data { } } -} \ No newline at end of file +} diff --git a/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-3.conf b/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-3.conf index 7b2da6a1db..f9afe04278 100644 --- 
a/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-3.conf +++ b/opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-3.conf @@ -26,17 +26,15 @@ odl-cluster-data { } remote { log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2554 - maximum-frame-size = 419430400 - send-buffer-size = 52428800 - receive-buffer-size = 52428800 + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2554 } } cluster { - seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550", "akka.tcp://opendaylight-cluster-data@127.0.0.1:2554"] + seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2554"] auto-down-unreachable-after = 10s @@ -46,4 +44,4 @@ odl-cluster-data { } } -} \ No newline at end of file +} diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf b/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf deleted file mode 100644 index c1c78d36f3..0000000000 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf +++ /dev/null @@ -1,64 +0,0 @@ - -odl-cluster-data { - akka { - cluster { - roles = [ - "member-1" - ] - } - actor { - provider = "akka.cluster.ClusterActorRefProvider" - serializers { - java = "akka.serialization.JavaSerializer" - proto = "akka.remote.serialization.ProtobufSerializer" - } - - serialization-bindings { - "com.google.protobuf.Message" = proto - - } - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2550 - maximum-frame-size = 2097152 - send-buffer-size = 52428800 - receive-buffer-size = 52428800 - } - } - - cluster { - seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"] - - auto-down-unreachable-after = 10s - } - } -} - -odl-cluster-rpc { - bounded-mailbox { - mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" - mailbox-capacity = 1000 - mailbox-push-timeout-time = 100ms - } - - akka { - actor { - provider = "akka.cluster.ClusterActorRefProvider" - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2551 - } - } - - cluster { - seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"] - auto-down-unreachable-after = 10s - } - } -} diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf b/opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf index f22b4eafb3..524e69ffe3 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf @@ -1,4 +1,4 @@ -odl-cluster-rpc{ +odl-cluster-rpc { bounded-mailbox { mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" mailbox-capacity = 1000 @@ -22,20 +22,21 @@ odl-cluster-rpc{ log-sent-messages = on log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2550 + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2550 } } cluster { - seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2550"] + seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2550"] auto-down-unreachable-after = 10s } } } -unit-test{ +unit-test { akka { loglevel = "DEBUG" #loggers = ["akka.event.slf4j.Slf4jLogger"] @@ -48,7 +49,7 @@ unit-test{ } } -memberA{ +memberA { bounded-mailbox { mailbox-type = 
"org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" mailbox-capacity = 1000 @@ -68,20 +69,21 @@ memberA{ log-sent-messages = off log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2551 + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2551 } } cluster { - seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2551"] + seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"] auto-down-unreachable-after = 10s } } } -memberB{ +memberB { bounded-mailbox { mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" mailbox-capacity = 1000 @@ -102,20 +104,21 @@ memberB{ log-sent-messages = off log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2552 + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2552 } } cluster { - seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2551"] + seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"] auto-down-unreachable-after = 10s } } } -memberC{ +memberC { bounded-mailbox { mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" mailbox-capacity = 1000 @@ -135,14 +138,15 @@ memberC{ log-sent-messages = off log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 2553 + artery { + enabled = on + canonical.hostname = "127.0.0.1" + canonical.port = 2553 } } cluster { - seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2551"] + seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"] auto-down-unreachable-after = 10s } -- 2.36.6