Use Akka artery for remote transport 66/49466/10
author Robert Varga <rovarga@cisco.com>
Fri, 16 Dec 2016 10:23:28 +0000 (11:23 +0100)
committer Tom Pantelis <tpanteli@brocade.com>
Thu, 5 Jan 2017 12:02:28 +0000 (12:02 +0000)
This transport was introduced in Akka 2.4.11 and is supposed to be
faster than the TCP transport. Enable it globally so we can
evaluate it.
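
The gist of the change: each akka config swaps the netty.tcp transport
block for an artery block, and remote actor addresses drop the transport
qualifier, i.e. "akka.tcp://..." becomes "akka://...". A minimal
before/after sketch of the remoting section, mirroring the edits below:

    # before: classic remoting over Netty TCP
    remote {
      netty.tcp {
        hostname = "127.0.0.1"
        port = 2550
      }
    }
    cluster {
      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
    }

    # after: artery transport on the same canonical host/port
    remote {
      artery {
        enabled = on
        canonical.hostname = "127.0.0.1"
        canonical.port = 2550
      }
    }
    cluster {
      seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550"]
    }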

Change-Id: I25234f82ac056700e8b56abaeb452c53ec5b9dbd
Signed-off-by: Robert Varga <rovarga@cisco.com>
Signed-off-by: Tom Pantelis <tpanteli@brocade.com>
24 files changed:
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleRoleChangeListener.java
opendaylight/md-sal/sal-akka-raft-example/src/main/resources/application.conf
opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf [deleted file]
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
opendaylight/md-sal/sal-distributed-datastore/client.conf [deleted file]
opendaylight/md-sal/sal-distributed-datastore/server.conf [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/PrefixShardCreationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorContextTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockClusterWrapper.java
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf
opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-2.conf
opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-3.conf
opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf

index f6930e8b09656cf59e48aa5a85d0b6ebb9c2c72f..3336ad9b3b0b1b0b642d451f11c075e81d1485f7 100644 (file)
@@ -35,7 +35,7 @@ import scala.concurrent.duration.FiniteDuration;
  */
 public class ExampleRoleChangeListener extends AbstractUntypedActor implements AutoCloseable{
     // the akka url should be set to the notifiers actor-system and domain.
-    private static final String NOTIFIER_AKKA_URL = "akka.tcp://raft-test@127.0.0.1:2550/user/";
+    private static final String NOTIFIER_AKKA_URL = "akka://raft-test@127.0.0.1:2550/user/";
 
     private Map<String, Boolean> notifierRegistrationStatus = new HashMap<>();
     private Cancellable registrationSchedule = null;
index b2132b88b0c4f062414cf57a42fc8efdc3eb88dc..02dd4a124083714ea89509c3014865814e90f349 100644 (file)
@@ -45,12 +45,13 @@ raft-test {
         }
 
         remote {
-                        log-remote-lifecycle-events = off
-                        netty.tcp {
-                            hostname = "127.0.0.1"
-                            port = 2550
-                        }
-                    }
+            log-remote-lifecycle-events = off
+            artery {
+                enabled = on
+                canonical.hostname = "127.0.0.1"
+                canonical.port = 2550
+            }
+        }
     }
 }
 
@@ -65,9 +66,10 @@ raft-test-listener {
 
     remote {
         log-remote-lifecycle-events = off
-        netty.tcp {
-            hostname = "127.0.0.1"
-            port = 2554
+        artery {
+            enabled = on
+            canonical.hostname = "127.0.0.1"
+            canonical.port = 2554
         }
     }
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf b/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf
deleted file mode 100644 (file)
index b2132b8..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-akka {
-
-    loglevel = "DEBUG"
-
-    actor {
-        # enable to test serialization only.
-        # serialize-messages = on
-
-        serializers {
-          java  = "akka.serialization.JavaSerializer"
-          proto = "akka.remote.serialization.ProtobufSerializer"
-        }
-
-        serialization-bindings {
-            "org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry" = java
-            "org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener" = java
-            "com.google.protobuf.Message" = proto
-            "com.google.protobuf.GeneratedMessage" = proto
-        }
-    }
-}
-
-raft-test {
-    akka {
-
-        loglevel = "DEBUG"
-
-        actor {
-            # enable to test serialization only.
-            # serialize-messages = on
-
-            provider = "akka.remote.RemoteActorRefProvider"
-
-            serializers {
-              java  = "akka.serialization.JavaSerializer"
-              proto = "akka.remote.serialization.ProtobufSerializer"
-            }
-
-            serialization-bindings {
-                "org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry" = java
-                "org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener" = java
-                "com.google.protobuf.Message" = proto
-                "com.google.protobuf.GeneratedMessage" = proto
-            }
-        }
-
-        remote {
-                        log-remote-lifecycle-events = off
-                        netty.tcp {
-                            hostname = "127.0.0.1"
-                            port = 2550
-                        }
-                    }
-    }
-}
-
-raft-test-listener {
-
-  akka {
-    loglevel = "DEBUG"
-
-    actor {
-        provider = "akka.remote.RemoteActorRefProvider"
-    }
-
-    remote {
-        log-remote-lifecycle-events = off
-        netty.tcp {
-            hostname = "127.0.0.1"
-            port = 2554
-        }
-    }
-
-    member-id = "member-1"
-  }
-}
-
-
-
index 852171285ce63124c62225b8c5fb9f36341d62d7..31e54601dc6bf5e79b9103e2cdcb0d1dba5831be 100644 (file)
@@ -2,14 +2,15 @@
 odl-cluster-data {
   akka {
     remote {
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2550
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
+      seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550"]
 
       roles = [
         "member-1"
index 10660fdb5a6bc37eef64e8424f0fbaaf785beb9e..af200deedc0667baa06a1f1335a20605938b18bb 100644 (file)
@@ -61,18 +61,22 @@ odl-cluster-data {
     }
     remote {
       log-remote-lifecycle-events = off
+
       netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
         maximum-frame-size = 419430400
         send-buffer-size = 52428800
         receive-buffer-size = 52428800
       }
+
+      artery {
+        advanced {
+          maximum-frame-size = 1 GiB
+          maximum-large-frame-size = 1 GiB
+        }
+      }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
-
       seed-node-timeout = 12s
  
       # Following is an excerpt from Akka Cluster Documentation
@@ -84,11 +88,6 @@ odl-cluster-data {
       #auto-down-unreachable-after = 30s
 
       allow-weakly-up-members = on
-
-      roles = [
-        "member-1"
-      ]
-
     }
 
     persistence {
diff --git a/opendaylight/md-sal/sal-distributed-datastore/client.conf b/opendaylight/md-sal/sal-distributed-datastore/client.conf
deleted file mode 100644 (file)
index 90bfb4c..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-ODLCluster{
-  akka {
-    actor {
-      serialize-messages = on
-
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-                java = "akka.serialization.JavaSerializer"
-                proto = "akka.remote.serialization.ProtobufSerializer"
-              }
-
-              serialization-bindings {
-                  "com.google.protobuf.Message" = proto
-                  "com.google.protobuf.GeneratedMessage" = proto
-                  "com.google.protobuf.GeneratedMessage$GeneratedExtension" = proto
-                  "com.google.protobuf.FieldSet" = proto
-              }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2552
-       maximum-frame-size = 2097152
-       send-buffer-size = 52428800
-       receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/server.conf b/opendaylight/md-sal/sal-distributed-datastore/server.conf
deleted file mode 100644 (file)
index 6209adf..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-
-ODLCluster{
-  akka {
-    actor {
-      serialize-messages = on
-
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-                java = "akka.serialization.JavaSerializer"
-                proto = "akka.remote.serialization.ProtobufSerializer"
-              }
-
-              serialization-bindings {
-                  "com.google.protobuf.Message" = proto
-                  "com.google.protobuf.GeneratedMessage" = proto
-                  "com.google.protobuf.GeneratedMessage$GeneratedExtension" = proto
-                  "com.google.protobuf.FieldSet" = proto
-              }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-       maximum-frame-size = 2097152
-       send-buffer-size = 52428800
-       receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf
deleted file mode 100644 (file)
index 2449976..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-
-odl-cluster-data {
-  bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
-    mailbox-capacity = 1000
-    mailbox-push-timeout-time = 100ms
-  }
-
-  metric-capture-enabled = true
-
-  akka {
-    loggers = ["akka.event.slf4j.Slf4jLogger"]
-    cluster {
-        roles = [
-          "member-1"
-        ]
-    }
-    actor {
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-                java = "akka.serialization.JavaSerializer"
-                proto = "akka.remote.serialization.ProtobufSerializer"
-                readylocal = "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransactionSerializer"
-              }
-
-              serialization-bindings {
-                  "com.google.protobuf.Message" = proto
-                  "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction" = readylocal
-              }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-        maximum-frame-size = 419430400
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
-
-odl-cluster-rpc {
-  akka {
-    loggers = ["akka.event.slf4j.Slf4jLogger"]
-    actor {
-      provider = "akka.cluster.ClusterActorRefProvider"
-
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2551
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/module-shards.conf
deleted file mode 100644 (file)
index 60dd775..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-module-shards = [
-    {
-        name = "default"
-        shards = [
-            {
-                name="default",
-                replicas = [
-                    "member-1",
-                ]
-            }
-        ]
-    },
-    {
-        name = "inventory"
-        shards = [
-            {
-                name="inventory"
-                replicas = [
-                    "member-1",
-                ]
-            }
-        ]
-    }
-
-]
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/modules.conf
deleted file mode 100644 (file)
index e820703..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-modules = [
-    {
-        name = "inventory"
-        namespace = "urn:opendaylight:inventory"
-        shard-strategy = "module"
-    }
-]
index 301f65f3a05d4c55e143f223cbf7af0d8ce226c2..0352db6f767282d8a92e0793391f2fbbfa82d8da 100644 (file)
@@ -64,7 +64,7 @@ public class DataTreeCohortIntegrationTest {
     @BeforeClass
     public static void setUpClass() throws IOException {
         system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
-        final Address member1Address = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
+        final Address member1Address = AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");
         Cluster.get(system).join(member1Address);
     }
 
index 9b97104d9777afb4461182d6d08b9adbd86749b7..cb2284a30c6c7b103d86fea1c12d0881d37e1e86 100644 (file)
@@ -97,7 +97,7 @@ public class DistributedDataStoreIntegrationTest {
     @BeforeClass
     public static void setUpClass() throws IOException {
         system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
-        Address member1Address = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
+        Address member1Address = AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");
         Cluster.get(system).join(member1Address);
     }
 
index 7474690bcac940661552cefe0df4f3f5b10e0828..ff44f01402f33f3f57f0bab01d169e78d3eeab1d 100644 (file)
@@ -103,9 +103,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     private static final String[] CARS = {"cars"};
 
     private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse(
-            "akka.tcp://cluster-test@127.0.0.1:2558");
+            "akka://cluster-test@127.0.0.1:2558");
     private static final Address MEMBER_2_ADDRESS = AddressFromURIString.parse(
-            "akka.tcp://cluster-test@127.0.0.1:2559");
+            "akka://cluster-test@127.0.0.1:2559");
 
     private static final String MODULE_SHARDS_CARS_ONLY_1_2 = "module-shards-cars-member-1-and-2.conf";
     private static final String MODULE_SHARDS_CARS_PEOPLE_1_2 = "module-shards-member1-and-2.conf";
@@ -806,7 +806,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         String testName = "testLeadershipTransferOnShutdown";
         initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE);
 
-        IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System, followerDatastoreContextBuilder);
+        IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
+                DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(100));
         try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupDistributedDataStore(testName,
                 MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
 
@@ -912,6 +913,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test(expected = AskTimeoutException.class)
     public void testTransactionWithShardLeaderNotResponding() throws Exception {
+        followerDatastoreContextBuilder.shardElectionTimeoutFactor(50);
         initDatastoresWithCars("testTransactionWithShardLeaderNotResponding");
 
         // Do an initial read to get the primary shard info cached.
index bb2ce19deec0314b5fb409fe4d9b7f85e776ae41..e6ea97124707cbd2cf85c605dba139bcf81628a4 100644 (file)
@@ -12,7 +12,6 @@ import static org.junit.Assert.fail;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
-import akka.actor.Address;
 import akka.actor.AddressFromURIString;
 import akka.cluster.Cluster;
 import akka.cluster.ClusterEvent.CurrentClusterState;
@@ -23,6 +22,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Uninterruptibles;
+import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
 import java.util.List;
 import java.util.Set;
@@ -34,6 +34,7 @@ import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftS
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.Duration;
@@ -47,7 +48,7 @@ import scala.concurrent.duration.Duration;
  * @author Thomas Pantelis
  */
 public class MemberNode {
-    static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
+    private static final String MEMBER_1_ADDRESS = "akka://cluster-test@127.0.0.1:2558";
 
     private IntegrationTestKit kit;
     private AbstractDataStore configDataStore;
@@ -110,6 +111,7 @@ public class MemberNode {
         fail("Member " + member + " is now down");
     }
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     public void cleanup() {
         if (!cleanedUp) {
             cleanedUp = true;
@@ -120,7 +122,11 @@ public class MemberNode {
                 operDataStore.close();
             }
 
-            IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+            try {
+                IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+            } catch (RuntimeException e) {
+                LoggerFactory.getLogger(MemberNode.class).warn("Failed to shutdown actor system", e);
+            }
         }
     }
 
@@ -179,6 +185,7 @@ public class MemberNode {
         private final List<MemberNode> members;
         private String moduleShardsConfig;
         private String akkaConfig;
+        private boolean useAkkaArtery = true;
         private String[] waitForshardLeader = new String[0];
         private String testName;
         private SchemaContext schemaContext;
@@ -210,6 +217,16 @@ public class MemberNode {
             return this;
         }
 
+        /**
+         * Specifies whether or not to use akka artery for remoting. Default is true.
+         *
+         * @return this Builder
+         */
+        public Builder useAkkaArtery(final boolean newUseAkkaArtery) {
+            this.useAkkaArtery = newUseAkkaArtery;
+            return this;
+        }
+
         /**
          * Specifies the name of the test that is appended to the data store names. This is required.
          *
@@ -272,8 +289,18 @@ public class MemberNode {
             MemberNode node = new MemberNode();
             node.datastoreContextBuilder = datastoreContextBuilder;
 
-            ActorSystem system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig(akkaConfig));
-            Cluster.get(system).join(MEMBER_1_ADDRESS);
+            Config baseConfig = ConfigFactory.load();
+            Config config;
+            if (useAkkaArtery) {
+                config = baseConfig.getConfig(akkaConfig);
+            } else {
+                config = baseConfig.getConfig(akkaConfig + "-without-artery")
+                        .withFallback(baseConfig.getConfig(akkaConfig));
+            }
+
+            ActorSystem system = ActorSystem.create("cluster-test", config);
+            String member1Address = useAkkaArtery ? MEMBER_1_ADDRESS : MEMBER_1_ADDRESS.replace("akka", "akka.tcp");
+            Cluster.get(system).join(AddressFromURIString.parse(member1Address));
 
             node.kit = new IntegrationTestKit(system, datastoreContextBuilder);
 
index 2a3431f67f4eaec569109217e302f8d61cd4e8a8..c87b2f43694083057444561103c95fdfb5593b19 100644 (file)
@@ -100,7 +100,7 @@ public class PrefixShardCreationTest extends AbstractShardManagerTest {
 
         // Create ACtorSystem for member-1
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final TestActorRef<TestShardManager> shardManager1 = TestActorRef.create(system1,
                 newTestShardMgrBuilder(new ConfigurationImpl(new EmptyModuleShardConfigProvider()))
@@ -113,7 +113,7 @@ public class PrefixShardCreationTest extends AbstractShardManagerTest {
 
         final ActorSystem system2 = newActorSystem("Member2");
 
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final TestActorRef<TestShardManager> shardManager2 = TestActorRef.create(system2,
                 newTestShardMgrBuilder()
@@ -186,4 +186,4 @@ public class PrefixShardCreationTest extends AbstractShardManagerTest {
         actorSystems.add(system);
         return system;
     }
-}
\ No newline at end of file
+}
index 6ff4ad25ec821700de1d45e82b6c3839b193084f..b4ad323c495088afd15b8c43321e666765abd57c 100644 (file)
@@ -348,23 +348,28 @@ public class DistributedEntityOwnershipIntegrationTest {
                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
 
         String name = "test";
-        final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
 
-        final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
-        final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
-        final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4").testName(name)
+        final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
-        final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5").testName(name)
+        final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5")
+                .useAkkaArtery(false).testName(name)
                 .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
                 .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
 
index 521a0b86bdec17d48b8eddbe000f43834ebcc806..dc640d2c9238104017b5904e1495e16c85c60f45 100644 (file)
@@ -634,7 +634,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         // Create an ActorSystem ShardManager actor for member-1.
 
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final TestActorRef<TestShardManager> shardManager1 = TestActorRef.create(system1,
                 newTestShardMgrBuilderWithMockShardActor().cluster(
@@ -645,7 +645,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         final ActorSystem system2 = newActorSystem("Member2");
 
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final ActorRef mockShardActor2 = newMockShardActor(system2, "astronauts", "member-2");
 
@@ -684,7 +684,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
                 shardManager2.underlyingActor().verifyFindPrimary();
 
-                Cluster.get(system2).down(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+                Cluster.get(system2).down(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
                 shardManager1.underlyingActor().waitForMemberRemoved();
 
@@ -705,7 +705,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         // Create an ActorSystem ShardManager actor for member-1.
 
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final ActorRef mockShardActor1 = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1");
 
@@ -718,7 +718,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         final ActorSystem system2 = newActorSystem("Member2");
 
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final ActorRef mockShardActor2 = newMockShardActor(system2, Shard.DEFAULT_NAME, "member-2");
 
@@ -758,7 +758,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 assertTrue("Unexpected primary path " + path, path.contains("member-2-shard-default-config"));
 
                 shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2",
-                        "akka.tcp://cluster-test@127.0.0.1:2558"), getRef());
+                        "akka://cluster-test@127.0.0.1:2558"), getRef());
 
                 shardManager1.underlyingActor().waitForUnreachableMember();
 
@@ -767,7 +767,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 MessageCollectorActor.clearMessages(mockShardActor1);
 
                 shardManager1.tell(
-                        MockClusterWrapper.createMemberRemoved("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"),
+                        MockClusterWrapper.createMemberRemoved("member-2", "akka://cluster-test@127.0.0.1:2558"),
                         getRef());
 
                 MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerDown.class);
@@ -777,7 +777,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 expectMsgClass(duration("5 seconds"), NoShardLeaderException.class);
 
                 shardManager1.tell(
-                        MockClusterWrapper.createReachableMember("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"),
+                        MockClusterWrapper.createReachableMember("member-2", "akka://cluster-test@127.0.0.1:2558"),
                         getRef());
 
                 shardManager1.underlyingActor().waitForReachableMember();
@@ -793,7 +793,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 assertTrue("Unexpected primary path " + path1, path1.contains("member-2-shard-default-config"));
 
                 shardManager1.tell(
-                        MockClusterWrapper.createMemberUp("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"),
+                        MockClusterWrapper.createMemberUp("member-2", "akka://cluster-test@127.0.0.1:2558"),
                         getRef());
 
                 MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerUp.class);
@@ -801,13 +801,13 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 // Test FindPrimary wait succeeds after reachable member event.
 
                 shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2",
-                        "akka.tcp://cluster-test@127.0.0.1:2558"), getRef());
+                        "akka://cluster-test@127.0.0.1:2558"), getRef());
                 shardManager1.underlyingActor().waitForUnreachableMember();
 
                 shardManager1.tell(new FindPrimary("default", true), getRef());
 
                 shardManager1.tell(
-                        MockClusterWrapper.createReachableMember("member-2", "akka.tcp://cluster-test@127.0.0.1:2558"),
+                        MockClusterWrapper.createReachableMember("member-2", "akka://cluster-test@127.0.0.1:2558"),
                         getRef());
 
                 RemotePrimaryShardFound found2 = expectMsgClass(duration("5 seconds"), RemotePrimaryShardFound.class);
@@ -827,7 +827,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         // Create an ActorSystem ShardManager actor for member-1.
 
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final ActorRef mockShardActor1 = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1");
 
@@ -842,7 +842,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         final ActorSystem system2 = newActorSystem("Member2");
 
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         final ActorRef mockShardActor2 = newMockShardActor(system2, Shard.DEFAULT_NAME, "member-2");
 
@@ -885,7 +885,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                         system1.actorSelection(mockShardActor1.path()), DataStoreVersions.CURRENT_VERSION));
 
                 shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2",
-                        "akka.tcp://cluster-test@127.0.0.1:2558"), getRef());
+                        "akka://cluster-test@127.0.0.1:2558"), getRef());
 
                 shardManager1.underlyingActor().waitForUnreachableMember();
 
@@ -1449,7 +1449,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Create an ActorSystem ShardManager actor for member-1.
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
         ActorRef mockDefaultShardActor = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1");
         final TestActorRef<TestShardManager> newReplicaShardManager = TestActorRef.create(system1,
                 newTestShardMgrBuilder(mockConfig).shardActor(mockDefaultShardActor)
@@ -1459,7 +1459,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Create an ActorSystem ShardManager actor for member-2.
         final ActorSystem system2 = newActorSystem("Member2");
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         String memberId2 = "member-2-shard-astronauts-" + shardMrgIDSuffix;
         String name = ShardIdentifier.create("astronauts", MEMBER_2, "config").toString();
@@ -1680,7 +1680,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
                 newReplicaShardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
                 MockClusterWrapper.sendMemberUp(newReplicaShardManager, "member-2",
-                        AddressFromURIString.parse("akka.tcp://non-existent@127.0.0.1:5").toString());
+                        AddressFromURIString.parse("akka://non-existent@127.0.0.1:5").toString());
 
                 newReplicaShardManager.tell(new AddShardReplica("astronauts"), getRef());
                 Status.Failure resp = expectMsgClass(duration("5 seconds"), Status.Failure.class);
@@ -1748,7 +1748,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Create an ActorSystem ShardManager actor for member-1.
         final ActorSystem system1 = newActorSystem("Member1");
-        Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system1).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
         ActorRef mockDefaultShardActor = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1");
 
         final TestActorRef<TestShardManager> newReplicaShardManager = TestActorRef.create(system1,
@@ -1758,7 +1758,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Create an ActorSystem ShardManager actor for member-2.
         final ActorSystem system2 = newActorSystem("Member2");
-        Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+        Cluster.get(system2).join(AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"));
 
         String name = ShardIdentifier.create("default", MEMBER_2, shardMrgIDSuffix).toString();
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
@@ -1774,11 +1774,11 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 shardManagerID);
 
         // Because mockShardLeaderActor is created at the top level of the actor system it has an address like so,
-        //    akka.tcp://cluster-test@127.0.0.1:2559/user/member-2-shard-default-config1
+        //    akka://cluster-test@127.0.0.1:2559/user/member-2-shard-default-config1
         // However when a shard manager has a local shard which is a follower and a leader that is remote it will
         // try to compute an address for the remote shard leader using the ShardPeerAddressResolver. This address will
         // look like so,
-        //    akka.tcp://cluster-test@127.0.0.1:2559/user/shardmanager-config1/member-2-shard-default-config1
+        //    akka://cluster-test@127.0.0.1:2559/user/shardmanager-config1/member-2-shard-default-config1
         // In this specific case if we did a FindPrimary for shard default from member-1 we would come up
         // with the address of an actor which does not exist, therefore any message sent to that actor would go to
         // dead letters.
index 78a00fb00ea169a75314940a7294f8b8437eda0a..cb0864ffaf8d04d2fa59813dc4b6fe553e4ca8a2 100644 (file)
@@ -256,13 +256,13 @@ public class ActorContextTest extends AbstractActorTest {
         assertEquals(true, actorContext.isPathLocal("akka://test/user/token2/token3/$a"));
 
         // self address of remote format,but Tx path local format.
-        clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+        clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
         actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
         assertEquals(true, actorContext.isPathLocal(
             "akka://system/user/shardmanager/shard/transaction"));
 
         // self address of local format,but Tx path remote format.
-        clusterWrapper.setSelfAddress(new Address("akka.tcp", "system"));
+        clusterWrapper.setSelfAddress(new Address("akka", "system"));
         actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
         assertEquals(false, actorContext.isPathLocal(
             "akka://system@127.0.0.1:2550/user/shardmanager/shard/transaction"));
@@ -273,24 +273,24 @@ public class ActorContextTest extends AbstractActorTest {
         assertEquals(true, actorContext.isPathLocal("akka://test1/user/$a"));
 
         //ip and port same
-        clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+        clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
         actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertEquals(true, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2550/"));
+        assertEquals(true, actorContext.isPathLocal("akka://system@127.0.0.1:2550/"));
 
         // forward-slash missing in address
-        clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+        clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
         actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2550"));
+        assertEquals(false, actorContext.isPathLocal("akka://system@127.0.0.1:2550"));
 
         //ips differ
-        clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+        clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
         actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.1.0.1:2550/"));
+        assertEquals(false, actorContext.isPathLocal("akka://system@127.1.0.1:2550/"));
 
         //ports differ
-        clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+        clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
         actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2551/"));
+        assertEquals(false, actorContext.isPathLocal("akka://system@127.0.0.1:2551/"));
     }
 
     @Test
index 0b9371698fafd4e703cc0edf998fa07b8ced99e6..9822559d5d14b810b9317813efff96a2e9004a82 100644 (file)
@@ -22,7 +22,7 @@ import scala.collection.JavaConversions;
 
 public class MockClusterWrapper implements ClusterWrapper {
 
-    private Address selfAddress = new Address("akka.tcp", "test", "127.0.0.1", 2550);
+    private Address selfAddress = new Address("akka", "test", "127.0.0.1", 2550);
     private final MemberName currentMemberName;
 
     public MockClusterWrapper() {
index f0cf18932f11f3acb07598065715c2fbdf6b1b01..bc9a86bf89698df0c511343dc81cf8eda475d5b3 100644 (file)
@@ -73,6 +73,12 @@ Member1 {
     }
     remote {
       log-remote-lifecycle-events = off
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2558
+      }
+
       netty.tcp {
         hostname = "127.0.0.1"
         port = 2558
@@ -80,7 +86,6 @@ Member1 {
     }
 
     cluster {
-      auto-down-unreachable-after = 100s
       retry-unsuccessful-join-after = 100ms
 
       roles = [
@@ -126,6 +131,12 @@ Member2 {
     }
     remote {
       log-remote-lifecycle-events = off
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2559
+      }
+
       netty.tcp {
         hostname = "127.0.0.1"
         port = 2559
@@ -133,7 +144,6 @@ Member2 {
     }
 
     cluster {
-      auto-down-unreachable-after = 100s
       retry-unsuccessful-join-after = 100ms
 
       roles = [
@@ -181,6 +191,12 @@ Member3 {
     }
     remote {
       log-remote-lifecycle-events = off
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2557
+      }
+
       netty.tcp {
         hostname = "127.0.0.1"
         port = 2557
@@ -188,7 +204,6 @@ Member3 {
     }
 
     cluster {
-      auto-down-unreachable-after = 100s
       retry-unsuccessful-join-after = 100ms
 
       roles = [
@@ -236,6 +251,12 @@ Member4 {
     }
     remote {
       log-remote-lifecycle-events = off
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2560
+      }
+
       netty.tcp {
         hostname = "127.0.0.1"
         port = 2560
@@ -243,7 +264,6 @@ Member4 {
     }
 
     cluster {
-      auto-down-unreachable-after = 100s
       retry-unsuccessful-join-after = 100ms
 
       roles = [
@@ -291,6 +311,12 @@ Member5 {
     }
     remote {
       log-remote-lifecycle-events = off
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2561
+      }
+
       netty.tcp {
         hostname = "127.0.0.1"
         port = 2561
@@ -298,7 +324,6 @@ Member5 {
     }
 
     cluster {
-      auto-down-unreachable-after = 100s
       retry-unsuccessful-join-after = 100ms
 
       roles = [
@@ -307,3 +332,23 @@ Member5 {
     }
   }
 }
+
+Member1-without-artery {
+  akka.remote.artery.enabled = off
+}
+
+Member2-without-artery {
+  akka.remote.artery.enabled = off
+}
+
+Member3-without-artery {
+  akka.remote.artery.enabled = off
+}
+
+Member4-without-artery {
+  akka.remote.artery.enabled = off
+}
+
+Member5-without-artery {
+  akka.remote.artery.enabled = off
+}
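Note that the netty.tcp blocks above are kept alongside the new artery blocks: the Member*-without-artery overlays let individual tests flip a member back to classic remoting without redefining the whole section. The dotted path is ordinary HOCON shorthand and desugars to the nested form, so Member1-without-artery is equivalent to:

    Member1-without-artery {
      akka {
        remote {
          artery {
            enabled = off
          }
        }
      }
    }

How a test combines the overlay with the base Member1 section is not shown in this hunk; presumably it is layered via Config.withFallback so that only artery.enabled changes.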
index 38aa4c517418644d4e233546098f094a78cee1ab..28a35ecb4c8b55e642fb042e25e9a90dc6fb7a02 100644 (file)
@@ -26,17 +26,15 @@ odl-cluster-data {
     }
     remote {
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2553
-        maximum-frame-size = 419430400
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2553
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550", "akka.tcp://opendaylight-cluster-data@127.0.0.1:2553"]
+      seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2553"]
 
       auto-down-unreachable-after = 10s
 
@@ -46,4 +44,4 @@ odl-cluster-data {
 
     }
   }
-}
\ No newline at end of file
+}
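The netty.tcp tuning keys removed here (maximum-frame-size, send-buffer-size, receive-buffer-size) have no counterparts under artery's canonical block and are simply dropped; if a larger frame limit were still needed it would have to go through artery's own settings. A sketch, assuming the Akka 2.4.11 key names (verify against the shipped reference.conf before relying on this):

    remote {
      artery {
        enabled = on
        canonical.hostname = "127.0.0.1"
        canonical.port = 2553
        # assumption: artery's frame limit, if the old 400 MB value is still wanted
        advanced.maximum-frame-size = 419430400
      }
    }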
index 7b2da6a1dbe85a63f35fe2b7acd4ff8decc9c0e7..f9afe04278c40309925cf0dc5b7192716359161f 100644 (file)
@@ -26,17 +26,15 @@ odl-cluster-data {
     }
     remote {
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2554
-        maximum-frame-size = 419430400
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2554
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550", "akka.tcp://opendaylight-cluster-data@127.0.0.1:2554"]
+      seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2554"]
 
       auto-down-unreachable-after = 10s
 
@@ -46,4 +44,4 @@ odl-cluster-data {
 
     }
   }
-}
\ No newline at end of file
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf b/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf
deleted file mode 100644 (file)
index c1c78d3..0000000
--- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf
+++ /dev/null
@@ -1,64 +0,0 @@
-
-odl-cluster-data {
-  akka {
-    cluster {
-        roles = [
-          "member-1"
-        ]
-    }
-    actor {
-      provider = "akka.cluster.ClusterActorRefProvider"
-      serializers {
-                java = "akka.serialization.JavaSerializer"
-                proto = "akka.remote.serialization.ProtobufSerializer"
-              }
-
-              serialization-bindings {
-                  "com.google.protobuf.Message" = proto
-
-              }
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-        maximum-frame-size = 2097152
-        send-buffer-size = 52428800
-        receive-buffer-size = 52428800
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
-
-odl-cluster-rpc {
-  bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
-    mailbox-capacity = 1000
-    mailbox-push-timeout-time = 100ms
-  }
-
-  akka {
-    actor {
-      provider = "akka.cluster.ClusterActorRefProvider"
-    }
-    remote {
-      log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2551
-      }
-    }
-
-    cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"]
-      auto-down-unreachable-after = 10s
-    }
-  }
-}
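With this bundle-local application.conf deleted, the RPC connector picks up its actor-system settings from the shared configuration instead (the initial/factory-akka.conf touched earlier in this change, per the file list). For illustration only, an artery rendering of the removed odl-cluster-rpc remote block, assuming the same loopback binding, would look like:

    odl-cluster-rpc {
      akka {
        remote {
          log-remote-lifecycle-events = off
          artery {
            enabled = on
            canonical.hostname = "127.0.0.1"
            canonical.port = 2551
          }
        }
      }
    }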
index f22b4eafb390fac9774afbe301bd5d375f1ec9c9..524e69ffe3f45f2a26e07a018a82d522ee21caa8 100644 (file)
@@ -1,4 +1,4 @@
-odl-cluster-rpc{
+odl-cluster-rpc {
   bounded-mailbox {
     mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
@@ -22,20 +22,21 @@ odl-cluster-rpc{
       log-sent-messages = on
 
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2550
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2550"]
+      seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2550"]
 
       auto-down-unreachable-after = 10s
     }
   }
 }
-unit-test{
+unit-test {
   akka {
     loglevel = "DEBUG"
     #loggers = ["akka.event.slf4j.Slf4jLogger"]
@@ -48,7 +49,7 @@ unit-test{
   }
 }
 
-memberA{
+memberA {
   bounded-mailbox {
     mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
@@ -68,20 +69,21 @@ memberA{
       log-sent-messages = off
 
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2551
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2551
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2551"]
+      seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
 
       auto-down-unreachable-after = 10s
     }
   }
 }
-memberB{
+memberB {
   bounded-mailbox {
     mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
@@ -102,20 +104,21 @@ memberB{
       log-sent-messages = off
 
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2552
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2552
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2551"]
+      seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
 
       auto-down-unreachable-after = 10s
     }
   }
 }
-memberC{
+memberC {
   bounded-mailbox {
     mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
@@ -135,14 +138,15 @@ memberC{
       log-sent-messages = off
 
       log-remote-lifecycle-events = off
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2553
+      artery {
+        enabled = on
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2553
       }
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-rpc@127.0.0.1:2551"]
+      seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
 
       auto-down-unreachable-after = 10s
     }