Rename ActorContext to ActorUtils
controller.git: opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java
/*
 * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.sharding;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.AddressFromURIString;
import akka.cluster.Cluster;
import akka.testkit.javadsl.TestKit;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

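/**
 * End-to-end test exercising prefix-based shard creation, producer registration and writes across
 * two cluster members (a leader and a follower), each running its own DistributedShardedDOMDataTree
 * on top of separate config and operational datastores.
 */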
public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {

    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);

    private static final Address MEMBER_1_ADDRESS =
            AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");

    private static final DOMDataTreeIdentifier TEST_ID =
            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);

    private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";

    private ActorSystem leaderSystem;
    private ActorSystem followerSystem;

    private final Builder leaderDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);

    private final DatastoreContext.Builder followerDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);

    private DistributedDataStore leaderConfigDatastore;
    private DistributedDataStore leaderOperDatastore;

    private DistributedDataStore followerConfigDatastore;
    private DistributedDataStore followerOperDatastore;

    private IntegrationTestKit followerTestKit;
    private IntegrationTestKit leaderTestKit;
    private DistributedShardedDOMDataTree leaderShardFactory;

    private DistributedShardedDOMDataTree followerShardFactory;
    private ActorSystemProvider leaderSystemProvider;
    private ActorSystemProvider followerSystemProvider;

    @Before
    public void setUp() {
        InMemoryJournal.clear();
        InMemorySnapshotStore.clear();

        leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
        Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);

        followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
        Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);

        leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
        doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();

        followerSystemProvider = Mockito.mock(ActorSystemProvider.class);
        doReturn(followerSystem).when(followerSystemProvider).getActorSystem();
    }

    @After
    public void tearDown() {
        if (leaderConfigDatastore != null) {
            leaderConfigDatastore.close();
        }
        if (leaderOperDatastore != null) {
            leaderOperDatastore.close();
        }

        if (followerConfigDatastore != null) {
            followerConfigDatastore.close();
        }
        if (followerOperDatastore != null) {
            followerOperDatastore.close();
        }

        TestKit.shutdownActorSystem(leaderSystem, true);
        TestKit.shutdownActorSystem(followerSystem, true);

        InMemoryJournal.clear();
        InMemorySnapshotStore.clear();
    }

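    /**
     * Brings up leader and follower config/operational datastores from the given module-shards
     * configuration and wires a DistributedShardedDOMDataTree shard factory on top of each,
     * waiting until the leader owns the default shard before the follower factory is initialized.
     */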
    private void initEmptyDatastores() throws Exception {
        initEmptyDatastores(MODULE_SHARDS_CONFIG);
    }

    private void initEmptyDatastores(final String moduleShardsConfig) throws Exception {
        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);

        leaderConfigDatastore = leaderTestKit.setupDistributedDataStore(
                "config", moduleShardsConfig, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
        leaderOperDatastore = leaderTestKit.setupDistributedDataStore(
                "operational", moduleShardsConfig, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());

        leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
                leaderOperDatastore,
                leaderConfigDatastore);

        followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);

        followerConfigDatastore = followerTestKit.setupDistributedDataStore(
                "config", moduleShardsConfig, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
        followerOperDatastore = followerTestKit.setupDistributedDataStore(
                "operational", moduleShardsConfig, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());

        followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
                followerOperDatastore,
                followerConfigDatastore);

        followerTestKit.waitForMembersUp("member-1");

        LOG.info("Initializing leader DistributedShardedDOMDataTree");
        leaderShardFactory.init();

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));

        leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));

        LOG.info("Initializing follower DistributedShardedDOMDataTree");
        followerShardFactory.init();
    }

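    /**
     * Creates a distributed shard for TEST_ID replicated on both members and verifies that a
     * producer attached to the prefix on one node blocks producer creation for the same prefix
     * on the other node, and that a second shard cannot be registered on an occupied prefix.
     */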
    @Test
    public void testProducerRegistrations() throws Exception {
        LOG.info("testProducerRegistrations starting");
        initEmptyDatastores();

        leaderTestKit.waitForMembersUp("member-2");

        // TODO refactor shard creation and verification to own method
        final DistributedShardRegistration shardRegistration =
                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

        final ActorRef leaderShardManager = leaderConfigDatastore.getActorUtils().getShardManager();

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

        final Set<String> peers = new HashSet<>();
        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
                        peers.addAll(onDemandShardState.getPeerAddresses().values()));
        assertEquals(1, peers.size());

        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
        try {
            followerShardFactory.createProducer(Collections.singleton(TEST_ID));
            fail("Producer should be already registered on the other node");
        } catch (final IllegalArgumentException e) {
            assertTrue(e.getMessage().contains("is attached to producer"));
        }

        producer.close();

        final DOMDataTreeProducer followerProducer =
                followerShardFactory.createProducer(Collections.singleton(TEST_ID));
        try {
            leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
            fail("Producer should be already registered on the other node");
        } catch (final IllegalArgumentException e) {
            assertTrue(e.getMessage().contains("is attached to producer"));
        }

        followerProducer.close();
        // try to create a shard on an already registered prefix on follower
        try {
            waitOnAsyncTask(followerShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
            fail("This prefix already should have a shard registration that was forwarded from the other node");
        } catch (final DOMDataTreeShardingConflictException e) {
            assertTrue(e.getMessage().contains("is already occupied by another shard"));
        }

        shardRegistration.close().toCompletableFuture().get();

        LOG.info("testProducerRegistrations ending");
    }

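    /**
     * Creates a distributed shard for TEST_ID, opens a producer-backed cursor transaction on the
     * leader, writes a leaf under the test container through a DOMDataTreeWriteCursor and commits
     * the transaction against the replicated shard.
     */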
    @Test
    public void testWriteIntoMultipleShards() throws Exception {
        LOG.info("testWriteIntoMultipleShards starting");
        initEmptyDatastores();

        leaderTestKit.waitForMembersUp("member-2");

        LOG.debug("registering first shard");
        final DistributedShardRegistration shardRegistration =
                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
        findLocalShard(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

        final Set<String> peers = new HashSet<>();
        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
                        peers.addAll(onDemandShardState.getPeerAddresses().values()));
        assertEquals(1, peers.size());

        LOG.debug("Got after waiting for nonleader");
        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));

        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
        final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
        Assert.assertNotNull(cursor);
        final YangInstanceIdentifier nameId =
                YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
        cursor.write(nameId.getLastPathArgument(),
                ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
                        new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build());

        cursor.close();
        LOG.warn("Got to pre submit");

        tx.commit().get();

        shardRegistration.close().toCompletableFuture().get();

        LOG.info("testWriteIntoMultipleShards ending");
    }

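    /**
     * Registers four distributed shards on nested prefixes, checks that each shard has a local
     * replica on both the leader and the follower, then closes all registrations and waits for
     * every shard to shut down on both members.
     */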
    @Test
    public void testMultipleShardRegistrations() throws Exception {
        LOG.info("testMultipleShardRegistrations starting");
        initEmptyDatastores();

        final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg2 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_CONTAINER_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg3 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.INNER_LIST_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg4 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.JUNK_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        // check leader has local shards
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

        // check follower has local shards
        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

        LOG.debug("Closing registrations");

        reg1.close().toCompletableFuture().get();
        reg2.close().toCompletableFuture().get();
        reg3.close().toCompletableFuture().get();
        reg4.close().toCompletableFuture().get();

        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));

        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));

        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        LOG.debug("All leader shards gone");

        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));

        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));

        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        LOG.debug("All follower shards gone");
        LOG.info("testMultipleShardRegistrations ending");
    }

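    /**
     * Repeatedly creates and closes a distributed shard on the same prefix, verifying on every
     * round that the shard comes up on both members, reports exactly one peer, and is fully torn
     * down again before the next registration.
     */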
    @Test
    public void testMultipleRegistrationsAtOnePrefix() throws Exception {
        LOG.info("testMultipleRegistrationsAtOnePrefix starting");
        initEmptyDatastores();

        for (int i = 0; i < 5; i++) {
            LOG.info("Round {}", i);
            final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

            leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

            assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

            assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

            final Set<String> peers = new HashSet<>();
            IntegrationTestKit.verifyShardState(leaderConfigDatastore,
                    ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
                            peers.addAll(onDemandShardState.getPeerAddresses().values()));
            assertEquals(1, peers.size());

            waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

            waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

            waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        }

        LOG.info("testMultipleRegistrationsAtOnePrefix ending");
    }

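    /**
     * Initializes the datastores from the alternate "module-shards-default-member-1.conf"
     * configuration and only verifies that DistributedShardedDOMDataTree initialization
     * completes without error.
     */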
    @Test
    public void testInitialBootstrappingWithNoModuleShards() throws Exception {
        LOG.info("testInitialBootstrappingWithNoModuleShards starting");
        initEmptyDatastores("module-shards-default-member-1.conf");

        // We just verify the DistributedShardedDOMDataTree initialized without error.
    }
}