/*
 * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.sharding;
10
11 import static org.junit.Assert.assertNotNull;
12 import static org.junit.Assert.assertTrue;
13 import static org.junit.Assert.fail;
14 import static org.mockito.Mockito.doReturn;
15 import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
16 import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;
17
18 import akka.actor.ActorRef;
19 import akka.actor.ActorSystem;
20 import akka.actor.Address;
21 import akka.actor.AddressFromURIString;
22 import akka.cluster.Cluster;
23 import akka.testkit.JavaTestKit;
24 import com.google.common.collect.Lists;
25 import com.typesafe.config.ConfigFactory;
26 import java.util.Collections;
27 import org.junit.After;
28 import org.junit.Assert;
29 import org.junit.Before;
30 import org.junit.Test;
31 import org.mockito.Mockito;
32 import org.opendaylight.controller.cluster.ActorSystemProvider;
33 import org.opendaylight.controller.cluster.datastore.AbstractTest;
34 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
35 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
36 import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
37 import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
38 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
39 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
40 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
41 import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
42 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
43 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
44 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
45 import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
46 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
47 import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
48 import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
49 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
50 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
51 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
52 import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
53 import org.slf4j.Logger;
54 import org.slf4j.LoggerFactory;
55
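/**
 * Tests DistributedShardedDOMDataTree operation across a two-member cluster:
 * producer registration conflicts, cursor-based writes into a registered shard,
 * and repeated shard registration and teardown at one or more prefixes.
 */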
public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {

    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);

    private static final Address MEMBER_1_ADDRESS =
            AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");

    private static final DOMDataTreeIdentifier TEST_ID =
            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);

    private static final String MODULE_SHARDS_CONFIG = "module-shards-cars-member-1-and-2.conf";

    private ActorSystem leaderSystem;
    private ActorSystem followerSystem;

    private final Builder leaderDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
                    .logicalStoreType(
                            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);

    private final Builder followerDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
                    .logicalStoreType(
                            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);

    private DistributedDataStore leaderConfigDatastore;
    private DistributedDataStore leaderOperDatastore;

    private DistributedDataStore followerConfigDatastore;
    private DistributedDataStore followerOperDatastore;

    private IntegrationTestKit leaderTestKit;
    private IntegrationTestKit followerTestKit;

    private DistributedShardedDOMDataTree leaderShardFactory;
    private DistributedShardedDOMDataTree followerShardFactory;

    private ActorSystemProvider leaderSystemProvider;
    private ActorSystemProvider followerSystemProvider;

    @Before
    public void setUp() {
        InMemoryJournal.clear();
        InMemorySnapshotStore.clear();

        // Both actor systems join the member-1 address, forming a single two-node cluster.
        leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
        Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);

        followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
        Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);

        leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
        doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();

        followerSystemProvider = Mockito.mock(ActorSystemProvider.class);
        doReturn(followerSystem).when(followerSystemProvider).getActorSystem();
    }

    @After
    public void tearDown() {
        if (leaderConfigDatastore != null) {
            leaderConfigDatastore.close();
        }
        if (leaderOperDatastore != null) {
            leaderOperDatastore.close();
        }

        if (followerConfigDatastore != null) {
            followerConfigDatastore.close();
        }
        if (followerOperDatastore != null) {
            followerOperDatastore.close();
        }

        JavaTestKit.shutdownActorSystem(leaderSystem);
        JavaTestKit.shutdownActorSystem(followerSystem);

        InMemoryJournal.clear();
        InMemorySnapshotStore.clear();
    }

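    // Brings up config and operational datastores on both members, wires a
    // DistributedShardedDOMDataTree on top of each, and waits until the leader
    // member owns the default (root) shard in both datastores.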
    private void initEmptyDatastores() {
        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);

        leaderConfigDatastore = (DistributedDataStore) leaderTestKit.setupDistributedDataStore(
                "config", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
        leaderOperDatastore = (DistributedDataStore) leaderTestKit.setupDistributedDataStore(
                "operational", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());

        leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
                leaderOperDatastore, leaderConfigDatastore);

        followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);

        followerConfigDatastore = (DistributedDataStore) followerTestKit.setupDistributedDataStore(
                "config", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
        followerOperDatastore = (DistributedDataStore) followerTestKit.setupDistributedDataStore(
                "operational", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());

        followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
                followerOperDatastore, followerConfigDatastore);

        leaderShardFactory.init();
        followerShardFactory.init();

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
        leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
    }

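    // A producer attached to a subtree on one member must block producer creation
    // for the same subtree on the other member until it is closed, and a prefix
    // with a live shard registration must reject a second registration.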
    @Test
    public void testProducerRegistrations() throws Exception {
        initEmptyDatastores();

        leaderTestKit.waitForMembersUp("member-2");

        final DistributedShardRegistration shardRegistration =
                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

        final ActorRef leaderShardManager = leaderConfigDatastore.getActorContext().getShardManager();

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
        try {
            followerShardFactory.createProducer(Collections.singleton(TEST_ID));
            fail("Producer should already be registered on the other node");
        } catch (final IllegalArgumentException e) {
            assertTrue(e.getMessage().contains("is attached to producer"));
        }

        producer.close();

        final DOMDataTreeProducer followerProducer =
                followerShardFactory.createProducer(Collections.singleton(TEST_ID));
        try {
            leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
            fail("Producer should already be registered on the other node");
        } catch (final IllegalArgumentException e) {
            assertTrue(e.getMessage().contains("is attached to producer"));
        }

        followerProducer.close();

        // try to create a shard on an already-registered prefix on the follower
        try {
            waitOnAsyncTask(followerShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
            fail("This prefix should already have a shard registration that was forwarded from the other node");
        } catch (final DOMDataTreeShardingConflictException e) {
            assertTrue(e.getMessage().contains("is already occupied by another shard"));
        }

        shardRegistration.close().toCompletableFuture().get();
    }

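    // Registers a shard for the test subtree, then performs a cursor-based write
    // through a producer on the leader and submits the transaction.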
    @Test
    public void testWriteIntoMultipleShards() throws Exception {
        initEmptyDatastores();

        leaderTestKit.waitForMembersUp("member-2");

        LOG.debug("Registering first shard");
        final DistributedShardRegistration shardRegistration =
                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
        findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

        LOG.debug("Found local shard on the follower, creating producer");
        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));

        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
        final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
        Assert.assertNotNull(cursor);

        final YangInstanceIdentifier nameId =
                YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
        cursor.write(nameId.getLastPathArgument(),
                ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
                        new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build());
        cursor.close();

        LOG.debug("Submitting transaction");
        tx.submit().checkedGet();

        shardRegistration.close().toCompletableFuture().get();
    }

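    // Registers shards at four nested prefixes, checks that both members host the
    // corresponding local shards, then closes each registration and waits for the
    // shards to shut down on both members.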
    @Test
    public void testMultipleShardRegistrations() throws Exception {
        initEmptyDatastores();

        final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg2 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_CONTAINER_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg3 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.INNER_LIST_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg4 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.JUNK_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        // check that the leader has the local shards
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

        // check that the follower has the local shards
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

        LOG.debug("Closing registrations");

        reg1.close().toCompletableFuture().get();
        reg2.close().toCompletableFuture().get();
        reg3.close().toCompletableFuture().get();
        reg4.close().toCompletableFuture().get();

        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        LOG.debug("All leader shards gone");

        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        LOG.debug("All follower shards gone");
    }

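    // Repeatedly registers and closes a shard at the same prefix, verifying after
    // each round that the shard appears on both members and is torn down cleanly.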
    @Test
    public void testMultipleRegistrationsAtOnePrefix() throws Exception {
        initEmptyDatastores();

        for (int i = 0; i < 10; i++) {
            LOG.debug("Round {}", i);
            final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

            leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

            assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
            assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

            waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

            waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
            waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        }
    }
}