Fix intermittent failure in testLeadershipTransferOnShutdown
[controller.git] opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java
/*
 * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.sharding;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.AddressFromURIString;
import akka.cluster.Cluster;
import akka.testkit.JavaTestKit;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import java.util.Collections;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

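/**
 * Tests distributed prefix-based sharding across a two-node cluster: shard
 * creation and teardown, producer registration conflicts between members, and
 * writes through a producer-backed transaction.
 */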
@Ignore("https://bugs.opendaylight.org/show_bug.cgi?id=8301")
public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {

    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);

    private static final Address MEMBER_1_ADDRESS =
            AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");

    private static final DOMDataTreeIdentifier TEST_ID =
            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);

    private static final String MODULE_SHARDS_CONFIG = "module-shards-cars-member-1-and-2.conf";

    private ActorSystem leaderSystem;
    private ActorSystem followerSystem;

    private final Builder leaderDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
                    .logicalStoreType(
                            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);

    private final Builder followerDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
                    .logicalStoreType(
                            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);

    private DistributedDataStore leaderConfigDatastore;
    private DistributedDataStore leaderOperDatastore;

    private DistributedDataStore followerConfigDatastore;
    private DistributedDataStore followerOperDatastore;

    private IntegrationTestKit followerTestKit;
    private IntegrationTestKit leaderTestKit;
    private DistributedShardedDOMDataTree leaderShardFactory;

    private DistributedShardedDOMDataTree followerShardFactory;
    private ActorSystemProvider leaderSystemProvider;
    private ActorSystemProvider followerSystemProvider;

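    // Both actor systems join MEMBER_1_ADDRESS, forming a two-node cluster
    // with member-1 as the seed node.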
    @Before
    public void setUp() {
        InMemoryJournal.clear();
        InMemorySnapshotStore.clear();

        leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
        Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);

        followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
        Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);

        leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
        doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();

        followerSystemProvider = Mockito.mock(ActorSystemProvider.class);
        doReturn(followerSystem).when(followerSystemProvider).getActorSystem();
    }

    @After
    public void tearDown() {
        if (leaderConfigDatastore != null) {
            leaderConfigDatastore.close();
        }
        if (leaderOperDatastore != null) {
            leaderOperDatastore.close();
        }

        if (followerConfigDatastore != null) {
            followerConfigDatastore.close();
        }
        if (followerOperDatastore != null) {
            followerOperDatastore.close();
        }

        JavaTestKit.shutdownActorSystem(leaderSystem, null, Boolean.TRUE);
        JavaTestKit.shutdownActorSystem(followerSystem, null, Boolean.TRUE);

        InMemoryJournal.clear();
        InMemorySnapshotStore.clear();
    }

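    // Sets up config and operational datastores plus a shard factory on both
    // members, then blocks until member-1 becomes leader of the default
    // (root-prefix) shard in both of its datastores.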
    private void initEmptyDatastores() throws Exception {
        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);

        leaderConfigDatastore = leaderTestKit.setupDistributedDataStore(
                "config", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
        leaderOperDatastore = leaderTestKit.setupDistributedDataStore(
                "operational", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());

        leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
                leaderOperDatastore,
                leaderConfigDatastore);

        followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);

        followerConfigDatastore = followerTestKit.setupDistributedDataStore(
                "config", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
        followerOperDatastore = followerTestKit.setupDistributedDataStore(
                "operational", MODULE_SHARDS_CONFIG, true,
                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());

        followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
                followerOperDatastore,
                followerConfigDatastore);

        leaderShardFactory.init();
        followerShardFactory.init();

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));

        leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
    }

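    /**
     * Verifies that a prefix shard created from the leader appears on both
     * members, and that a producer registered for the prefix on one node
     * blocks producer creation for the same prefix on the other node until it
     * is closed. Also checks that re-registering a shard on an occupied
     * prefix fails with a sharding conflict.
     */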
    @Test
    public void testProducerRegistrations() throws Exception {
        initEmptyDatastores();

        leaderTestKit.waitForMembersUp("member-2");

        final DistributedShardRegistration shardRegistration =
                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

        final ActorRef leaderShardManager = leaderConfigDatastore.getActorContext().getShardManager();

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
        try {
            followerShardFactory.createProducer(Collections.singleton(TEST_ID));
            fail("Producer should already be registered on the other node");
        } catch (final IllegalArgumentException e) {
            assertTrue(e.getMessage().contains("is attached to producer"));
        }

        producer.close();

        final DOMDataTreeProducer followerProducer =
                followerShardFactory.createProducer(Collections.singleton(TEST_ID));
        try {
            leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
            fail("Producer should already be registered on the other node");
        } catch (final IllegalArgumentException e) {
            assertTrue(e.getMessage().contains("is attached to producer"));
        }

        followerProducer.close();
        // try to create a shard on an already registered prefix on the follower
        try {
            waitOnAsyncTask(followerShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
            fail("This prefix should already have a shard registration that was forwarded from the other node");
        } catch (final DOMDataTreeShardingConflictException e) {
            assertTrue(e.getMessage().contains("is already occupied by another shard"));
        }

        shardRegistration.close().toCompletableFuture().get();
    }

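    /**
     * Creates a prefix shard, opens a cursor-aware transaction through a
     * producer, writes a leaf node under the shard root and submits the
     * transaction.
     */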
    @Test
    public void testWriteIntoMultipleShards() throws Exception {
        initEmptyDatastores();

        leaderTestKit.waitForMembersUp("member-2");

        LOG.debug("registering first shard");
        final DistributedShardRegistration shardRegistration =
                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
        findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

        LOG.debug("Shard located on the follower, creating producer");
        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));

        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
        final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
        Assert.assertNotNull(cursor);
        final YangInstanceIdentifier nameId =
                YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
        cursor.write(nameId.getLastPathArgument(),
                ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
                        new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build());

        cursor.close();
        LOG.debug("Submitting transaction");

        tx.submit().checkedGet();

        shardRegistration.close().toCompletableFuture().get();
    }

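    /**
     * Registers four nested prefix shards, checks that each has a local shard
     * on both members, then closes the registrations and waits for every
     * shard to shut down on both members.
     */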
    @Test
    public void testMultipleShardRegistrations() throws Exception {
        initEmptyDatastores();

        final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg2 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_CONTAINER_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg3 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.INNER_LIST_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final DistributedShardRegistration reg4 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.JUNK_PATH),
                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        // check leader has local shards
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));

        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

        // check follower has local shards
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));

        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

        LOG.debug("Closing registrations");

        reg1.close().toCompletableFuture().get();
        reg2.close().toCompletableFuture().get();
        reg3.close().toCompletableFuture().get();
        reg4.close().toCompletableFuture().get();

        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));

        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));

        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        LOG.debug("All leader shards gone");

        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));

        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));

        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

        LOG.debug("All follower shards gone");
    }

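    /**
     * Repeatedly creates and closes a shard registration for the same prefix,
     * verifying on every iteration that the shard comes up on both members
     * and is torn down cleanly when the registration is closed.
     */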
    @Test
    public void testMultipleRegistrationsAtOnePrefix() throws Exception {
        initEmptyDatastores();

        for (int i = 0; i < 10; i++) {
            LOG.debug("Round {}", i);
            final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

            leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

            assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

            assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

            waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

            waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));

            waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        }
    }
}