BUG-5280: synchronize access to local histories
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java
index 5a34b3b77e2afd8419ae100929960e9b6ff02be8..9af88a8a5a7b9c8a3070ad5f24ae9a6ce06b3a20 100644
@@ -16,8 +16,7 @@ import java.util.Collection;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
+import java.util.concurrent.locks.StampedLock;
 import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
@@ -64,6 +63,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
 
     private final Map<LocalHistoryIdentifier, ClientLocalHistory> histories = new ConcurrentHashMap<>();
     private final AtomicLong nextHistoryId = new AtomicLong(1);
+    private final StampedLock lock = new StampedLock();
     private final SingleClientHistory singleHistory;
 
     private volatile Throwable aborted;
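
Review note: the new StampedLock field replaces the removed @GuardedBy("connectionsLock") contract with an explicit guard. A minimal, self-contained sketch of the locking discipline the rest of the patch follows (class and method names here are illustrative, not part of the patch):

    import java.util.concurrent.locks.StampedLock;

    final class StampedGuardSketch {
        private final StampedLock lock = new StampedLock();

        // Writer side (abortOperations, connectionUp): fully exclusive.
        void exclusive(final Runnable work) {
            final long stamp = lock.writeLock();
            try {
                work.run();
            } finally {
                lock.unlockWrite(stamp);
            }
        }

        // Reader side (createLocalHistory): runs concurrently with other
        // readers, blocks while a writer holds the lock.
        void shared(final Runnable work) {
            final long stamp = lock.readLock();
            try {
                work.run();
            } finally {
                lock.unlockRead(stamp);
            }
        }
    }

Unlike ReentrantReadWriteLock, StampedLock is not reentrant and its stamps carry no thread ownership; the latter is what allows connectionUp() below to acquire the write lock and release it later from inside the returned cohort.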
@@ -91,14 +91,19 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
     }
 
     private void abortOperations(final Throwable cause) {
-        // This acts as a barrier, application threads check this after they have added an entry in the maps,
-        // and if they observe aborted being non-null, they will perform their cleanup and not return the handle.
-        aborted = cause;
+        final long stamp = lock.writeLock();
+        try {
+            // This acts as a barrier, application threads check this after they have added an entry in the maps,
+            // and if they observe aborted being non-null, they will perform their cleanup and not return the handle.
+            aborted = cause;
 
-        for (ClientLocalHistory h : histories.values()) {
-            h.localAbort(cause);
+            for (ClientLocalHistory h : histories.values()) {
+                h.localAbort(cause);
+            }
+            histories.clear();
+        } finally {
+            lock.unlockWrite(stamp);
         }
-        histories.clear();
     }
 
     private AbstractDataStoreClientBehavior shutdown(final ClientActorBehavior<ShardBackendInfo> currentBehavior) {
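
Review note: since put() only ever executes under the read lock (see createLocalHistory() below), holding the write lock here means no half-created handle is in flight, so the iterate-and-clear cannot miss an entry. A hedged reduction of that invariant, with the history payload simplified to String:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.StampedLock;

    final class AbortBarrierSketch {
        private final Map<Long, String> histories = new ConcurrentHashMap<>();
        private final StampedLock lock = new StampedLock();
        private volatile Throwable aborted;

        String createHistory(final long id) {
            final long stamp = lock.readLock();
            try {
                if (aborted != null) {
                    throw new IllegalStateException("Client aborted", aborted);
                }
                final String history = "history-" + id;
                histories.put(id, history);
                return history;
            } finally {
                lock.unlockRead(stamp);
            }
        }

        void abortOperations(final Throwable cause) {
            final long stamp = lock.writeLock();
            try {
                aborted = cause;
                for (String h : histories.values()) {
                    System.out.println("aborting " + h); // stands in for localAbort()
                }
                histories.clear();
            } finally {
                lock.unlockWrite(stamp);
            }
        }
    }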
@@ -122,44 +127,41 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
      * involved, as the messages need to be replayed to the individual proxies.
      */
     @Override
-    @GuardedBy("connectionsLock")
-    protected final ConnectedClientConnection<ShardBackendInfo> connectionUp(
-            final AbstractClientConnection<ShardBackendInfo> conn, final ShardBackendInfo backend) {
-
-        // Step 0: create a new connected connection
-        final ConnectedClientConnection<ShardBackendInfo> newConn = new ConnectedClientConnection<>(conn.context(),
-                conn.cookie(), backend);
-
-        LOG.debug("{}: resolving connection {} to {}", persistenceId(), conn, newConn);
+    protected final ConnectionConnectCohort connectionUp(final ConnectedClientConnection<ShardBackendInfo> newConn) {
+        final long stamp = lock.writeLock();
 
+        // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
+        //         further TransactionProxies can be created and we can safely traverse maps without risking
+        //         missing an entry
         final Collection<HistoryReconnectCohort> cohorts = new ArrayList<>();
-        try {
-            // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
-            //         further TransactionProxies can be created and we can safely traverse maps without risking
-            //         missing an entry
-            startReconnect(singleHistory, newConn, cohorts);
-            for (ClientLocalHistory h : histories.values()) {
-                startReconnect(h, newConn, cohorts);
-            }
-
-            // Step 2: Collect previous successful requests from the cohorts. We do not want to expose
-            //         the non-throttling interface to the connection, hence we use a wrapper consumer
-            for (HistoryReconnectCohort c : cohorts) {
-                c.replaySuccessfulRequests();
-            }
-
-            // Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
-            //         requests will be immediately sent to it and requests being sent concurrently will get forwarded
-            //         once they hit the new connection.
-            conn.setForwarder(BouncingReconnectForwarder.forCohorts(newConn, cohorts));
-        } finally {
-            // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
-            for (HistoryReconnectCohort c : cohorts) {
-                c.close();
-            }
+        startReconnect(singleHistory, newConn, cohorts);
+        for (ClientLocalHistory h : histories.values()) {
+            startReconnect(h, newConn, cohorts);
         }
 
-        return newConn;
+        return previousEntries -> {
+            try {
+                // Step 2: Collect previous successful requests from the cohorts. We do not want to expose
+                //         the non-throttling interface to the connection, hence we use a wrapper consumer
+                for (HistoryReconnectCohort c : cohorts) {
+                    c.replaySuccessfulRequests(previousEntries);
+                }
+
+                // Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
+                //         requests will be immediately sent to it and requests being sent concurrently will get
+                //         forwarded once they hit the new connection.
+                return BouncingReconnectForwarder.forCohorts(newConn, cohorts);
+            } finally {
+                try {
+                    // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
+                    for (HistoryReconnectCohort c : cohorts) {
+                        c.close();
+                    }
+                } finally {
+                    lock.unlockWrite(stamp);
+                }
+            }
+        };
     }
 
     private static void startReconnect(final AbstractClientHistory history,
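
Review note: connectionUp() no longer performs the whole switchover itself; it freezes the histories under the write lock and hands the remaining steps to the returned ConnectionConnectCohort, whose finally chain releases the lock only after the forwarder has been produced and the cohorts closed. A hedged reduction of that hand-off, with the cohort and forwarder types simplified to a local interface and Runnable:

    import java.util.List;
    import java.util.concurrent.locks.StampedLock;

    final class ReconnectHandoffSketch {
        @FunctionalInterface
        interface Cohort<E, F> {
            F finishReconnect(List<E> previousEntries);
        }

        private final StampedLock lock = new StampedLock();

        Cohort<String, Runnable> connectionUp() {
            final long stamp = lock.writeLock();           // step 1: freeze histories
            return previousEntries -> {
                try {
                    for (String entry : previousEntries) { // step 2: replay requests
                        System.out.println("replaying " + entry);
                    }
                    return () -> { };                      // step 3: the forwarder
                } finally {
                    lock.unlockWrite(stamp);               // step 4: histories resume
                }
            };
        }
    }

Because StampedLock stamps are not thread-owned, this remains correct even if the cohort is invoked from a different thread than the one that ran connectionUp().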
@@ -181,19 +183,21 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
     public final ClientLocalHistory createLocalHistory() {
         final LocalHistoryIdentifier historyId = new LocalHistoryIdentifier(getIdentifier(),
             nextHistoryId.getAndIncrement());
-        final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
-        LOG.debug("{}: creating a new local history {}", persistenceId(), history);
 
-        Verify.verify(histories.put(historyId, history) == null);
+        final long stamp = lock.readLock();
+        try {
+            if (aborted != null) {
+                throw Throwables.propagate(aborted);
+            }
+
+            final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
+            LOG.debug("{}: creating a new local history {}", persistenceId(), history);
 
-        final Throwable a = aborted;
-        if (a != null) {
-            history.localAbort(a);
-            histories.remove(historyId, history);
-            throw Throwables.propagate(a);
+            Verify.verify(histories.put(historyId, history) == null);
+            return history;
+        } finally {
+            lock.unlockRead(stamp);
         }
-
-        return history;
     }
 
     @Override
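
Review note: the aborted check now happens before the history is constructed, replacing the old publish-then-retract dance (put, observe aborted, localAbort, remove). Throwables.propagate() rethrows RuntimeExceptions and Errors as-is and wraps anything else in a RuntimeException, so callers see an unchecked failure either way. A hypothetical caller, with createTransaction() and close() assumed from the surrounding class hierarchy:

    // client is a DataStoreClient obtained elsewhere; after this patch a
    // post-abort call fails fast, before any handle reaches the histories map.
    final ClientLocalHistory history = client.createLocalHistory();
    try {
        final ClientTransaction tx = history.createTransaction();
        // ... populate and submit the transaction ...
    } finally {
        history.close();
    }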
@@ -201,6 +205,11 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
         return singleHistory.createTransaction();
     }
 
+    @Override
+    public final ClientSnapshot createSnapshot() {
+        return singleHistory.doCreateSnapshot();
+    }
+
     @Override
     public final void close() {
         context().executeInActor(this::shutdown);
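
Review note: the new createSnapshot() delegates to the single-use history, just like createTransaction() above, so snapshots bypass the histories map and take no lock. A hypothetical usage sketch; read() and abort() are assumptions about the ClientSnapshot surface, shown only to illustrate the delegation:

    final ClientSnapshot snapshot = client.createSnapshot();
    try {
        snapshot.read(path);  // read-only view served by the single history
    } finally {
        snapshot.abort();     // release the handle
    }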