BUG-8898: prioritize InternalCommand
diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java
index e70fdc9662c6232dcc81bda50d95795cca02dbde..625209aaa57d14e2d2bb3fd8a42374de9eddf59c 100644
--- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java
+++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java
@@ -9,8 +9,11 @@ package org.opendaylight.controller.cluster.access.client;
 
 import com.google.common.annotations.Beta;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
 import com.google.common.base.Verify;
+import java.util.Collection;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -18,6 +21,7 @@ import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
+import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelopeException;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
@@ -53,11 +57,11 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
          * @param enqueuedEntries Previously-enqueued entries
          * @return A {@link ReconnectForwarder} to handle any straggler messages which arrive after this method returns.
          */
-        @Nonnull ReconnectForwarder finishReconnect(@Nonnull Iterable<ConnectionEntry> enqueuedEntries);
+        @Nonnull ReconnectForwarder finishReconnect(@Nonnull Collection<ConnectionEntry> enqueuedEntries);
     }
 
     private static final Logger LOG = LoggerFactory.getLogger(ClientActorBehavior.class);
-    private static final FiniteDuration RESOLVE_RETRY_DURATION = FiniteDuration.apply(5, TimeUnit.SECONDS);
+    private static final FiniteDuration RESOLVE_RETRY_DURATION = FiniteDuration.apply(1, TimeUnit.SECONDS);
 
     /**
      * Map of connections to the backend. This map is concurrent to allow lookups, but given complex operations
@@ -157,6 +161,25 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
     }
 
     private ClientActorBehavior<T> internalOnRequestFailure(final FailureEnvelope command) {
+        final AbstractClientConnection<T> conn = getConnection(command);
+        if (conn != null) {
+            /*
+             * We are talking to multiple actors, which may be lagging behind our state significantly. This has
+             * the effect that we may be receiving responses from a previous connection after we have created a new
+             * one to a different actor.
+             *
+             * Since we are already replaying requests to the new actor, we want to ignore errors reported on the old
+             * connection -- for example NotLeaderException, which must not cause a new reconnect. Check the envelope's
+             * sessionId and, if it does not match our current connection, just ignore it.
+             */
+            final Optional<T> optBackend = conn.getBackendInfo();
+            if (optBackend.isPresent() && optBackend.get().getSessionId() != command.getSessionId()) {
+                LOG.debug("{}: Mismatched current connection {} and envelope {}, ignoring response", persistenceId(),
+                    conn, command);
+                return this;
+            }
+        }
+
         final RequestFailure<?, ?> failure = command.getMessage();
         final RequestException cause = failure.getCause();
         if (cause instanceof RetiredGenerationException) {
@@ -166,13 +189,22 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
             return null;
         }
         if (cause instanceof NotLeaderException) {
-            final AbstractClientConnection<T> conn = getConnection(command);
             if (conn instanceof ReconnectingClientConnection) {
                 // Already reconnecting, do not churn the logs
                 return this;
             } else if (conn != null) {
                 LOG.info("{}: connection {} indicated no leadership, reconnecting it", persistenceId(), conn, cause);
-                return conn.reconnect(this);
+                return conn.reconnect(this, cause);
+            }
+        }
+        if (cause instanceof OutOfSequenceEnvelopeException) {
+            if (conn instanceof ReconnectingClientConnection) {
+                // Already reconnecting, do not churn the logs
+                return this;
+            } else if (conn != null) {
+                LOG.info("{}: connection {} indicated sequencing mismatch on {} sequence {} ({}), reconnecting it",
+                    persistenceId(), conn, failure.getTarget(), failure.getSequence(), command.getTxSequence(), cause);
+                return conn.reconnect(this, cause);
             }
         }
 
@@ -229,22 +261,22 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
     @GuardedBy("connectionsLock")
     @Nonnull protected abstract ConnectionConnectCohort connectionUp(@Nonnull ConnectedClientConnection<T> newConn);
 
-    private void backendConnectFinished(final Long shard, final AbstractClientConnection<T> conn,
+    private void backendConnectFinished(final Long shard, final AbstractClientConnection<T> oldConn,
             final T backend, final Throwable failure) {
         if (failure != null) {
             if (failure instanceof TimeoutException) {
-                if (!conn.equals(connections.get(shard))) {
+                if (!oldConn.equals(connections.get(shard))) {
                     // AbstractClientConnection will remove itself when it decides there is no point in continuing,
                     // at which point we want to stop retrying
-                    LOG.info("{}: stopping resolution of shard {} on stale connection {}", persistenceId(), shard, conn,
-                        failure);
+                    LOG.info("{}: stopping resolution of shard {} on stale connection {}", persistenceId(), shard,
+                        oldConn, failure);
                     return;
                 }
 
                 LOG.debug("{}: timed out resolving shard {}, scheduling retry in {}", persistenceId(), shard,
                     RESOLVE_RETRY_DURATION, failure);
                 context().executeInActor(b -> {
-                    resolveConnection(shard, conn);
+                    resolveConnection(shard, oldConn);
                     return b;
                 }, RESOLVE_RETRY_DURATION);
                 return;
@@ -258,41 +290,64 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
                 cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure);
             }
 
-            conn.poison(cause);
+            oldConn.poison(cause);
             return;
         }
 
         LOG.info("{}: resolved shard {} to {}", persistenceId(), shard, backend);
         final long stamp = connectionsLock.writeLock();
         try {
+            final Stopwatch sw = Stopwatch.createStarted();
+
             // Create a new connected connection
-            final ConnectedClientConnection<T> newConn = new ConnectedClientConnection<>(conn.context(),
-                    conn.cookie(), backend);
-            LOG.info("{}: resolving connection {} to {}", persistenceId(), conn, newConn);
+            final ConnectedClientConnection<T> newConn = new ConnectedClientConnection<>(oldConn, backend);
+            LOG.info("{}: resolving connection {} to {}", persistenceId(), oldConn, newConn);
 
             // Start reconnecting without the old connection lock held
             final ConnectionConnectCohort cohort = Verify.verifyNotNull(connectionUp(newConn));
 
             // Lock the old connection and get a reference to its entries
-            final Iterable<ConnectionEntry> replayIterable = conn.startReplay();
+            final Collection<ConnectionEntry> replayIterable = oldConn.startReplay();
 
             // Finish the connection attempt
             final ReconnectForwarder forwarder = Verify.verifyNotNull(cohort.finishReconnect(replayIterable));
 
+            // Cancel sleep debt after entries were replayed, before the new connection starts receiving.
+            newConn.cancelDebt();
+
             // Install the forwarder, unlocking the old connection
-            conn.finishReplay(forwarder);
+            oldConn.finishReplay(forwarder);
 
             // Make sure new lookups pick up the new connection
-            connections.replace(shard, conn, newConn);
-            LOG.info("{}: replaced connection {} with {}", persistenceId(), conn, newConn);
+            if (!connections.replace(shard, oldConn, newConn)) {
+                final AbstractClientConnection<T> existing = connections.get(oldConn.cookie());
+                LOG.warn("{}: old connection {} does not match existing {}, new connection {} in limbo",
+                    persistenceId(), oldConn, existing, newConn);
+            } else {
+                LOG.info("{}: replaced connection {} with {} in {}", persistenceId(), oldConn, newConn, sw);
+            }
         } finally {
             connectionsLock.unlockWrite(stamp);
         }
     }
 
     void removeConnection(final AbstractClientConnection<?> conn) {
-        connections.remove(conn.cookie(), conn);
-        LOG.debug("{}: removed connection {}", persistenceId(), conn);
+        final long stamp = connectionsLock.writeLock();
+        try {
+            if (!connections.remove(conn.cookie(), conn)) {
+                final AbstractClientConnection<T> existing = connections.get(conn.cookie());
+                if (existing != null) {
+                    LOG.warn("{}: failed to remove connection {}, as it was superseded by {}", persistenceId(), conn,
+                        existing);
+                } else {
+                    LOG.warn("{}: failed to remove connection {}, as it was not tracked", persistenceId(), conn);
+                }
+            } else {
+                LOG.info("{}: removed connection {}", persistenceId(), conn);
+            }
+        } finally {
+            connectionsLock.unlockWrite(stamp);
+        }
     }
 
     @SuppressWarnings("unchecked")
@@ -301,11 +356,20 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
         final ReconnectingClientConnection<T> conn = (ReconnectingClientConnection<T>)newConn;
         LOG.info("{}: connection {} reconnecting as {}", persistenceId(), oldConn, newConn);
 
-        final boolean replaced = connections.replace(oldConn.cookie(), (AbstractClientConnection<T>)oldConn, conn);
-        if (!replaced) {
-            final AbstractClientConnection<T> existing = connections.get(oldConn.cookie());
-            LOG.warn("{}: old connection {} does not match existing {}, new connection {} in limbo", persistenceId(),
-                oldConn, existing, newConn);
+        final long stamp = connectionsLock.writeLock();
+        try {
+            final boolean replaced = connections.replace(oldConn.cookie(), (AbstractClientConnection<T>)oldConn, conn);
+            if (!replaced) {
+                final AbstractClientConnection<T> existing = connections.get(oldConn.cookie());
+                if (existing != null) {
+                    LOG.warn("{}: failed to replace connection {}, as it was superseded by {}", persistenceId(), conn,
+                        existing);
+                } else {
+                    LOG.warn("{}: failed to replace connection {}, as it was not tracked", persistenceId(), conn);
+                }
+            }
+        } finally {
+            connectionsLock.unlockWrite(stamp);
         }
 
         final Long shard = oldConn.cookie();
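
The hunks above converge on a single pattern: every swap or removal in the connections map now
happens under the connections write lock and goes through the conditional ConcurrentHashMap
operations (replace(key, oldConn, newConn) and remove(key, conn)), so a connection that was
superseded concurrently is detected and logged rather than silently overwritten. Below is a
minimal standalone sketch of that pattern, not part of this patch; the names ConnectionTable,
swap and drop are hypothetical illustrations and not cds-access-client API.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.StampedLock;

    final class ConnectionTable<K, C> {
        private final ConcurrentHashMap<K, C> connections = new ConcurrentHashMap<>();
        private final StampedLock connectionsLock = new StampedLock();

        // Atomically replace oldConn with newConn; report (instead of overwriting) when oldConn
        // is no longer the connection tracked for this cookie.
        boolean swap(final K cookie, final C oldConn, final C newConn) {
            final long stamp = connectionsLock.writeLock();
            try {
                if (connections.replace(cookie, oldConn, newConn)) {
                    return true;
                }
                final C existing = connections.get(cookie);
                System.err.printf("stale swap for %s: old=%s existing=%s new=%s%n",
                    cookie, oldConn, existing, newConn);
                return false;
            } finally {
                connectionsLock.unlockWrite(stamp);
            }
        }

        // Conditional removal: drop the connection only if it is still the tracked one.
        boolean drop(final K cookie, final C conn) {
            final long stamp = connectionsLock.writeLock();
            try {
                return connections.remove(cookie, conn);
            } finally {
                connectionsLock.unlockWrite(stamp);
            }
        }
    }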