X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fcds-access-client%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Faccess%2Fclient%2FAbstractClientConnection.java;h=abd668010a16ad09c3c0d573c0a0320cd59a9e85;hp=6cb89eec1601e5fa901f89e7ac37d7e254698d66;hb=a12fb3d06006f9f5ca90f4323dcaaad4f5ad1f62;hpb=a4839cbdbe20bced4f2651ff4a2daa618c848946

diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java
index 6cb89eec16..abd668010a 100644
--- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java
+++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java
@@ -69,6 +69,13 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     @VisibleForTesting
     static final long NO_PROGRESS_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(15);
 
+    // Emit a debug entry if we sleep for more than this amount
+    private static final long DEBUG_DELAY_NANOS = TimeUnit.MILLISECONDS.toNanos(100);
+
+    // Upper bound on the time a thread is forced to sleep to keep queue size under control
+    private static final long MAX_DELAY_SECONDS = 5;
+    private static final long MAX_DELAY_NANOS = TimeUnit.SECONDS.toNanos(MAX_DELAY_SECONDS);
+
     private final Lock lock = new ReentrantLock();
     private final ClientActorContext context;
     @GuardedBy("lock")
@@ -130,13 +137,7 @@
      */
     public final void sendRequest(final Request<?, ?> request, final Consumer<Response<?, ?>> callback) {
         final long now = currentTime();
-        final long delay = enqueueEntry(new ConnectionEntry(request, callback, now), now);
-        try {
-            TimeUnit.NANOSECONDS.sleep(delay);
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            LOG.debug("Interrupted after sleeping {}ns", e, currentTime() - now);
-        }
+        sendEntry(new ConnectionEntry(request, callback, now), now);
     }
 
     /**
@@ -156,6 +157,24 @@
         enqueueEntry(new ConnectionEntry(request, callback, enqueuedTicks), currentTime());
     }
 
+    public final long enqueueEntry(final ConnectionEntry entry, final long now) {
+        lock.lock();
+        try {
+            final RequestException maybePoison = poisoned;
+            if (maybePoison != null) {
+                throw new IllegalStateException("Connection " + this + " has been poisoned", maybePoison);
+            }
+
+            if (queue.isEmpty()) {
+                // The queue is becoming non-empty, schedule a timer.
+                scheduleTimer(entry.getEnqueuedTicks() + REQUEST_TIMEOUT_NANOS - now);
+            }
+            return queue.enqueue(entry, now);
+        } finally {
+            lock.unlock();
+        }
+    }
+
     public abstract Optional<T> getBackendInfo();
 
     final Collection<ConnectionEntry> startReplay() {
@@ -166,6 +185,20 @@
     @GuardedBy("lock")
     final void finishReplay(final ReconnectForwarder forwarder) {
         setForwarder(forwarder);
+
+        /*
+         * The process of replaying all messages may have taken a significant chunk of time, depending on type
+         * of messages, queue depth and available processing power. In extreme situations this may have already
+         * exceeded BACKEND_ALIVE_TIMEOUT_NANOS, in which case we are running the risk of not making reasonable forward
+         * progress before we start a reconnect cycle.
+         *
+         * Note that the timer is armed after we have sent the first message, hence we should be seeing a response
+         * from the backend before we see a timeout, simply due to how the mailbox operates.
+         *
+         * At any rate, reset the timestamp once we complete reconnection (which is an atomic transition from the
+         * perspective of the outside world), as that makes it a bit easier to reason about timing of events.
+         */
+        lastReceivedTicks = currentTime();
         lock.unlock();
     }
 
@@ -178,21 +211,24 @@
     abstract ClientActorBehavior<T> lockedReconnect(ClientActorBehavior<T> current,
             RequestException runtimeRequestException);
 
-    final long enqueueEntry(final ConnectionEntry entry, final long now) {
-        lock.lock();
+    final void sendEntry(final ConnectionEntry entry, final long now) {
+        long delay = enqueueEntry(entry, now);
         try {
-            final RequestException maybePoison = poisoned;
-            if (maybePoison != null) {
-                throw new IllegalStateException("Connection " + this + " has been poisoned", maybePoison);
-            }
-
-            if (queue.isEmpty()) {
-                // The queue is becoming non-empty, schedule a timer.
-                scheduleTimer(entry.getEnqueuedTicks() + REQUEST_TIMEOUT_NANOS - now);
+            if (delay >= DEBUG_DELAY_NANOS) {
+                if (delay > MAX_DELAY_NANOS) {
+                    LOG.info("Capping {} throttle delay from {} to {} seconds", this,
+                        TimeUnit.NANOSECONDS.toSeconds(delay), MAX_DELAY_SECONDS, new Throwable());
+                    delay = MAX_DELAY_NANOS;
+                }
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("{}: Sleeping for {}ms on connection {}", context.persistenceId(),
+                        TimeUnit.NANOSECONDS.toMillis(delay), this);
+                }
             }
-            return queue.enqueue(entry, now);
-        } finally {
-            lock.unlock();
+            TimeUnit.NANOSECONDS.sleep(delay);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            LOG.debug("Interrupted after sleeping {}ns", currentTime() - now, e);
         }
     }
 
@@ -213,11 +249,11 @@
     @GuardedBy("lock")
     private void scheduleTimer(final long delay) {
         if (haveTimer) {
-            LOG.debug("{}: timer already scheduled", context.persistenceId());
+            LOG.debug("{}: timer already scheduled on {}", context.persistenceId(), this);
             return;
         }
         if (queue.hasSuccessor()) {
-            LOG.debug("{}: connection has successor, not scheduling timer", context.persistenceId());
+            LOG.debug("{}: connection {} has a successor, not scheduling timer", context.persistenceId(), this);
             return;
         }
 
@@ -226,7 +262,7 @@
         final long normalized = delay <= 0 ? 0 : Math.min(delay, BACKEND_ALIVE_TIMEOUT_NANOS);
 
         final FiniteDuration dur = FiniteDuration.fromNanos(normalized);
-        LOG.debug("{}: scheduling timeout in {}", context.persistenceId(), dur);
+        LOG.debug("{}: connection {} scheduling timeout in {}", context.persistenceId(), this, dur);
         context.executeInActor(this::runTimer, dur);
         haveTimer = true;
     }
@@ -246,6 +282,9 @@
         try {
             haveTimer = false;
             final long now = currentTime();
+
+            LOG.debug("{}: running timer on {}", context.persistenceId(), this);
+
             // The following line is only reliable when queue is not forwarding, but such state should not last long.
             // FIXME: BUG-8422: this may not be accurate w.r.t. replayed entries
             final long ticksSinceProgress = queue.ticksStalling(now);
@@ -264,6 +303,7 @@
             delay = lockedCheckTimeout(now);
             if (delay == null) {
                 // We have timed out. There is no point in scheduling a timer
+                LOG.debug("{}: connection {} timed out", context.persistenceId(), this);
                 return lockedReconnect(current, new RuntimeRequestException("Backend connection timed out",
                     new TimeoutException()));
             }
@@ -271,6 +311,8 @@
             if (delay.isPresent()) {
                 // If there is new delay, schedule a timer
                 scheduleTimer(delay.get());
+            } else {
+                LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this);
             }
         } finally {
             lock.unlock();
@@ -304,13 +346,14 @@
     @GuardedBy("lock")
     private Optional<Long> lockedCheckTimeout(final long now) {
         if (queue.isEmpty()) {
+            LOG.debug("{}: connection {} is empty", context.persistenceId(), this);
            return Optional.empty();
         }
 
         final long backendSilentTicks = backendSilentTicks(now);
         if (backendSilentTicks >= BACKEND_ALIVE_TIMEOUT_NANOS) {
-            LOG.debug("Connection {} has not seen activity from backend for {} nanoseconds, timing out", this,
-                backendSilentTicks);
+            LOG.debug("{}: Connection {} has not seen activity from backend for {} nanoseconds, timing out",
+                context.persistenceId(), this, backendSilentTicks);
             return null;
         }
 
@@ -323,7 +366,7 @@
             tasksTimedOut++;
             queue.remove(now);
-            LOG.debug("Connection {} timed out entryt {}", this, head);
+            LOG.debug("{}: Connection {} timed out entry {}", context.persistenceId(), this, head);
             head.complete(head.getRequest().toRequestFailure(
                 new RequestTimeoutException("Timed out after " + beenOpen + "ns")));
         }
 
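Note: the heart of this patch is a cooperative backpressure scheme. enqueueEntry() returns a
suggested delay that grows with queue depth, and sendEntry() sleeps for that long outside the
lock, capping the sleep at MAX_DELAY_NANOS and logging any sleep above DEBUG_DELAY_NANOS. The
following is a minimal stand-alone sketch of the same pattern; ThrottlingQueue, softLimit and
the linear delay formula are illustrative assumptions, not code from this patch:

    import java.util.ArrayDeque;
    import java.util.Queue;
    import java.util.concurrent.TimeUnit;

    final class ThrottlingQueue<E> {
        // Same roles as DEBUG_DELAY_NANOS/MAX_DELAY_NANOS in the patch above
        private static final long DEBUG_DELAY_NANOS = TimeUnit.MILLISECONDS.toNanos(100);
        private static final long MAX_DELAY_NANOS = TimeUnit.SECONDS.toNanos(5);

        private final Queue<E> queue = new ArrayDeque<>();
        private final int softLimit;

        ThrottlingQueue(final int softLimit) {
            this.softLimit = softLimit;
        }

        // Enqueue and report how long the producer should back off; the penalty
        // grows linearly with the number of entries beyond the soft limit.
        synchronized long enqueue(final E entry) {
            queue.add(entry);
            final long excess = queue.size() - softLimit;
            return excess <= 0 ? 0 : TimeUnit.MILLISECONDS.toNanos(excess);
        }

        // Counterpart of sendEntry(): sleep for the suggested delay, but never
        // longer than the cap, and only log the sleeps a human would notice.
        void send(final E entry) {
            long delay = enqueue(entry);
            if (delay > MAX_DELAY_NANOS) {
                delay = MAX_DELAY_NANOS;
            }
            if (delay >= DEBUG_DELAY_NANOS) {
                System.out.printf("throttling producer for %dms%n", TimeUnit.NANOSECONDS.toMillis(delay));
            }
            try {
                TimeUnit.NANOSECONDS.sleep(delay);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }

Sleeping after the lock is released, rather than inside enqueueEntry(), is what keeps the
throttle from blocking other producers; the new Throwable() passed to LOG.info() makes SLF4J
print the caller's stack trace, which helps pinpoint which producer had to be capped.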
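Note: the timer-side changes rely on a three-way contract in lockedCheckTimeout(): null means
the backend has been silent too long and the caller must reconnect, Optional.empty() means the
queue is empty and no timer is needed, and a present value is the delay before the timer should
fire again. Below is a simplified sketch of that contract, with made-up fields standing in for
the real queue state and placeholder timeout values:

    import java.util.Optional;
    import java.util.concurrent.TimeUnit;

    final class TimeoutCheck {
        // Placeholder values; the real constants live in AbstractClientConnection
        private static final long REQUEST_TIMEOUT_NANOS = TimeUnit.SECONDS.toNanos(30);
        private static final long BACKEND_ALIVE_TIMEOUT_NANOS = TimeUnit.SECONDS.toNanos(30);

        private boolean queueEmpty;       // stand-in for queue.isEmpty()
        private long lastReceivedTicks;   // last time the backend sent us anything
        private long headEnqueuedTicks;   // enqueue timestamp of the oldest entry

        // Returning null from an Optional-returning method is deliberate here:
        // it mirrors the tri-state contract of lockedCheckTimeout() above.
        Optional<Long> checkTimeout(final long now) {
            if (queueEmpty) {
                return Optional.empty();      // nothing pending, no timer needed
            }
            if (now - lastReceivedTicks >= BACKEND_ALIVE_TIMEOUT_NANOS) {
                return null;                  // backend silent, caller reconnects
            }
            // In the real code, entries older than REQUEST_TIMEOUT_NANOS are failed
            // with RequestTimeoutException and removed before this point.
            final long beenOpen = now - headEnqueuedTicks;
            return Optional.of(Math.max(0, REQUEST_TIMEOUT_NANOS - beenOpen));
        }
    }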