+ lock.lock();
+
+ // Declared before the try so they can be published after the lock is released.
+ // NOTE(review): presumably assigned in the no-progress branch beyond this hunk — confirm.
+ final List<ConnectionEntry> poisonEntries;
+ final NoProgressException poisonCause;
+ try {
+ // Timer is firing now, so it is no longer outstanding; a new one may be scheduled below.
+ haveTimer = false;
+ final long now = currentTime();
+
+ LOG.debug("{}: running timer on {}", context.persistenceId(), this);
+
+ // The following line is only reliable when queue is not forwarding, but such state should not last long.
+ // FIXME: BUG-8422: this may not be accurate w.r.t. replayed entries
+ final long ticksSinceProgress = queue.ticksStalling(now);
+ if (ticksSinceProgress < context.config().getNoProgressTimeout()) {
+ // Requests are always scheduled in sequence, hence checking for timeout is relatively straightforward.
+ // Note we also inquire about the delay, so we can re-schedule if needed, hence the unusual
+ // tri-state return convention: null means the head request has timed out, an empty Optional
+ // means no timer needs to be scheduled, and a present value is the delay to re-schedule with.
+ // NOTE(review): returning null from an Optional-returning method violates the Optional
+ // contract — consider a dedicated result type (or OptionalLong plus a separate signal).
+ final Optional<Long> delay = lockedCheckTimeout(now);
+ if (delay == null) {
+ // We have timed out. There is no point in scheduling a timer
+ LOG.debug("{}: connection {} timed out", context.persistenceId(), this);
+ return lockedReconnect(current, new RuntimeRequestException("Backend connection timed out",
+ new TimeoutException()));
+ }
+
+ if (delay.isPresent()) {
+ // If there is new delay, schedule a timer
+ scheduleTimer(delay.get());
+ } else {
+ LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this);
+ }