package org.opendaylight.controller.cluster.access.client;
import akka.actor.ActorRef;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
-import com.google.common.collect.Iterables;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayDeque;
+import java.util.Collection;
+import java.util.Deque;
import java.util.Iterator;
import java.util.Optional;
import java.util.Queue;
private static final Logger LOG = LoggerFactory.getLogger(TransmitQueue.class);
- private final ArrayDeque<TransmittedConnectionEntry> inflight = new ArrayDeque<>();
- private final ArrayDeque<ConnectionEntry> pending = new ArrayDeque<>();
+ private final Deque<TransmittedConnectionEntry> inflight = new ArrayDeque<>();
+ private final Deque<ConnectionEntry> pending = new ArrayDeque<>();
private final ProgressTracker tracker;
private ReconnectForwarder successor;
tracker = new AveragingProgressTracker(targetDepth);
}
- final Iterable<ConnectionEntry> asIterable() {
- return Iterables.concat(inflight, pending);
+ /**
+ * Drain the contents of the connection into a list. This will leave the queue empty and allow further entries
+ * to be added to it during replay. When the successor is set, all entries enqueued between the time this
+ * method returns and the time the successor is set will be replayed to the successor.
+ *
+ * @return Collection of entries present in the queue.
+ */
+ final Collection<ConnectionEntry> drain() {
+ final Collection<ConnectionEntry> ret = new ArrayDeque<>(inflight.size() + pending.size());
+ ret.addAll(inflight);
+ ret.addAll(pending);
+ inflight.clear();
+ pending.clear();
+ return ret;
}
final long ticksStalling(final long now) {
tracker.closeTask(now, entry.getEnqueuedTicks(), entry.getTxTicks(), envelope.getExecutionTimeNanos());
// We have freed up a slot, try to transmit something
- int toSend = canTransmitCount(inflight.size());
- while (toSend > 0) {
+ tryTransmit(now);
+
+ return Optional.of(entry);
+ }
+
+ final void tryTransmit(final long now) {
+ final int toSend = canTransmitCount(inflight.size());
+ if (toSend > 0 && !pending.isEmpty()) {
+ transmitEntries(toSend, now);
+ }
+ }
+
+ private void transmitEntries(final int maxTransmit, final long now) {
+ for (int i = 0; i < maxTransmit; ++i) {
final ConnectionEntry e = pending.poll();
if (e == null) {
- break;
+ LOG.debug("Queue {} transmitted {} requests", this, i);
+ return;
}
- LOG.debug("Transmitting entry {}", e);
- transmit(e, now);
- toSend--;
+ transmitEntry(e, now);
}
- return Optional.of(entry);
+ LOG.debug("Queue {} transmitted {} requests", this, maxTransmit);
+ }
+
+ private void transmitEntry(final ConnectionEntry entry, final long now) {
+ LOG.debug("Queue {} transmitting entry {}", this, entry);
+ // We are not thread-safe and are supposed to be externally-guarded,
+ // hence send-before-record should be fine.
+ // This needs to be revisited if the external guards are lowered.
+ inflight.addLast(transmit(entry, now));
}
/**
*/
final long enqueue(final ConnectionEntry entry, final long now) {
if (successor != null) {
+ // This call will pay the enqueuing price, hence the caller does not have to
successor.forwardEntry(entry, now);
return 0;
}
+ // XXX: we should place a guard against incorrect entry sequences:
+ // entry.getEnqueuedTicks() should have non-negative difference from the last entry present in the queues
+
// Reserve an entry before we do anything that can fail
final long delay = tracker.openTask(now);
- if (canTransmitCount(inflight.size()) <= 0) {
+
+ /*
+ * This is defensive to make sure we do not do the wrong thing here and reorder messages if we ever happen
+ * to have available send slots and a non-empty pending queue.
+ */
+ final int toSend = canTransmitCount(inflight.size());
+ if (toSend <= 0) {
LOG.trace("Queue is at capacity, delayed sending of request {}", entry.getRequest());
- pending.add(entry);
- } else {
- // We are not thread-safe and are supposed to be externally-guarded,
- // hence send-before-record should be fine.
- // This needs to be revisited if the external guards are lowered.
- inflight.offer(transmit(entry, now));
- LOG.debug("Sent request {} on queue {}", entry.getRequest(), this);
+ pending.addLast(entry);
+ return delay;
}
+
+ if (pending.isEmpty()) {
+ transmitEntry(entry, now);
+ return delay;
+ }
+
+ pending.addLast(entry);
+ transmitEntries(toSend, now);
return delay;
}
}
final void setForwarder(final ReconnectForwarder forwarder, final long now) {
- Verify.verify(successor == null, "Successor {} already set on connection {}", successor, this);
+ Verify.verify(successor == null, "Successor %s already set on connection %s", successor, this);
successor = Preconditions.checkNotNull(forwarder);
LOG.debug("Connection {} superseded by {}, splicing queue", this, successor);
+ /*
+ * We need to account for entries which have been added between the time drain() was called and this method
+ * is invoked. Since the old connection is visible during replay and some entries may have completed on the
+ * replay thread, there was an avenue for this to happen.
+ */
+ int count = 0;
ConnectionEntry entry = inflight.poll();
while (entry != null) {
- successor.forwardEntry(entry, now);
+ successor.replayEntry(entry, now);
entry = inflight.poll();
+ count++;
}
entry = pending.poll();
while (entry != null) {
- successor.forwardEntry(entry, now);
+ successor.replayEntry(entry, now);
entry = pending.poll();
+ count++;
}
+
+ LOG.debug("Connection {} queue spliced {} messages", this, count);
+ }
+
+ final void remove(final long now) {
+ final TransmittedConnectionEntry txe = inflight.poll();
+ if (txe == null) {
+ final ConnectionEntry entry = pending.pop();
+ tracker.closeTask(now, entry.getEnqueuedTicks(), 0, 0);
+ } else {
+ tracker.closeTask(now, txe.getEnqueuedTicks(), txe.getTxTicks(), 0);
+ }
+ }
+
+ @VisibleForTesting
+ Deque<TransmittedConnectionEntry> getInflight() {
+ return inflight;
+ }
+
+ @VisibleForTesting
+ Deque<ConnectionEntry> getPending() {
+ return pending;
}
/*
}
queue.clear();
}
-
}