/*
 * Copyright (c) 2015 Pantheon Technologies s.r.o. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.openflowjava.protocol.impl.core.connection;

import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.util.concurrent.FutureCallback;
import io.netty.channel.Channel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueueException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.OfHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
26 final class StackedOutboundQueue implements OutboundQueue {
27 private static final Logger LOG = LoggerFactory.getLogger(StackedOutboundQueue.class);
28 private static final AtomicLongFieldUpdater<StackedOutboundQueue> BARRIER_XID_UPDATER = AtomicLongFieldUpdater.newUpdater(StackedOutboundQueue.class, "barrierXid");
29 private static final AtomicLongFieldUpdater<StackedOutboundQueue> LAST_XID_UPDATER = AtomicLongFieldUpdater.newUpdater(StackedOutboundQueue.class, "lastXid");
31 @GuardedBy("unflushedSegments")
32 private volatile StackedSegment firstSegment;
33 @GuardedBy("unflushedSegments")
34 private final List<StackedSegment> unflushedSegments = new ArrayList<>(2);
35 @GuardedBy("unflushedSegments")
36 private final List<StackedSegment> uncompletedSegments = new ArrayList<>(2);
37 private final OutboundQueueManager<?> manager;
39 private volatile long allocatedXid = -1;
40 private volatile long barrierXid = -1;
41 private volatile long lastXid = -1;
43 @GuardedBy("unflushedSegments")
44 private Integer shutdownOffset;
46 // Accessed from Netty only
47 private int flushOffset;
49 StackedOutboundQueue(final OutboundQueueManager<?> manager) {
50 this.manager = Preconditions.checkNotNull(manager);
51 firstSegment = StackedSegment.create(0L);
52 uncompletedSegments.add(firstSegment);
53 unflushedSegments.add(firstSegment);
56 @GuardedBy("unflushedSegments")
57 private void ensureSegment(final StackedSegment first, final int offset) {
58 final int segmentOffset = offset / StackedSegment.SEGMENT_SIZE;
59 LOG.debug("Queue {} slow offset {} maps to {} segments {}", this, offset, segmentOffset, unflushedSegments.size());
61 for (int i = unflushedSegments.size(); i <= segmentOffset; ++i) {
62 final StackedSegment newSegment = StackedSegment.create(first.getBaseXid() + (StackedSegment.SEGMENT_SIZE * i));
63 LOG.debug("Adding segment {}", newSegment);
64 unflushedSegments.add(newSegment);
67 allocatedXid = uncompletedSegments.get(uncompletedSegments.size() - 1).getEndXid();
71 * This method is expected to be called from multiple threads concurrently.
74 public Long reserveEntry() {
75 final long xid = LAST_XID_UPDATER.incrementAndGet(this);
76 final StackedSegment fastSegment = firstSegment;
78 if (xid >= fastSegment.getBaseXid() + StackedSegment.SEGMENT_SIZE) {
79 if (xid >= allocatedXid) {
80 // Multiple segments, this a slow path
81 LOG.debug("Queue {} falling back to slow reservation for XID {}", this, xid);
83 synchronized (unflushedSegments) {
84 LOG.debug("Queue {} executing slow reservation for XID {}", this, xid);
86 // Shutdown was scheduled, need to fail the reservation
87 if (shutdownOffset != null) {
88 LOG.debug("Queue {} is being shutdown, failing reservation", this);
92 // Ensure we have the appropriate segment for the specified XID
93 final StackedSegment slowSegment = firstSegment;
94 final int slowOffset = (int) (xid - slowSegment.getBaseXid());
95 Verify.verify(slowOffset >= 0);
97 // Now, we let's see if we need to allocate a new segment
98 ensureSegment(slowSegment, slowOffset);
100 LOG.debug("Queue {} slow reservation finished", this);
103 LOG.debug("Queue {} XID {} is already backed", this, xid);
107 LOG.trace("Queue {} allocated XID {}", this, xid);
112 * This method is expected to be called from multiple threads concurrently
115 public void commitEntry(final Long xid, final OfHeader message, final FutureCallback<OfHeader> callback) {
116 final StackedSegment fastSegment = firstSegment;
117 final long calcOffset = xid - fastSegment.getBaseXid();
118 Preconditions.checkArgument(calcOffset >= 0, "Commit of XID %s does not match up with base XID %s", xid, fastSegment.getBaseXid());
120 Verify.verify(calcOffset <= Integer.MAX_VALUE);
121 final int fastOffset = (int) calcOffset;
123 final OutboundQueueEntry entry;
124 if (fastOffset >= StackedSegment.SEGMENT_SIZE) {
125 LOG.debug("Queue {} falling back to slow commit of XID {} at offset {}", this, xid, fastOffset);
127 final StackedSegment segment;
128 final int slowOffset;
129 synchronized (unflushedSegments) {
130 final StackedSegment slowSegment = firstSegment;
131 final long slowCalcOffset = xid - slowSegment.getBaseXid();
132 Verify.verify(slowCalcOffset >= 0 && slowCalcOffset <= Integer.MAX_VALUE);
133 slowOffset = (int) slowCalcOffset;
135 LOG.debug("Queue {} recalculated offset of XID {} to {}", this, xid, slowOffset);
136 segment = unflushedSegments.get(slowOffset / StackedSegment.SEGMENT_SIZE);
139 final int segOffset = slowOffset % StackedSegment.SEGMENT_SIZE;
140 entry = segment.getEntry(segOffset);
141 LOG.debug("Queue {} slow commit of XID {} completed at offset {} (segment {} offset {})", this, xid, slowOffset, segment, segOffset);
143 entry = fastSegment.getEntry(fastOffset);
146 entry.commit(message, callback);
147 if (entry.isBarrier()) {
150 final long prev = BARRIER_XID_UPDATER.getAndSet(this, my);
152 LOG.debug("Queue {} recorded pending barrier XID {}", this, my);
156 // We have traveled back, recover
157 LOG.debug("Queue {} retry pending barrier {} >= {}", this, prev, my);
162 LOG.trace("Queue {} committed XID {}", this, xid);
163 manager.ensureFlushing();
167 * Write some entries from the queue to the channel. Guaranteed to run
168 * in the corresponding EventLoop.
170 * @param channel Channel onto which we are writing
172 * @return Number of entries written out
174 int writeEntries(@Nonnull final Channel channel, final long now) {
176 StackedSegment segment = firstSegment;
179 while (channel.isWritable()) {
180 final OutboundQueueEntry entry = segment.getEntry(flushOffset);
181 if (!entry.isCommitted()) {
182 LOG.debug("Queue {} XID {} segment {} offset {} not committed yet", this, segment.getBaseXid() + flushOffset, segment, flushOffset);
186 LOG.trace("Queue {} flushing entry at offset {}", this, flushOffset);
187 final OfHeader message = entry.takeMessage();
191 if (message != null) {
192 manager.writeMessage(message, now);
194 entry.complete(null);
197 if (flushOffset >= StackedSegment.SEGMENT_SIZE) {
199 * Slow path: purge the current segment unless it's the last one.
200 * If it is, we leave it for replacement when a new reservation
203 * This costs us two slow paths, but hey, this should be very rare,
204 * so let's keep things simple.
206 synchronized (unflushedSegments) {
207 LOG.debug("Flush offset {} unflushed segments {}", flushOffset, unflushedSegments.size());
209 // We may have raced ahead of reservation code and need to allocate a segment
210 ensureSegment(segment, flushOffset);
212 // Remove the segment, update the firstSegment and reset flushOffset
213 final StackedSegment oldSegment = unflushedSegments.remove(0);
214 if (oldSegment.isComplete()) {
215 uncompletedSegments.remove(oldSegment);
216 oldSegment.recycle();
219 // Reset the first segment and add it to the uncompleted list
220 segment = unflushedSegments.get(0);
221 uncompletedSegments.add(segment);
223 // Update the shutdown offset
224 if (shutdownOffset != null) {
225 shutdownOffset -= StackedSegment.SEGMENT_SIZE;
228 // Allow reservations back on the fast path by publishing the new first segment
229 firstSegment = segment;
232 LOG.debug("Queue {} flush moved to segment {}", this, segment);
240 Long reserveBarrierIfNeeded() {
241 final long bXid = barrierXid;
242 final long fXid = firstSegment.getBaseXid() + flushOffset;
244 LOG.debug("Barrier found at XID {} (currently at {})", bXid, fXid);
247 return reserveEntry();
251 boolean pairRequest(final OfHeader message) {
252 Iterator<StackedSegment> it = uncompletedSegments.iterator();
253 while (it.hasNext()) {
254 final StackedSegment queue = it.next();
255 final OutboundQueueEntry entry = queue.pairRequest(message);
260 LOG.trace("Queue {} accepted response {}", queue, message);
262 // This has been a barrier request, we need to flush all
264 if (entry.isBarrier() && uncompletedSegments.size() > 1) {
265 LOG.trace("Queue {} indicated request was a barrier", queue);
267 it = uncompletedSegments.iterator();
268 while (it.hasNext()) {
269 final StackedSegment q = it.next();
271 // We want to complete all queues before the current one, we will
272 // complete the current queue below
273 if (!queue.equals(q)) {
274 LOG.trace("Queue {} is implied finished", q);
284 if (queue.isComplete()) {
285 LOG.trace("Queue {} is finished", queue);
293 LOG.debug("Failed to find completion for message {}", message);
297 long startShutdown(final Channel channel) {
299 * We are dealing with a multi-threaded shutdown, as the user may still
300 * be reserving entries in the queue. We are executing in a netty thread,
301 * so neither flush nor barrier can be running, which is good news.
303 * We will eat up all the slots in the queue here and mark the offset first
304 * reserved offset and free up all the cached queues. We then schedule
305 * the flush task, which will deal with the rest of the shutdown process.
307 synchronized (unflushedSegments) {
308 // Increment the offset by the segment size, preventing fast path allocations,
309 // since we are holding the slow path lock, any reservations will see the queue
310 // in shutdown and fail accordingly.
311 final long xid = LAST_XID_UPDATER.addAndGet(this, StackedSegment.SEGMENT_SIZE);
312 shutdownOffset = (int) (xid - firstSegment.getBaseXid() - StackedSegment.SEGMENT_SIZE);
314 return lockedShutdownFlush();
318 @GuardedBy("unflushedSegments")
319 private long lockedShutdownFlush() {
323 final Iterator<StackedSegment> it = uncompletedSegments.iterator();
324 while (it.hasNext()) {
325 final StackedSegment segment = it.next();
327 entries += segment.failAll(OutboundQueueException.DEVICE_DISCONNECTED);
328 if (segment.isComplete()) {
329 LOG.trace("Cleared segment {}", segment);
337 boolean finishShutdown() {
338 synchronized (unflushedSegments) {
339 lockedShutdownFlush();
342 return !needsFlush();
345 boolean needsFlush() {
346 // flushOffset always points to the first entry, which can be changed only
347 // from Netty, so we are fine here.
348 if (firstSegment.getBaseXid() + flushOffset > lastXid) {
352 if (shutdownOffset != null && flushOffset >= shutdownOffset) {
356 return firstSegment.getEntry(flushOffset).isCommitted();