/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.openflowjava.protocol.impl.core.connection;

import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import io.netty.channel.Channel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueueException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.OfHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
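
/**
 * An {@link OutboundQueue} implementation backed by a list of fixed-size
 * {@link StackedSegment}s. XID reservation runs lock-free against the published
 * first segment (the fast path) and only takes the {@code unflushedSegments} lock
 * when a new segment needs to be allocated. Flushing entries to the channel is
 * guaranteed to happen on the Netty EventLoop.
 *
 * Rough life cycle, as far as this class is concerned: a caller reserves an XID via
 * {@link #reserveEntry()}, the corresponding entry is committed through the rest of
 * the {@link OutboundQueue} contract (implemented by subclasses), and the manager
 * drains committed entries with {@code writeEntries()}.
 */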
abstract class AbstractStackedOutboundQueue implements OutboundQueue {

    private static final Logger LOG = LoggerFactory.getLogger(AbstractStackedOutboundQueue.class);
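
    // Updater giving us atomic increments of the volatile lastXid field, so that
    // XID reservation on the fast path needs no locking.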
    protected static final AtomicLongFieldUpdater<AbstractStackedOutboundQueue> LAST_XID_OFFSET_UPDATER = AtomicLongFieldUpdater
            .newUpdater(AbstractStackedOutboundQueue.class, "lastXid");

    @GuardedBy("unflushedSegments")
    protected volatile StackedSegment firstSegment;
    @GuardedBy("unflushedSegments")
    protected final List<StackedSegment> unflushedSegments = new ArrayList<>(2);
    @GuardedBy("unflushedSegments")
    protected final List<StackedSegment> uncompletedSegments = new ArrayList<>(2);
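
    // lastXid is the highest XID handed out to callers; allocatedXid is the highest
    // XID currently backed by an allocated segment. Reservations beyond allocatedXid
    // take the slow path and extend unflushedSegments.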
    private volatile long lastXid = -1;
    private volatile long allocatedXid = -1;

    @GuardedBy("unflushedSegments")
    protected Integer shutdownOffset;

    // Accessed from Netty only
    protected int flushOffset;

    protected final AbstractOutboundQueueManager<?, ?> manager;

    AbstractStackedOutboundQueue(final AbstractOutboundQueueManager<?, ?> manager) {
        this.manager = Preconditions.checkNotNull(manager);
        firstSegment = StackedSegment.create(0L);
        uncompletedSegments.add(firstSegment);
        unflushedSegments.add(firstSegment);
    }

    @GuardedBy("unflushedSegments")
    protected void ensureSegment(final StackedSegment first, final int offset) {
        final int segmentOffset = offset / StackedSegment.SEGMENT_SIZE;
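        // For illustration (assuming SEGMENT_SIZE is 4096): a slow-path offset of 5000
        // maps to segmentOffset 1, so the loop below allocates segments until index 1 exists.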
        LOG.debug("Queue {} slow offset {} maps to {} segments {}", this, offset, segmentOffset, unflushedSegments.size());

        for (int i = unflushedSegments.size(); i <= segmentOffset; ++i) {
            final StackedSegment newSegment = StackedSegment.create(first.getBaseXid() + (StackedSegment.SEGMENT_SIZE * i));
            LOG.debug("Adding segment {}", newSegment);
            unflushedSegments.add(newSegment);
        }

        allocatedXid = unflushedSegments.get(unflushedSegments.size() - 1).getEndXid();
    }

    /*
     * This method is expected to be called from multiple threads concurrently.
     */
    @Override
    public Long reserveEntry() {
        final long xid = LAST_XID_OFFSET_UPDATER.incrementAndGet(this);
        final StackedSegment fastSegment = firstSegment;
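
        // Fast path: the XID falls into the already-published first segment and needs
        // no locking. Only XIDs beyond the allocated range fall through to the
        // synchronized slow path below.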
        if (xid >= fastSegment.getBaseXid() + StackedSegment.SEGMENT_SIZE) {
            if (xid >= allocatedXid) {
                // Multiple segments, this is a slow path
                LOG.debug("Queue {} falling back to slow reservation for XID {}", this, xid);

                synchronized (unflushedSegments) {
                    LOG.debug("Queue {} executing slow reservation for XID {}", this, xid);

                    // Shutdown was scheduled, need to fail the reservation
                    if (shutdownOffset != null) {
                        LOG.debug("Queue {} is being shutdown, failing reservation", this);
                        return null;
                    }

                    // Ensure we have the appropriate segment for the specified XID
                    final StackedSegment slowSegment = firstSegment;
                    final int slowOffset = (int) (xid - slowSegment.getBaseXid());
                    Verify.verify(slowOffset >= 0);

                    // Now let's see if we need to allocate a new segment
                    ensureSegment(slowSegment, slowOffset);

                    LOG.debug("Queue {} slow reservation finished", this);
                }
            } else {
                LOG.debug("Queue {} XID {} is already backed", this, xid);
            }
        }

        LOG.trace("Queue {} allocated XID {}", this, xid);
        return xid;
    }

    /**
     * Write some entries from the queue to the channel. Guaranteed to run
     * in the corresponding EventLoop.
     *
     * @param channel Channel onto which we are writing
     * @param now timestamp to be passed to the manager's write operations
     * @return Number of entries written out
     */
    int writeEntries(@Nonnull final Channel channel, final long now) {
        // Local cache
        StackedSegment segment = firstSegment;
        int entries = 0;
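
        // Drain committed entries until the channel stops being writable or we hit an
        // entry that has not been committed yet, respecting the channel's writability
        // as back-pressure.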
        while (channel.isWritable()) {
            final OutboundQueueEntry entry = segment.getEntry(flushOffset);
            if (!entry.isCommitted()) {
                LOG.debug("Queue {} XID {} segment {} offset {} not committed yet", this, segment.getBaseXid() + flushOffset, segment, flushOffset);
                break;
            }

            LOG.trace("Queue {} flushing entry at offset {}", this, flushOffset);
            final OfHeader message = entry.takeMessage();
            flushOffset++;
            entries++;

            if (message != null) {
                manager.writeMessage(message, now);
            } else {
                entry.complete(null);
            }

            if (flushOffset >= StackedSegment.SEGMENT_SIZE) {
                /*
                 * Slow path: purge the current segment unless it's the last one.
                 * If it is, we leave it for replacement when a new reservation
                 * is run on it.
                 *
                 * This costs us two slow paths, but hey, this should be very rare,
                 * so let's keep things simple.
                 */
                synchronized (unflushedSegments) {
                    LOG.debug("Flush offset {} unflushed segments {}", flushOffset, unflushedSegments.size());

                    // We may have raced ahead of reservation code and need to allocate a segment
                    ensureSegment(segment, flushOffset);

                    // Remove the segment, update the firstSegment and reset flushOffset
                    final StackedSegment oldSegment = unflushedSegments.remove(0);
                    if (oldSegment.isComplete()) {
                        uncompletedSegments.remove(oldSegment);
                        oldSegment.recycle();
                    }

                    // Reset the first segment and add it to the uncompleted list
                    segment = unflushedSegments.get(0);
                    uncompletedSegments.add(segment);

                    // Update the shutdown offset
                    if (shutdownOffset != null) {
                        shutdownOffset -= StackedSegment.SEGMENT_SIZE;
                    }

                    // Allow reservations back on the fast path by publishing the new first segment
                    firstSegment = segment;

                    flushOffset = 0;
                    LOG.debug("Queue {} flush moved to segment {}", this, segment);
                }
            }
        }

        return entries;
    }
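
    /**
     * Attempt to pair an incoming response message with its outstanding request entry.
     * A barrier response implicitly completes every segment queued before the one that
     * contained the barrier request.
     *
     * @param message response received from the device
     * @return true if a matching request entry was found, false otherwise
     */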
    boolean pairRequest(final OfHeader message) {
        Iterator<StackedSegment> it = uncompletedSegments.iterator();
        while (it.hasNext()) {
            final StackedSegment queue = it.next();
            final OutboundQueueEntry entry = queue.pairRequest(message);
            if (entry == null) {
                continue;
            }

            LOG.trace("Queue {} accepted response {}", queue, message);

            // This has been a barrier request, we need to flush all
            // previous queues
            if (entry.isBarrier() && uncompletedSegments.size() > 1) {
                LOG.trace("Queue {} indicated request was a barrier", queue);

                it = uncompletedSegments.iterator();
                while (it.hasNext()) {
                    final StackedSegment q = it.next();

                    // We want to complete all queues before the current one, we will
                    // complete the current queue below
                    if (!queue.equals(q)) {
                        LOG.trace("Queue {} is implied finished", q);
                        q.completeAll();
                        it.remove();
                        q.recycle();
                    } else {
                        break;
                    }
                }
            }

            if (queue.isComplete()) {
                LOG.trace("Queue {} is finished", queue);
                it.remove();
                queue.recycle();
            }

            return true;
        }

        LOG.debug("Failed to find completion for message {}", message);
        return false;
    }

    boolean needsFlush() {
        // flushOffset always points to the first entry, which can be changed only
        // from Netty, so we are fine here.
        if (firstSegment.getBaseXid() + flushOffset > lastXid) {
            return false;
        }

        if (shutdownOffset != null && flushOffset >= shutdownOffset) {
            return false;
        }

        return firstSegment.getEntry(flushOffset).isCommitted();
    }
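
    /**
     * Begin the shutdown sequence: prevent further fast-path reservations and fail
     * every uncompleted entry, since the channel is going away.
     *
     * @return number of entries that were failed
     */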
    long startShutdown() {
        /*
         * We are dealing with a multi-threaded shutdown, as the user may still
         * be reserving entries in the queue. We are executing in a netty thread,
         * so neither flush nor barrier can be running, which is good news.
         * We will eat up all the slots in the queue here, mark the first offset
         * which will never be reserved, and free up all the cached queues. We then
         * schedule the flush task, which will deal with the rest of the shutdown process.
         */
        synchronized (unflushedSegments) {
            // Increment the offset by the segment size, preventing fast path allocations.
            // Since we are holding the slow path lock, any reservations will see the queue
            // in shutdown and fail accordingly.
            final long xid = LAST_XID_OFFSET_UPDATER.addAndGet(this, StackedSegment.SEGMENT_SIZE);
            shutdownOffset = (int) (xid - firstSegment.getBaseXid() - StackedSegment.SEGMENT_SIZE);

            // Fail all uncompleted entries, as they will never be completed due to the disconnected channel.
            return lockedFailSegments(uncompletedSegments.iterator());
        }
    }

    /**
     * Checks whether the shutdown has reached its final phase, i.e. all entries below
     * shutdownOffset have been flushed, and if so fails every entry that is still
     * not completed.
     *
     * @param channel netty channel
     * @return true if in the final phase, false if a flush is still needed
     */
    boolean finishShutdown(final Channel channel) {
        boolean needsFlush;
        synchronized (unflushedSegments) {
            // Fail all entries that were flushed up to shutdownOffset (and thus became uncompleted)
            // - they will never be completed due to the disconnected channel.
            lockedFailSegments(uncompletedSegments.iterator());
            // If no further flush is needed, or we are no longer able to write to the channel,
            // fail all unflushed segments, so that each enqueued entry is reported as unsuccessful
            // due to channel disconnection. No further entries should be enqueued by this time.
            needsFlush = channel.isWritable() && needsFlush();
            if (!needsFlush) {
                lockedFailSegments(unflushedSegments.iterator());
            }
        }

        return !needsFlush;
    }
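
    /**
     * Look up the entry backing the given XID. The common case resolves against the
     * published first segment without locking; XIDs beyond it are recomputed under
     * the unflushedSegments lock.
     *
     * @param xid XID of the entry to locate
     * @return the corresponding entry
     */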
    protected OutboundQueueEntry getEntry(final Long xid) {
        final StackedSegment fastSegment = firstSegment;
        final long calcOffset = xid - fastSegment.getBaseXid();
        Preconditions.checkArgument(calcOffset >= 0, "Commit of XID %s does not match up with base XID %s", xid, fastSegment.getBaseXid());

        Verify.verify(calcOffset <= Integer.MAX_VALUE);
        final int fastOffset = (int) calcOffset;

        if (fastOffset >= StackedSegment.SEGMENT_SIZE) {
            LOG.debug("Queue {} falling back to slow commit of XID {} at offset {}", this, xid, fastOffset);

            final StackedSegment segment;
            final int slowOffset;
            synchronized (unflushedSegments) {
                final StackedSegment slowSegment = firstSegment;
                final long slowCalcOffset = xid - slowSegment.getBaseXid();
                Verify.verify(slowCalcOffset >= 0 && slowCalcOffset <= Integer.MAX_VALUE);
                slowOffset = (int) slowCalcOffset;

                LOG.debug("Queue {} recalculated offset of XID {} to {}", this, xid, slowOffset);
                segment = unflushedSegments.get(slowOffset / StackedSegment.SEGMENT_SIZE);
            }

            final int segOffset = slowOffset % StackedSegment.SEGMENT_SIZE;
            LOG.debug("Queue {} slow commit of XID {} completed at offset {} (segment {} offset {})", this, xid, slowOffset, segment, segOffset);
            return segment.getEntry(segOffset);
        }

        return fastSegment.getEntry(fastOffset);
    }

    /**
     * Fails all not-yet-completed entries in the given segments and releases segments
     * that become complete as a result.
     *
     * @param iterator iterator over the segments to be failed
     * @return number of failed entries
     */
    @GuardedBy("unflushedSegments")
    private long lockedFailSegments(Iterator<StackedSegment> iterator) {
        long entries = 0;

        // Fail all queues
        while (iterator.hasNext()) {
            final StackedSegment segment = iterator.next();

            entries += segment.failAll(OutboundQueueException.DEVICE_DISCONNECTED);
            if (segment.isComplete()) {
                LOG.trace("Cleared segment {}", segment);
                iterator.remove();
            }
        }

        return entries;
    }
}