/**
 * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.openflowplugin.openflow.md.queue;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.opendaylight.openflowplugin.api.openflow.md.core.TranslatorKey;
import org.opendaylight.openflowplugin.api.openflow.md.queue.HarvesterHandle;
import org.opendaylight.openflowplugin.api.openflow.md.queue.PopListener;
import org.opendaylight.openflowplugin.api.openflow.md.queue.QueueItem;
import org.opendaylight.openflowplugin.api.openflow.md.queue.QueueKeeper;
import org.opendaylight.openflowplugin.api.openflow.md.queue.QueueProcessor;
import org.opendaylight.openflowplugin.api.statistics.MessageSpy;
import org.opendaylight.openflowplugin.api.statistics.MessageSpy.STATISTIC_GROUP;
import org.opendaylight.openflowplugin.api.openflow.md.core.IMDMessageTranslator;
import org.opendaylight.openflowplugin.openflow.md.core.ThreadPoolLoggingExecutor;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.OfHeader;
import org.opendaylight.yangtools.yang.binding.DataContainer;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@link org.opendaylight.openflowplugin.api.openflow.md.queue.QueueKeeper} implementation focused on keeping
 * message order while using multiple threads for the translation phase.
 * <br>
 * There is an internal thread pool of limited size ({@link QueueProcessorLightImpl#setProcessingPoolSize(int)})
 * dedicated to translation. In addition there is a singleThreadPool dedicated to publishing (via popListeners).
 * <br>
 * Workflow:
 * <ol>
 * <li>upon message push a ticket is created and enqueued</li>
 * <li>available threads from the internal pool translate the message wrapped in the ticket</li>
 * <li>when translation of a particular message is finished, the result is set in the future result of the wrapping
 * ticket (the order of tickets in the queue is not touched during translation)</li>
 * <li>at the end of the queue there is a {@link TicketFinisher} running in the singleThreadPool; for each ticket it does:
 * <ol>
 * <li>invoke the blocking {@link BlockingQueue#take()} method in order to get the oldest ticket</li>
 * <li>invoke the blocking {@link Future#get()} on the dequeued ticket</li>
 * <li>as soon as the result of translation is available, the appropriate popListener is invoked</li>
 * </ol>
 * this way the order of messages is preserved while multiple threads are used for translating</li>
 * </ol>
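 * <p>
 * Below is a minimal wiring sketch (illustrative only): the {@code translatorMapping}, {@code popListenersMapping},
 * {@code messageSpy} and the per-connection {@code connectionQueue} ({@link QueueKeeper}) are assumed to be prepared
 * by the surrounding plugin wiring and are not defined here.
 * <pre>
 * {@code
 * QueueProcessorLightImpl queueProcessor = new QueueProcessorLightImpl();
 * queueProcessor.setMessageSpy(messageSpy);
 * queueProcessor.setTranslatorMapping(translatorMapping);
 * queueProcessor.setPopListenersMapping(popListenersMapping);
 * queueProcessor.setProcessingPoolSize(4);
 * queueProcessor.init();
 *
 * // let the harvester poll messages from a per-connection queue
 * AutoCloseable pollRegistration = queueProcessor.registerMessageSource(connectionQueue);
 *
 * // ... messages pushed to connectionQueue get translated and popped in arrival order ...
 *
 * pollRegistration.close();
 * queueProcessor.shutdown();
 * }
 * </pre>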
 */
public class QueueProcessorLightImpl implements QueueProcessor<OfHeader, DataObject> {

    protected static final Logger LOG = LoggerFactory
            .getLogger(QueueProcessorLightImpl.class);

    private BlockingQueue<TicketResult<DataObject>> ticketQueue;
    private ThreadPoolExecutor processorPool;
    private int processingPoolSize = 4;
    private ExecutorService harvesterPool;
    private ExecutorService finisherPool;

    protected Map<Class<? extends DataObject>, Collection<PopListener<DataObject>>> popListenersMapping;
    private Map<TranslatorKey, Collection<IMDMessageTranslator<OfHeader, List<DataObject>>>> translatorMapping;
    private TicketProcessorFactory<OfHeader, DataObject> ticketProcessorFactory;
    private MessageSpy<DataContainer> messageSpy;
    protected Collection<QueueKeeper<OfHeader>> messageSources;
    private QueueKeeperHarvester<OfHeader> harvester;

    protected TicketFinisher<DataObject> finisher;

    /**
     * prepare queue
     */
    public void init() {
        int ticketQueueCapacity = 1500;
        ticketQueue = new ArrayBlockingQueue<>(ticketQueueCapacity);
        /*
         * TODO FIXME - DOES THIS REALLY NEED TO BE CONCURRENT? Can we figure out
         * a better lifecycle? Why does this have to be a Set?
         */
        messageSources = new CopyOnWriteArraySet<>();

        processorPool = new ThreadPoolLoggingExecutor(processingPoolSize, processingPoolSize, 0,
                TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(ticketQueueCapacity),
                "OFmsgProcessor");
        // force blocking when pool queue is full
        processorPool.setRejectedExecutionHandler(new RejectedExecutionHandler() {
            @Override
            public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                try {
                    executor.getQueue().put(r);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new IllegalStateException(e);
                }
            }
        });

        harvesterPool = new ThreadPoolLoggingExecutor(1, 1, 0, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(1), "OFmsgHarvester");
        finisherPool = new ThreadPoolLoggingExecutor(1, 1, 0, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(1), "OFmsgFinisher");
        finisher = new TicketFinisherImpl(
                ticketQueue, popListenersMapping);
        finisherPool.execute(finisher);

        harvester = new QueueKeeperHarvester<OfHeader>(this, messageSources);
        harvesterPool.execute(harvester);

        ticketProcessorFactory = new TicketProcessorFactoryImpl();
        ticketProcessorFactory.setTranslatorMapping(translatorMapping);
        ticketProcessorFactory.setSpy(messageSpy);
        ticketProcessorFactory.setTicketFinisher(finisher);
    }

    /**
     * stop processing queue
     */
    public void shutdown() {
        processorPool.shutdown();
    }

    @Override
    public void enqueueQueueItem(QueueItem<OfHeader> queueItem) {
        messageSpy.spyMessage(queueItem.getMessage(), STATISTIC_GROUP.FROM_SWITCH_ENQUEUED);
        TicketImpl<OfHeader, DataObject> ticket = new TicketImpl<>();
        ticket.setConductor(queueItem.getConnectionConductor());
        ticket.setMessage(queueItem.getMessage());
        ticket.setQueueType(queueItem.getQueueType());

        LOG.trace("ticket scheduling: {}, ticket: {}",
                queueItem.getMessage().getImplementedInterface().getSimpleName(),
                System.identityHashCode(queueItem));
        scheduleTicket(ticket);
    }

    @Override
    public void directProcessQueueItem(QueueItem<OfHeader> queueItem) {
        messageSpy.spyMessage(queueItem.getMessage(), STATISTIC_GROUP.FROM_SWITCH_ENQUEUED);
        TicketImpl<OfHeader, DataObject> ticket = new TicketImpl<>();
        ticket.setConductor(queueItem.getConnectionConductor());
        ticket.setMessage(queueItem.getMessage());

        LOG.debug("ticket scheduling: {}, ticket: {}",
                queueItem.getMessage().getImplementedInterface().getSimpleName(),
                System.identityHashCode(queueItem));

        // translate in the caller's thread, bypassing the ordered ticket queue
        ticketProcessorFactory.createProcessor(ticket).run();

        // publish notification
        finisher.firePopNotification(ticket.getDirectResult());
    }

    /**
     * @param ticket ticket to be translated and, in the ordered case, enqueued for the finisher
     */
    private void scheduleTicket(Ticket<OfHeader, DataObject> ticket) {
        switch (ticket.getQueueType()) {
            case DEFAULT:
                Runnable ticketProcessor = ticketProcessorFactory.createProcessor(ticket);
                processorPool.execute(ticketProcessor);
                // keep the ticket in the ordered queue so the finisher publishes results in arrival order
                try {
                    ticketQueue.put(ticket);
                } catch (InterruptedException e) {
                    LOG.warn("enqueue of ordered message ticket failed", e);
                }
                break;
            case UNORDERED:
                Runnable ticketProcessorSync = ticketProcessorFactory.createSyncProcessor(ticket);
                processorPool.execute(ticketProcessorSync);
                break;
            default:
                LOG.warn("unsupported enqueue type: {}", ticket.getQueueType());
        }
    }

    /**
     * @param poolSize the poolSize to set
     */
    public void setProcessingPoolSize(int poolSize) {
        this.processingPoolSize = poolSize;
    }

    @Override
    public void setTranslatorMapping(
            Map<TranslatorKey, Collection<IMDMessageTranslator<OfHeader, List<DataObject>>>> translatorMapping) {
        this.translatorMapping = translatorMapping;
    }

    @Override
    public void setPopListenersMapping(
            Map<Class<? extends DataObject>, Collection<PopListener<DataObject>>> popListenersMapping) {
        this.popListenersMapping = popListenersMapping;
    }

    /**
     * @param messageSpy the messageSpy to set
     */
    public void setMessageSpy(MessageSpy<DataContainer> messageSpy) {
        this.messageSpy = messageSpy;
    }

    @Override
    public AutoCloseable registerMessageSource(QueueKeeper<OfHeader> queue) {
        boolean added = messageSources.add(queue);
        if (!added) {
            LOG.debug("registration of message source queue failed - already registered");
        }
        MessageSourcePollRegistration<QueueKeeper<OfHeader>> queuePollRegistration =
                new MessageSourcePollRegistration<>(this, queue);
        return queuePollRegistration;
    }

    @Override
    public boolean unregisterMessageSource(QueueKeeper<OfHeader> queue) {
        return messageSources.remove(queue);
    }

    @Override
    public Collection<QueueKeeper<OfHeader>> getMessageSources() {
        return messageSources;
    }

    @Override
    public HarvesterHandle getHarvesterHandle() {
        return harvester;
    }
}