+ // Used to determine whether a node's FRM programming happens locally or on another controller in the cluster
+ private IConnectionManager connectionManager;
+
+ /*
+ * Names of the clustered caches used to support FRM entry distribution.
+ * These are by necessity non-transactional, because states need to be
+ * synchronized across the cluster even while a transaction is in progress.
+ */
+ static final String WORKORDERCACHE = "frm.workOrder";
+ static final String WORKSTATUSCACHE = "frm.workStatus";
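+
+ /*
+ * Illustrative sketch only (not taken from this change set): the two named
+ * caches above would typically be allocated from the clustering services
+ * during initialization, roughly as below. The method name is hypothetical;
+ * the calls assume the IClusterContainerServices.createCache() API, its
+ * CacheExistException/CacheConfigException, and java.util.EnumSet being
+ * imported. TRANSACTIONAL mode reflects the workaround described for the
+ * executor field further down, while the comment above states the original
+ * non-transactional intent.
+ */
+ private void allocateWorkCachesSketch() {
+ try {
+ clusterContainerService.createCache(WORKORDERCACHE,
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache(WORKSTATUSCACHE,
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ } catch (CacheExistException | CacheConfigException cce) {
+ logsync.error("Unable to allocate the FRM work caches", cce);
+ }
+ }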
+
+ /*
+ * Data structure responsible for distributing the FlowEntryInstall requests
+ * across the cluster. The key is the entry being installed, updated or
+ * deleted. The value is the same as the key for installation and deletion,
+ * and is the new entry for modification; this is because the clustering
+ * caches don't allow null values.
+ *
+ * The logic behind this data structure is that the controller initiating
+ * the request places the order here, and the controller serving it picks it
+ * up and then removes it from this data structure once it has been served.
+ *
+ * TODO: We need a way to clean up this data structure when entries are
+ * never picked up by anyone, which can happen, especially when a node
+ * disconnects.
+ */
+ private ConcurrentMap<FlowEntryDistributionOrder, FlowEntryInstall> workOrder;
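+
+ /*
+ * Illustrative sketch only (assumption, not the actual handling in this
+ * change): the controller owning the node is expected to notice a new key
+ * in WORKORDERCACHE, program the entry locally, remove the order, and
+ * publish the outcome through workStatus. The method name and the
+ * programLocalEntry() helper are hypothetical; in practice the notification
+ * would arrive through the clustering services' cache update callbacks.
+ */
+ private void serveWorkOrderSketch(FlowEntryDistributionOrder fe, FlowEntryInstall fei, UpdateType t) {
+ if (!connectionManager.isLocal(fei.getNode())) {
+ // The node is not connected here, another controller will serve the order
+ return;
+ }
+ // programLocalEntry() stands in for the local install/modify/remove path
+ Status status = programLocalEntry(fei, t);
+ // The order has been served: remove it and report the result
+ workOrder.remove(fe);
+ workStatus.put(fe, status);
+ }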
+
+ /*
+ * Data structure responsible for retrieving the results of the workOrder
+ * submitted to the cluster.
+ *
+ * The logic behind this data structure is that the controller that has
+ * executed the order places the result in workStatus, signaling either
+ * success or failure.
+ *
+ * TODO: The workStatus entries need a lifetime associated with them, for
+ * the case where the requesting controller leaves the cluster.
+ */
+ private ConcurrentMap<FlowEntryDistributionOrder, Status> workStatus;
+
+ /*
+ * Local map holding the Future that a caller can use to monitor an order
+ * for completion
+ */
+ private ConcurrentMap<FlowEntryDistributionOrder, FlowEntryDistributionOrderFutureTask> workMonitor =
+ new ConcurrentHashMap<FlowEntryDistributionOrder, FlowEntryDistributionOrderFutureTask>();
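+
+ /*
+ * Illustrative sketch only (assumption): on the requesting controller, an
+ * update to WORKSTATUSCACHE is expected to complete the matching Future in
+ * workMonitor so that the caller blocked on the result of
+ * distributeWorkOrder() wakes up. The method name is hypothetical, and it
+ * assumes FlowEntryDistributionOrderFutureTask exposes a setter for the
+ * Status, here called gotStatus().
+ */
+ private void completeWorkStatusSketch(FlowEntryDistributionOrder fe, Status result) {
+ FlowEntryDistributionOrderFutureTask task = workMonitor.remove(fe);
+ if (task == null) {
+ // We did not request this order, nothing to signal locally
+ return;
+ }
+ task.gotStatus(result);
+ // The status has been consumed, clean it out of the clustered cache
+ workStatus.remove(fe);
+ }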
+
+ /*
+ * Executor pool used to create the distribution order. This is a stop-gap
+ * solution for an issue, currently being worked on, with non-transactional
+ * caches in the clustering implementation we use: when non-transactional
+ * caches are used, the keys are sometimes not distributed to all the nodes
+ * properly. To work around the issue, transactional caches are used
+ * instead. There was, however, a reason for starting with non-transactional
+ * caches: we need to be able to program the FRM entries in the context of a
+ * northbound transaction irrespective of whether that transaction commits,
+ * otherwise we could not achieve the entry programming and implement the
+ * scheme for recovery from network element failures. Bottom line: to make
+ * sure an update on a transactional cache goes out while a transaction is
+ * in progress, the update needs to be initiated by a different thread.
+ */
+ private ExecutorService executor;
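+
+ /*
+ * Illustrative sketch only (assumption): the executor would typically be
+ * created in the component's init/start lifecycle hook. The method name and
+ * pool size are assumptions; a single thread is enough because its only job
+ * is to publish the order from outside the caller's transaction context.
+ */
+ private void startDistributionExecutorSketch() {
+ executor = Executors.newFixedThreadPool(1);
+ }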
+
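+ /*
+ * Callable run on the distribution executor: it builds the
+ * FlowEntryDistributionOrder, registers the monitoring Future in
+ * workMonitor, publishes the order in the clustered workOrder cache, and
+ * returns the Future to the submitter.
+ */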
+ class DistributeOrderCallable implements Callable<Future<Status>> {
+ private FlowEntryInstall e;
+ private FlowEntryInstall u;
+ private UpdateType t;
+ DistributeOrderCallable(FlowEntryInstall e, FlowEntryInstall u, UpdateType t) {
+ this.e = e;
+ this.u = u;
+ this.t = t;
+ }
+
+ @Override
+ public Future<Status> call() throws Exception {
+ if (e == null || t == null) {
+ logsync.error("Unexpected null Entry up update type");
+ return null;
+ }
+ // Create the work order and distribute it
+ FlowEntryDistributionOrder fe =
+ new FlowEntryDistributionOrder(e, t, clusterContainerService.getMyAddress());
+ // First create the monitor job
+ FlowEntryDistributionOrderFutureTask ret = new FlowEntryDistributionOrderFutureTask(fe);
+ logsync.trace("Node {} not local so sending fe {}", e.getNode(), fe);
+ workMonitor.put(fe, ret);
+ if (t.equals(UpdateType.CHANGED)) {
+ // Then distribute the work
+ workOrder.put(fe, u);
+ } else {
+ // Then distribute the work
+ workOrder.put(fe, e);
+ }
+ logsync.trace("WorkOrder requested");
+ // Return the handle used to monitor the execution of the operation
+ return ret;
+ }
+ }
+
+ /**
+ * @param e
+ * Entry being installed/updated/removed
+ * @param u
+ * New entry that will be in place after the update operation. Valid
+ * only for UpdateType.CHANGED, null in all other cases
+ * @param t
+ * Type of update
+ * @return a Future object for monitoring the progress of the result, or
+ * null if the processing should take place locally
+ */
+ private Future<Status> distributeWorkOrder(FlowEntryInstall e, FlowEntryInstall u, UpdateType t) {
+ // A null entry is an unexpected condition; regardless, it's safe to keep
+ // the handling local
+ if (e == null) {
+ return null;
+ }
+
+ Node n = e.getNode();
+ if (!connectionManager.isLocal(n)) {
+ Callable<Future<Status>> worker = new DistributeOrderCallable(e, u, t);
+ Future<Future<Status>> workerRes = this.executor.submit(worker);
+ try {
+ return workerRes.get();
+ } catch (InterruptedException e1) {
+ // We were interrupted, not a big deal
+ return null;
+ } catch (ExecutionException e1) {
+ logsync.error("Execution exception while distributing the work order, nothing to wait for", e1);
+ return null;
+ }
+ }
+
+ logsync.trace("LOCAL Node {} so processing Entry:{} UpdateType:{}", n, e, t);
+ return null;
+ }
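+
+ /*
+ * Illustrative usage sketch only (assumption): a typical caller on the
+ * install/modify/remove path would first try to distribute the work and
+ * fall back to local programming when null is returned. The method name and
+ * the programLocalEntry() helper are hypothetical.
+ */
+ private Status modifyEntrySketch(FlowEntryInstall currentEntry, FlowEntryInstall newEntry) {
+ Future<Status> distributed = distributeWorkOrder(currentEntry, newEntry, UpdateType.CHANGED);
+ if (distributed == null) {
+ // The node is connected locally (or input was null), program it here
+ return programLocalEntry(newEntry, UpdateType.CHANGED);
+ }
+ try {
+ // Wait for the serving controller to report the outcome via workStatus
+ return distributed.get();
+ } catch (InterruptedException | ExecutionException ex) {
+ return new Status(StatusCode.INTERNALERROR, ex.getMessage());
+ }
+ }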
+