/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.openflowplugin.impl.registry.flow;
10 import com.google.common.annotations.VisibleForTesting;
11 import com.google.common.base.Optional;
12 import com.google.common.util.concurrent.CheckedFuture;
13 import com.google.common.util.concurrent.FutureCallback;
14 import com.google.common.util.concurrent.Futures;
15 import com.google.common.util.concurrent.ListenableFuture;
16 import com.romix.scala.collection.concurrent.TrieMap;
17 import java.util.ArrayList;
18 import java.util.Arrays;
19 import java.util.Collection;
20 import java.util.Collections;
21 import java.util.HashSet;
22 import java.util.Iterator;
23 import java.util.List;
25 import java.util.concurrent.ConcurrentMap;
26 import java.util.concurrent.atomic.AtomicInteger;
27 import java.util.function.Consumer;
28 import javax.annotation.concurrent.GuardedBy;
29 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
30 import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
31 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
32 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
33 import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
34 import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
35 import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
36 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
37 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
38 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
39 import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
40 import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
41 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
42 import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
43 import org.slf4j.Logger;
44 import org.slf4j.LoggerFactory;
/**
 * Created by Martin Bobak &lt;mbobak@cisco.com&gt; on 8.4.2015.
 */
49 public class DeviceFlowRegistryImpl implements DeviceFlowRegistry {
50 private static final Logger LOG = LoggerFactory.getLogger(DeviceFlowRegistryImpl.class);
51 private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
52 private static final AtomicInteger UNACCOUNTED_FLOWS_COUNTER = new AtomicInteger(0);
54 private final ConcurrentMap<FlowRegistryKey, FlowDescriptor> flowRegistry = new TrieMap<>();
56 private final Collection<FlowRegistryKey> marks = new HashSet<>();
57 private final DataBroker dataBroker;
58 private final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier;
59 private final List<ListenableFuture<List<Optional<FlowCapableNode>>>> lastFillFutures = new ArrayList<>();
61 // Specifies what to do with flow read from datastore
62 private final Consumer<Flow> flowConsumer = flow -> {
63 // Create flow registry key from flow
64 final FlowRegistryKey key = FlowRegistryKeyFactory.create(flow);
66 // Now, we will update the registry, but we will also try to prevent duplicate entries
67 if (!flowRegistry.containsKey(key)) {
68 LOG.trace("Found flow with table ID : {} and flow ID : {}", flow.getTableId(), flow.getId().getValue());
69 final FlowDescriptor descriptor = FlowDescriptorFactory.create(flow.getTableId(), flow.getId());
70 store(key, descriptor);
75 public DeviceFlowRegistryImpl(final DataBroker dataBroker, final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier) {
76 this.dataBroker = dataBroker;
77 this.instanceIdentifier = instanceIdentifier;
81 public ListenableFuture<List<Optional<FlowCapableNode>>> fill() {
82 LOG.debug("Filling flow registry with flows for node: {}", instanceIdentifier);
84 // Prepare path for read transaction
85 // TODO: Read only Tables, and not entire FlowCapableNode (fix Yang model)
86 final InstanceIdentifier<FlowCapableNode> path = instanceIdentifier.augmentation(FlowCapableNode.class);
88 // First, try to fill registry with flows from DS/Configuration
89 CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> configFuture = fillFromDatastore(LogicalDatastoreType.CONFIGURATION, path);
91 // Now, try to fill registry with flows from DS/Operational
92 // in case of cluster fail over, when clients are not using DS/Configuration
93 // for adding flows, but only RPCs
94 CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> operationalFuture = fillFromDatastore(LogicalDatastoreType.OPERATIONAL, path);
96 // And at last, chain and return futures created above.
97 // Also, cache this future, so call to DeviceFlowRegistry.close() will be able
98 // to cancel this future immediately if it will be still in progress
99 final ListenableFuture<List<Optional<FlowCapableNode>>> lastFillFuture = Futures.allAsList(Arrays.asList(configFuture, operationalFuture));
100 lastFillFutures.add(lastFillFuture);
101 return lastFillFuture;
104 private CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> fillFromDatastore(final LogicalDatastoreType logicalDatastoreType, final InstanceIdentifier<FlowCapableNode> path) {
105 // Create new read-only transaction
106 final ReadOnlyTransaction transaction = dataBroker.newReadOnlyTransaction();
108 // Bail out early if transaction is null
109 if (transaction == null) {
110 return Futures.immediateFailedCheckedFuture(
111 new ReadFailedException("Read transaction is null"));
114 // Prepare read operation from datastore for path
115 final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> future =
116 transaction.read(logicalDatastoreType, path);
118 // Bail out early if future is null
119 if (future == null) {
120 return Futures.immediateFailedCheckedFuture(
121 new ReadFailedException("Future from read transaction is null"));
124 Futures.addCallback(future, new FutureCallback<Optional<FlowCapableNode>>() {
126 public void onSuccess(Optional<FlowCapableNode> result) {
127 result.asSet().stream()
128 .flatMap(flowCapableNode -> flowCapableNode.getTable().stream())
129 .flatMap(table -> table.getFlow().stream())
130 .forEach(flowConsumer);
132 // After we are done with reading from datastore, close the transaction
137 public void onFailure(Throwable t) {
138 // Even when read operation failed, close the transaction
147 public FlowDescriptor retrieveIdForFlow(final FlowRegistryKey flowRegistryKey) {
148 LOG.trace("Retrieving flowDescriptor for flow hash: {}", flowRegistryKey.hashCode());
150 // Get FlowDescriptor from flow registry
151 return flowRegistry.get(flowRegistryKey);
155 public void store(final FlowRegistryKey flowRegistryKey, final FlowDescriptor flowDescriptor) {
156 LOG.trace("Storing flowDescriptor with table ID : {} and flow ID : {} for flow hash : {}", flowDescriptor.getTableKey().getId(), flowDescriptor.getFlowId().getValue(), flowRegistryKey.hashCode());
157 flowRegistry.put(flowRegistryKey, flowDescriptor);
161 public FlowId storeIfNecessary(final FlowRegistryKey flowRegistryKey) {
162 LOG.trace("Trying to retrieve flowDescriptor for flow hash: {}", flowRegistryKey.hashCode());
164 // First, try to get FlowDescriptor from flow registry
165 FlowDescriptor flowDescriptor = flowRegistry.get(flowRegistryKey);
167 // We was not able to retrieve FlowDescriptor, so we will at least try to generate it
168 if (flowDescriptor == null) {
169 final short tableId = flowRegistryKey.getTableId();
170 final FlowId alienFlowId = createAlienFlowId(tableId);
171 flowDescriptor = FlowDescriptorFactory.create(tableId, alienFlowId);
173 // Finally we got flowDescriptor, so now we will store it to registry,
174 // so next time we won't need to generate it again
175 store(flowRegistryKey, flowDescriptor);
178 return flowDescriptor.getFlowId();
182 public void markToBeremoved(final FlowRegistryKey flowRegistryKey) {
183 synchronized (marks) {
184 marks.add(flowRegistryKey);
187 LOG.trace("Flow hash {} was marked for removal.", flowRegistryKey.hashCode());
191 public void removeMarked() {
192 synchronized (marks) {
193 for (FlowRegistryKey flowRegistryKey : marks) {
194 LOG.trace("Removing flowDescriptor for flow hash : {}", flowRegistryKey.hashCode());
195 flowRegistry.remove(flowRegistryKey);
203 public Map<FlowRegistryKey, FlowDescriptor> getAllFlowDescriptors() {
204 return Collections.unmodifiableMap(flowRegistry);
208 public void close() {
209 final Iterator<ListenableFuture<List<Optional<FlowCapableNode>>>> iterator = lastFillFutures.iterator();
211 while(iterator.hasNext()) {
212 final ListenableFuture<List<Optional<FlowCapableNode>>> next = iterator.next();
213 boolean success = next.cancel(true);
214 LOG.trace("Cancelling filling flow registry with flows job {} with result: {}", next, success);
218 flowRegistry.clear();
223 static FlowId createAlienFlowId(final short tableId) {
224 final String alienId = ALIEN_SYSTEM_FLOW_ID + tableId + '-' + UNACCOUNTED_FLOWS_COUNTER.incrementAndGet();
225 return new FlowId(alienId);