/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.openflowplugin.impl.registry.flow;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.ThreadSafe;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.general.rev140714.GeneralAugMatchNodesNodeTableFlow;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
49 public class DeviceFlowRegistryImpl implements DeviceFlowRegistry {
50 private static final Logger LOG = LoggerFactory.getLogger(DeviceFlowRegistryImpl.class);
51 private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
52 private static final AtomicInteger UNACCOUNTED_FLOWS_COUNTER = new AtomicInteger(0);
54 private final BiMap<FlowRegistryKey, FlowDescriptor> flowRegistry = Maps.synchronizedBiMap(HashBiMap.create());
55 private final DataBroker dataBroker;
56 private final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier;
57 private final List<ListenableFuture<List<Optional<FlowCapableNode>>>> lastFillFutures = new ArrayList<>();
58 private final Consumer<Flow> flowConsumer;
/**
 * Creates the registry for a single device.
 *
 * @param version OpenFlow protocol version, used when deriving registry keys from flows
 * @param dataBroker broker used by {@link #fill()} to read flows from the datastore
 * @param instanceIdentifier path to the device node this registry belongs to
 */
public DeviceFlowRegistryImpl(final short version,
                              final DataBroker dataBroker,
                              final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier) {
    this.dataBroker = dataBroker;
    this.instanceIdentifier = instanceIdentifier;

    // Specifies what to do with flow read from data store
    flowConsumer = flow -> {
        final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(version, flow);

        // Only register flows not seen yet; an existing entry for an equal key wins.
        if (getExistingKey(flowRegistryKey) == null) {
            // Now, we will update the registry
            storeDescriptor(flowRegistryKey, FlowDescriptorFactory.create(flow.getTableId(), flow.getId()));
        }
    };
}
78 public ListenableFuture<List<Optional<FlowCapableNode>>> fill() {
79 if (LOG.isDebugEnabled()) {
80 LOG.debug("Filling flow registry with flows for node: {}", instanceIdentifier.getKey().getId().getValue());
83 // Prepare path for read transaction
84 // TODO: Read only Tables, and not entire FlowCapableNode (fix Yang model)
85 final InstanceIdentifier<FlowCapableNode> path = instanceIdentifier.augmentation(FlowCapableNode.class);
87 // First, try to fill registry with flows from DS/Configuration
88 final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> configFuture =
89 fillFromDatastore(LogicalDatastoreType.CONFIGURATION, path);
91 // Now, try to fill registry with flows from DS/Operational
92 // in case of cluster fail over, when clients are not using DS/Configuration
93 // for adding flows, but only RPCs
94 final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> operationalFuture =
95 fillFromDatastore(LogicalDatastoreType.OPERATIONAL, path);
97 // And at last, chain and return futures created above.
98 // Also, cache this future, so call to DeviceFlowRegistry.close() will be able
99 // to cancel this future immediately if it will be still in progress
100 final ListenableFuture<List<Optional<FlowCapableNode>>> lastFillFuture =
101 Futures.allAsList(Arrays.asList(configFuture, operationalFuture));
102 lastFillFutures.add(lastFillFuture);
103 return lastFillFuture;
106 private CheckedFuture<Optional<FlowCapableNode>, ReadFailedException>
107 fillFromDatastore(final LogicalDatastoreType logicalDatastoreType,
108 final InstanceIdentifier<FlowCapableNode> path) {
109 // Create new read-only transaction
110 final ReadOnlyTransaction transaction = dataBroker.newReadOnlyTransaction();
112 // Bail out early if transaction is null
113 if (transaction == null) {
114 return Futures.immediateFailedCheckedFuture(
115 new ReadFailedException("Read transaction is null"));
118 // Prepare read operation from datastore for path
119 final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> future =
120 transaction.read(logicalDatastoreType, path);
122 // Bail out early if future is null
123 if (future == null) {
124 return Futures.immediateFailedCheckedFuture(
125 new ReadFailedException("Future from read transaction is null"));
128 Futures.addCallback(future, new FutureCallback<Optional<FlowCapableNode>>() {
130 public void onSuccess(@Nonnull Optional<FlowCapableNode> result) {
131 result.asSet().stream()
132 .filter(Objects::nonNull)
133 .filter(flowCapableNode -> Objects.nonNull(flowCapableNode.getTable()))
134 .flatMap(flowCapableNode -> flowCapableNode.getTable().stream())
135 .filter(Objects::nonNull)
136 .filter(table -> Objects.nonNull(table.getFlow()))
137 .flatMap(table -> table.getFlow().stream())
138 .filter(Objects::nonNull)
139 .filter(flow -> Objects.nonNull(flow.getId()))
140 .forEach(flowConsumer);
142 // After we are done with reading from datastore, close the transaction
147 public void onFailure(Throwable throwable) {
148 // Even when read operation failed, close the transaction
151 }, MoreExecutors.directExecutor());
157 public FlowDescriptor retrieveDescriptor(@Nonnull final FlowRegistryKey flowRegistryKey) {
158 if (LOG.isTraceEnabled()) {
159 LOG.trace("Retrieving flow descriptor for flow registry : {}", flowRegistryKey.toString());
162 FlowRegistryKey existingFlowRegistryKey = getExistingKey(flowRegistryKey);
163 if (existingFlowRegistryKey != null) {
164 return flowRegistry.get(existingFlowRegistryKey);
170 public void storeDescriptor(@Nonnull final FlowRegistryKey flowRegistryKey,
171 @Nonnull final FlowDescriptor flowDescriptor) {
173 if (LOG.isTraceEnabled()) {
174 LOG.trace("Storing flowDescriptor with table ID : {} and flow ID : {} for flow hash : {}",
175 flowDescriptor.getTableKey().getId(),
176 flowDescriptor.getFlowId().getValue(),
177 flowRegistryKey.toString());
180 addToFlowRegistry(flowRegistryKey, flowDescriptor);
181 } catch (IllegalArgumentException ex) {
182 if (LOG.isWarnEnabled()) {
183 LOG.warn("Flow with flow ID {} already exists in table {}, generating alien flow ID",
184 flowDescriptor.getFlowId().getValue(),
185 flowDescriptor.getTableKey().getId());
188 // We are trying to store new flow to flow registry, but we already have different flow with same flow ID
189 // stored in registry, so we need to create alien ID for this new flow here.
192 FlowDescriptorFactory.create(
193 flowDescriptor.getTableKey().getId(),
194 createAlienFlowId(flowDescriptor.getTableKey().getId())));
199 public void store(final FlowRegistryKey flowRegistryKey) {
200 if (Objects.isNull(retrieveDescriptor(flowRegistryKey))) {
201 if (LOG.isDebugEnabled()) {
202 LOG.debug("Flow descriptor for flow hash : {} not found, generating alien flow ID",
203 flowRegistryKey.toString());
206 // We do not found flow in flow registry, that means it do not have any ID already assigned, so we need
207 // to generate new alien flow ID here.
210 FlowDescriptorFactory.create(
211 flowRegistryKey.getTableId(),
212 createAlienFlowId(flowRegistryKey.getTableId())));
217 public void addMark(final FlowRegistryKey flowRegistryKey) {
218 if (LOG.isTraceEnabled()) {
219 LOG.trace("Removing flow descriptor for flow hash : {}", flowRegistryKey.toString());
222 removeFromFlowRegistry(flowRegistryKey);
226 public void processMarks() {
231 public void forEach(final Consumer<FlowRegistryKey> consumer) {
232 synchronized (flowRegistry) {
233 flowRegistry.keySet().forEach(consumer);
239 return flowRegistry.size();
243 public void close() {
244 final Iterator<ListenableFuture<List<Optional<FlowCapableNode>>>> iterator = lastFillFutures.iterator();
246 // We need to force interrupt and clear all running futures that are trying to read flow IDs from data store
247 while (iterator.hasNext()) {
248 final ListenableFuture<List<Optional<FlowCapableNode>>> next = iterator.next();
249 boolean success = next.cancel(true);
250 LOG.trace("Cancelling filling flow registry with flows job {} with result: {}", next, success);
254 flowRegistry.clear();
258 static FlowId createAlienFlowId(final short tableId) {
259 final String alienId = ALIEN_SYSTEM_FLOW_ID + tableId + '-' + UNACCOUNTED_FLOWS_COUNTER.incrementAndGet();
260 LOG.debug("Created alien flow id {} for table id {}", alienId, tableId);
261 return new FlowId(alienId);
264 //Hashcode generation of the extension augmentation can differ for the same object received from the datastore and
265 // the one received after deserialization of switch message. OpenFlowplugin extensions are list, and the order in
266 // which it can receive the extensions back from switch can differ and that lead to a different hashcode. In that
267 // scenario, hashcode won't match and flowRegistry return the related key. To overcome this issue, these methods
268 // make sure that key is stored only if it doesn't equals to any existing key.
269 private void addToFlowRegistry(final FlowRegistryKey flowRegistryKey, final FlowDescriptor flowDescriptor) {
270 FlowRegistryKey existingFlowRegistryKey = getExistingKey(flowRegistryKey);
271 if (existingFlowRegistryKey == null) {
272 flowRegistry.put(flowRegistryKey, flowDescriptor);
274 flowRegistry.put(existingFlowRegistryKey, flowDescriptor);
278 private void removeFromFlowRegistry(final FlowRegistryKey flowRegistryKey) {
279 FlowRegistryKey existingFlowRegistryKey = getExistingKey(flowRegistryKey);
280 if (existingFlowRegistryKey != null) {
281 flowRegistry.remove(existingFlowRegistryKey);
283 flowRegistry.remove(flowRegistryKey);
287 private FlowRegistryKey getExistingKey(final FlowRegistryKey flowRegistryKey) {
288 if (flowRegistryKey.getMatch().getAugmentation(GeneralAugMatchNodesNodeTableFlow.class) == null) {
289 if (flowRegistry.containsKey(flowRegistryKey)) {
290 return flowRegistryKey;
293 synchronized (flowRegistry) {
294 for (Map.Entry<FlowRegistryKey, FlowDescriptor> keyValueSet : flowRegistry.entrySet()) {
295 if (keyValueSet.getKey().equals(flowRegistryKey)) {
296 return keyValueSet.getKey();
305 Map<FlowRegistryKey, FlowDescriptor> getAllFlowDescriptors() {