openflowplugin-impl/src/main/java/org/opendaylight/openflowplugin/impl/registry/flow/DeviceFlowRegistryImpl.java
/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.openflowplugin.impl.registry.flow;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.ReadTransaction;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.general.rev140714.GeneralAugMatchNodesNodeTableFlow;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.Uint8;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/*
 * This class is thread-safe.
 */
public class DeviceFlowRegistryImpl implements DeviceFlowRegistry {
    private static final Logger LOG = LoggerFactory.getLogger(DeviceFlowRegistryImpl.class);
    private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
    private static final AtomicInteger UNACCOUNTED_FLOWS_COUNTER = new AtomicInteger(0);

    private final BiMap<FlowRegistryKey, FlowDescriptor> flowRegistry = Maps.synchronizedBiMap(HashBiMap.create());
    private final DataBroker dataBroker;
    private final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier;
    private final List<ListenableFuture<List<Optional<FlowCapableNode>>>> lastFillFutures = new ArrayList<>();
    private final Consumer<Flow> flowConsumer;

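    /**
     * Creates a flow registry for a single device.
     *
     * @param version            OpenFlow protocol version used when building registry keys
     * @param dataBroker         data broker used to read flows back from the datastore
     * @param instanceIdentifier path to the device's node in the inventory
     */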
    public DeviceFlowRegistryImpl(final short version,
                                  final DataBroker dataBroker,
                                  final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier) {
        this.dataBroker = dataBroker;
        this.instanceIdentifier = instanceIdentifier;

        // Specifies what to do with each flow read from the data store
        flowConsumer = flow -> {
            final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(version, flow);

            if (getExistingKey(flowRegistryKey) == null) {
                // Now, we will update the registry
                storeDescriptor(flowRegistryKey, FlowDescriptorFactory.create(flow.getTableId(), flow.getId()));
            }
        };
    }

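    /**
     * Fills the registry with flows read from both the configuration and the operational datastore.
     * The returned future completes once both reads have been processed.
     */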
    @Override
    public ListenableFuture<List<Optional<FlowCapableNode>>> fill() {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Filling flow registry with flows for node: {}", instanceIdentifier.getKey().getId().getValue());
        }

        // Prepare path for read transaction
        // TODO: Read only Tables, and not entire FlowCapableNode (fix Yang model)
        final InstanceIdentifier<FlowCapableNode> path = instanceIdentifier.augmentation(FlowCapableNode.class);

        // First, try to fill the registry with flows from DS/Configuration
        final FluentFuture<Optional<FlowCapableNode>> configFuture =
                fillFromDatastore(LogicalDatastoreType.CONFIGURATION, path);

        // Then, try to fill the registry with flows from DS/Operational,
        // covering cluster failover, where clients are not using DS/Configuration
        // for adding flows, but only RPCs
        final FluentFuture<Optional<FlowCapableNode>> operationalFuture =
                fillFromDatastore(LogicalDatastoreType.OPERATIONAL, path);

        // Finally, chain and return the futures created above.
        // Also cache this future, so a call to DeviceFlowRegistry.close() can
        // cancel it immediately if it is still in progress
        final ListenableFuture<List<Optional<FlowCapableNode>>> lastFillFuture =
                Futures.allAsList(Arrays.asList(configFuture, operationalFuture));
        lastFillFutures.add(lastFillFuture);
        return lastFillFuture;
    }

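    /**
     * Reads the {@link FlowCapableNode} at {@code path} from the given datastore and feeds every flow
     * that carries a flow ID into {@link #flowConsumer}. Read failures are only logged.
     */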
    private FluentFuture<Optional<FlowCapableNode>> fillFromDatastore(final LogicalDatastoreType logicalDatastoreType,
                                                                      final InstanceIdentifier<FlowCapableNode> path) {
        // Prepare read operation from datastore for path
        final FluentFuture<Optional<FlowCapableNode>> future;
        try (ReadTransaction transaction = dataBroker.newReadOnlyTransaction()) {
            future = transaction.read(logicalDatastoreType, path);
        }

        future.addCallback(new FutureCallback<Optional<FlowCapableNode>>() {
            @Override
            public void onSuccess(final Optional<FlowCapableNode> result) {
                result.ifPresent(flowCapableNode -> {
                    flowCapableNode.nonnullTable().stream()
                        .filter(Objects::nonNull)
                        .flatMap(table -> table.nonnullFlow().stream())
                        .filter(Objects::nonNull)
                        .filter(flow -> flow.getId() != null)
                        .forEach(flowConsumer);
                });
            }

            @Override
            public void onFailure(final Throwable throwable) {
                LOG.debug("Failed to read {} path {}", logicalDatastoreType, path, throwable);
            }
        }, MoreExecutors.directExecutor());

        return future;
    }

    @Override
    public FlowDescriptor retrieveDescriptor(@NonNull final FlowRegistryKey flowRegistryKey) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Retrieving flow descriptor for flow registry key : {}", flowRegistryKey.toString());
        }

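        // Resolve through getExistingKey() so that keys carrying extension augmentations are matched by
        // equals() rather than by hashcode (see the comment above addToFlowRegistry()).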
        FlowRegistryKey existingFlowRegistryKey = getExistingKey(flowRegistryKey);
        if (existingFlowRegistryKey != null) {
            return flowRegistry.get(existingFlowRegistryKey);
        }
        return null;
    }

    @Override
    public void storeDescriptor(@NonNull final FlowRegistryKey flowRegistryKey,
                                @NonNull final FlowDescriptor flowDescriptor) {
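        // BiMap.put() throws IllegalArgumentException when the descriptor is already mapped to a different
        // key; that case is handled below by generating an alien flow ID instead.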
        try {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Storing flowDescriptor with table ID : {} and flow ID : {} for flow hash : {}",
                        flowDescriptor.getTableKey().getId(),
                        flowDescriptor.getFlowId().getValue(),
                        flowRegistryKey.toString());
            }

            addToFlowRegistry(flowRegistryKey, flowDescriptor);
        } catch (IllegalArgumentException ex) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Flow with flow ID {} already exists in table {}, generating alien flow ID",
                        flowDescriptor.getFlowId().getValue(),
                        flowDescriptor.getTableKey().getId());
            }

            // We are trying to store a new flow in the flow registry, but a different flow with the same flow ID
            // is already stored there, so we need to create an alien ID for this new flow.
            addToFlowRegistry(
                    flowRegistryKey,
                    FlowDescriptorFactory.create(
                            flowDescriptor.getTableKey().getId(),
                            createAlienFlowId(flowDescriptor.getTableKey().getId())));
        }
    }

    @Override
    public void store(final FlowRegistryKey flowRegistryKey) {
        if (retrieveDescriptor(flowRegistryKey) == null) {
            LOG.debug("Flow descriptor for flow hash : {} not found, generating alien flow ID", flowRegistryKey);

            // The flow was not found in the flow registry, which means it does not have an ID assigned yet,
            // so we need to generate a new alien flow ID here.
            final Uint8 tableId = Uint8.valueOf(flowRegistryKey.getTableId());
            storeDescriptor(flowRegistryKey, FlowDescriptorFactory.create(tableId, createAlienFlowId(tableId)));
        }
    }

    @Override
    public void addMark(final FlowRegistryKey flowRegistryKey) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Removing flow descriptor for flow hash : {}", flowRegistryKey.toString());
        }

        removeFromFlowRegistry(flowRegistryKey);
    }

    @Override
    public void processMarks() {
        // Do nothing
    }

    @Override
    public void forEach(final Consumer<FlowRegistryKey> consumer) {
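        // Iteration over a view of the synchronized BiMap must itself be synchronized on the map,
        // as required by Maps.synchronizedBiMap().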
        synchronized (flowRegistry) {
            flowRegistry.keySet().forEach(consumer);
        }
    }

    @Override
    public int size() {
        return flowRegistry.size();
    }

    @Override
    public void close() {
        final Iterator<ListenableFuture<List<Optional<FlowCapableNode>>>> iterator = lastFillFutures.iterator();

        // We need to interrupt and clear all running futures that are still trying to read flow IDs from the
        // data store
        while (iterator.hasNext()) {
            final ListenableFuture<List<Optional<FlowCapableNode>>> next = iterator.next();
            boolean success = next.cancel(true);
            LOG.trace("Cancelling flow registry fill job {} with result: {}", next, success);
            iterator.remove();
        }

        flowRegistry.clear();
    }

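    /**
     * Generates a synthetic "alien" flow ID for a flow in the given table that does not have a flow ID of its
     * own in the registry.
     */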
    @VisibleForTesting
    static FlowId createAlienFlowId(final Uint8 tableId) {
        final String alienId = ALIEN_SYSTEM_FLOW_ID + tableId + '-' + UNACCOUNTED_FLOWS_COUNTER.incrementAndGet();
        LOG.debug("Created alien flow id {} for table id {}", alienId, tableId);
        return new FlowId(alienId);
    }

    // Hashcode generation for the extension augmentation can differ between the same object read from the
    // datastore and the one deserialized from a switch message. OpenFlowPlugin extensions are a list, and the
    // order in which the extensions come back from the switch can differ, which leads to a different hashcode.
    // In that scenario the hashcodes won't match and flowRegistry won't return the related key. To overcome this,
    // the methods below make sure a key is stored only if it is not equal to any existing key.
    private void addToFlowRegistry(final FlowRegistryKey flowRegistryKey, final FlowDescriptor flowDescriptor) {
        FlowRegistryKey existingFlowRegistryKey = getExistingKey(flowRegistryKey);
        if (existingFlowRegistryKey == null) {
            flowRegistry.put(flowRegistryKey, flowDescriptor);
        } else {
            flowRegistry.put(existingFlowRegistryKey, flowDescriptor);
        }
    }

    private void removeFromFlowRegistry(final FlowRegistryKey flowRegistryKey) {
        FlowRegistryKey existingFlowRegistryKey = getExistingKey(flowRegistryKey);
        if (existingFlowRegistryKey != null) {
            flowRegistry.remove(existingFlowRegistryKey);
        } else {
            flowRegistry.remove(flowRegistryKey);
        }
    }

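    /**
     * Returns the key already stored in the registry that equals the given key, or {@code null} if there is none.
     * Keys without an extension augmentation can be looked up directly by hashcode; keys carrying a
     * {@link GeneralAugMatchNodesNodeTableFlow} augmentation are compared linearly via equals().
     */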
    private FlowRegistryKey getExistingKey(final FlowRegistryKey flowRegistryKey) {
        if (flowRegistryKey.getMatch().augmentation(GeneralAugMatchNodesNodeTableFlow.class) == null) {
            if (flowRegistry.containsKey(flowRegistryKey)) {
                return flowRegistryKey;
            }
        } else {
            synchronized (flowRegistry) {
                for (Map.Entry<FlowRegistryKey, FlowDescriptor> keyValueSet : flowRegistry.entrySet()) {
                    if (keyValueSet.getKey().equals(flowRegistryKey)) {
                        return keyValueSet.getKey();
                    }
                }
            }
        }
        return null;
    }

    @VisibleForTesting
    Map<FlowRegistryKey, FlowDescriptor> getAllFlowDescriptors() {
        return flowRegistry;
    }
}