BUG 1839 - HTTP delete of non-existing data
[controller.git] / opendaylight / md-sal / statistics-manager / src / main / java / org / opendaylight / controller / md / statistics / manager / FlowCapableTracker.java
1 /*
2  * Copyright IBM Corporation, 2013.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8 package org.opendaylight.controller.md.statistics.manager;
9
import java.util.Collection;
import java.util.Set;

import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.Collections2;
import com.google.common.collect.Sets;
28
29 /**
30  * There is a single instance of this class and that instance is responsible for
31  * monitoring the operational data store for nodes being created/deleted and
32  * notifying StatisticsProvider. These events then control the lifecycle of
33  * NodeStatisticsHandler for a particular switch.
34  */
35 final class FlowCapableTracker implements DataChangeListener {
36     private static final Logger logger = LoggerFactory.getLogger(FlowCapableTracker.class);
37
38     private final InstanceIdentifier<FlowCapableNode> root;
39     private final StatisticsProvider stats;
40
41     private final Predicate<InstanceIdentifier<?>> filterIdentifiers = new Predicate<InstanceIdentifier<?>>() {
42         @Override
43         public boolean apply(final InstanceIdentifier<?> input) {
44             /*
45              * This notification has been triggered either by the ancestor,
46              * descendant or directly for the FlowCapableNode itself. We
47              * are not interested descendants, so let's prune them based
48              * on the depth of their identifier.
49              */
50             if (root.getPath().size() < input.getPath().size()) {
51                 logger.debug("Ignoring notification for descendant {}", input);
52                 return false;
53             }
54
55             logger.debug("Including notification for {}", input);
56             return true;
57         }
58     };
59
60     public FlowCapableTracker(final StatisticsProvider stats, InstanceIdentifier<FlowCapableNode> root) {
61         this.stats = Preconditions.checkNotNull(stats);
62         this.root = Preconditions.checkNotNull(root);
63     }
64
65     /*
66      * This method is synchronized because we want to make sure to serialize input
67      * from the datastore. Competing add/remove could be problematic otherwise.
68      */
69     @Override
70     public synchronized void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
71         logger.debug("Tracker at root {} processing notification", root);
72
73         /*
74          * First process all the identifiers which were removed, trying to figure out
75          * whether they constitute removal of FlowCapableNode.
76          */
77         final Collection<NodeKey> removedNodes =
78             Collections2.filter(Collections2.transform(
79                 Sets.filter(change.getRemovedOperationalData(), filterIdentifiers),
80                 new Function<InstanceIdentifier<?>, NodeKey>() {
81                     @Override
82                     public NodeKey apply(final InstanceIdentifier<?> input) {
83                         final NodeKey key = input.firstKeyOf(Node.class, NodeKey.class);
84                         if (key == null) {
85                             // FIXME: do we have a backup plan?
86                             logger.info("Failed to extract node key from {}", input);
87                         }
88                         return key;
89                     }
90                 }), Predicates.notNull());
91         stats.stopNodeHandlers(removedNodes);
92
93         final Collection<NodeKey> addedNodes =
94             Collections2.filter(Collections2.transform(
95                 Sets.filter(change.getCreatedOperationalData().keySet(), filterIdentifiers),
96                 new Function<InstanceIdentifier<?>, NodeKey>() {
97                     @Override
98                     public NodeKey apply(final InstanceIdentifier<?> input) {
99                         final NodeKey key = input.firstKeyOf(Node.class, NodeKey.class);
100                         if (key == null) {
101                             // FIXME: do we have a backup plan?
102                             logger.info("Failed to extract node key from {}", input);
103                     }
104                     return key;
105                 }
106             }), Predicates.notNull());
107         stats.startNodeHandlers(addedNodes);
108
109         logger.debug("Tracker at root {} finished processing notification", root);
110     }
111 }