Merge "Split off tracking of node presence"
controller.git: opendaylight/md-sal/statistics-manager/src/main/java/org/opendaylight/controller/md/statistics/manager/FlowCapableTracker.java
/*
 * Copyright IBM Corporation, 2013.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.md.statistics.manager;

import java.util.Collection;

import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.Collections2;
import com.google.common.collect.Sets;

/**
 * There is a single instance of this class and that instance is responsible for
 * monitoring the operational data store for nodes being created/deleted and
 * notifying StatisticsProvider. These events then control the lifecycle of
 * NodeStatisticsHandler for a particular switch.
 */
final class FlowCapableTracker implements DataChangeListener {
    private static final Logger logger = LoggerFactory.getLogger(FlowCapableTracker.class);

    private final InstanceIdentifier<FlowCapableNode> root;
    private final StatisticsProvider stats;

    private final Predicate<InstanceIdentifier<?>> filterIdentifiers = new Predicate<InstanceIdentifier<?>>() {
        @Override
        public boolean apply(final InstanceIdentifier<?> input) {
            /*
             * This notification has been triggered either by the ancestor,
             * descendant or directly for the FlowCapableNode itself. We
             * are not interested in descendants, so let's prune them based
             * on the depth of their identifier.
             */
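            /*
             * As a rough illustration (assuming the root is the usual
             * wildcarded Nodes/Node/FlowCapableNode path, i.e. depth 3):
             * a notification for Nodes/Node (depth 2) or for the
             * FlowCapableNode itself (depth 3) is kept, while one for a
             * child such as Nodes/Node/FlowCapableNode/Table (depth 4)
             * is a descendant and gets pruned by the size comparison below.
             */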
            if (root.getPath().size() < input.getPath().size()) {
                logger.debug("Ignoring notification for descendant {}", input);
                return false;
            }

            logger.debug("Including notification for {}", input);
            return true;
        }
    };

    public FlowCapableTracker(final StatisticsProvider stats, InstanceIdentifier<FlowCapableNode> root) {
        this.stats = Preconditions.checkNotNull(stats);
        this.root = Preconditions.checkNotNull(root);
    }

    /*
     * This method is synchronized because we want to make sure to serialize input
     * from the datastore. Competing add/remove could be problematic otherwise.
     */
    @Override
    public synchronized void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
        /*
         * First process all the identifiers which were removed, trying to figure out
         * whether they constitute removal of FlowCapableNode.
         */
        final Collection<NodeKey> removedNodes =
            Collections2.filter(Collections2.transform(
                Sets.filter(change.getRemovedOperationalData(), filterIdentifiers),
                new Function<InstanceIdentifier<?>, NodeKey>() {
                    @Override
                    public NodeKey apply(final InstanceIdentifier<?> input) {
                        final NodeKey key = input.firstKeyOf(Node.class, NodeKey.class);
                        if (key == null) {
                            // FIXME: do we have a backup plan?
                            logger.info("Failed to extract node key from {}", input);
                        }
                        return key;
                    }
                }), Predicates.notNull());
        stats.stopNodeHandlers(removedNodes);

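        /*
         * Now do the same for identifiers which were created, this time
         * looking at the keys of the created operational data map, and hand
         * the resulting node keys over to start their statistics handlers.
         */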
        final Collection<NodeKey> addedNodes =
            Collections2.filter(Collections2.transform(
                Sets.filter(change.getCreatedOperationalData().keySet(), filterIdentifiers),
                new Function<InstanceIdentifier<?>, NodeKey>() {
                    @Override
                    public NodeKey apply(final InstanceIdentifier<?> input) {
                        final NodeKey key = input.firstKeyOf(Node.class, NodeKey.class);
                        if (key == null) {
                            // FIXME: do we have a backup plan?
                            logger.info("Failed to extract node key from {}", input);
                        }
                        return key;
                    }
                }), Predicates.notNull());
        stats.startNodeHandlers(addedNodes);
    }
}