*/
package org.opendaylight.controller.md.statistics.manager;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
private final InstanceIdentifier<Node> targetNodeIdentifier;
private final StatisticsProvider statisticsProvider;
private final NodeKey targetNodeKey;
+ private Collection<TableKey> knownTables = Collections.emptySet();
private int unaccountedFlowsCounter = 1;
public NodeStatisticsHandler(StatisticsProvider statisticsProvider, NodeKey nodeKey){
}
}
- private static final class QueueEntry{
+ private static final class QueueEntry {
private final NodeConnectorId nodeConnectorId;
private final QueueId queueId;
public QueueEntry(NodeConnectorId ncId, QueueId queueId){
return targetNodeKey;
}
+ public Collection<TableKey> getKnownTables() {
+ return knownTables;
+ }
+
public synchronized void updateGroupDescStats(List<GroupDescStats> list){
final Long expiryTime = getExpiryTime();
final DataModificationTransaction trans = statisticsProvider.startChange();
trans.commit();
}
-
public synchronized void updateGroupStats(List<GroupStats> list) {
final DataModificationTransaction trans = statisticsProvider.startChange();
// Writes per-table flow statistics into the operational data store and
// caches the set of table keys the switch reported, so that later
// aggregate-flow requests can target only tables known to exist.
// NOTE(review): this span is a diff fragment — the '-' lines are the
// pre-change code and several context lines (the tableBuilder setup and
// the logger.debug argument list) are elided from this view; verify
// against the full file before relying on the exact statement order.
public synchronized void updateFlowTableStats(List<FlowTableAndStatisticsMap> list) {
final DataModificationTransaction trans = statisticsProvider.startChange();
// Local accumulator; published atomically at the end of the method.
+ final Set<TableKey> knownTables = new HashSet<>(list.size());
for (FlowTableAndStatisticsMap ftStats : list) {
InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class).child(Node.class, targetNodeKey)
.augmentation(FlowCapableNode.class).child(Table.class, new TableKey(ftStats.getTableId().getValue())).toInstance();
FlowTableStatisticsDataBuilder statisticsDataBuilder = new FlowTableStatisticsDataBuilder();
-
- FlowTableStatisticsBuilder statisticsBuilder = new FlowTableStatisticsBuilder();
- statisticsBuilder.setActiveFlows(ftStats.getActiveFlows());
- statisticsBuilder.setPacketsLookedUp(ftStats.getPacketsLookedUp());
- statisticsBuilder.setPacketsMatched(ftStats.getPacketsMatched());
-
- final FlowTableStatistics stats = statisticsBuilder.build();
// Copy-constructor form replaces the field-by-field builder removed above;
// presumably it copies the same three counters — TODO confirm the
// generated builder's copy semantics.
+ final FlowTableStatistics stats = new FlowTableStatisticsBuilder(ftStats).build();
statisticsDataBuilder.setFlowTableStatistics(stats);
logger.debug("Augment flow table statistics: {} for table {} on Node {}",
tableBuilder.addAugmentation(FlowTableStatisticsData.class, statisticsDataBuilder.build());
trans.putOperationalData(tableRef, tableBuilder.build());
- // FIXME: should we be tracking this data?
// Track the table key so getKnownTables() reflects the latest reply.
+ knownTables.add(tableBuilder.getKey());
}
// Publish an immutable view; readers of getKnownTables() must not mutate it.
+ this.knownTables = Collections.unmodifiableCollection(knownTables);
trans.commit();
}
package org.opendaylight.controller.md.statistics.manager;
import java.util.Collection;
-import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput;
private void statsRequestSender() {
for (NodeStatisticsHandler h : handlers.values()) {
- sendStatisticsRequestsToNode(h.getTargetNodeKey());
+ sendStatisticsRequestsToNode(h);
}
}
// Issues one round of statistics requests (flow tables, flows, ports,
// meters, queues) to the given node, guarded by the availability of each
// RPC service.
// NOTE(review): this span is a diff fragment — '-' lines are pre-change
// code, and context around the meter-statistics guard (before L112's
// sendMeterConfigStatisticsRequest) plus the method's closing brace are
// elided from this view; brace balance cannot be verified here.
- private void sendStatisticsRequestsToNode(NodeKey targetNode){
+ private void sendStatisticsRequestsToNode(NodeStatisticsHandler h) {
+ NodeKey targetNode = h.getTargetNodeKey();
+ spLogger.debug("Send requests for statistics collection to node : {}", targetNode.getId());
- spLogger.debug("Send requests for statistics collection to node : {})",targetNode.getId());
-
- InstanceIdentifier<Node> targetInstanceId = InstanceIdentifier.builder(Nodes.class).child(Node.class,targetNode).toInstance();
+ InstanceIdentifier<Node> targetInstanceId = InstanceIdentifier.builder(Nodes.class).child(Node.class, targetNode).build();
NodeRef targetNodeRef = new NodeRef(targetInstanceId);
try{
- if(flowStatsService != null){
- sendAggregateFlowsStatsFromAllTablesRequest(targetNode);
- sendAllFlowsStatsFromAllTablesRequest(targetNodeRef);
- }
if(flowTableStatsService != null){
sendAllFlowTablesStatisticsRequest(targetNodeRef);
}
+ if(flowStatsService != null){
+ // FIXME: it does not make sense to trigger this before sendAllFlowTablesStatisticsRequest()
+ // comes back -- we do not have any tables anyway.
+ sendAggregateFlowsStatsFromAllTablesRequest(h);
+
+ sendAllFlowsStatsFromAllTablesRequest(targetNodeRef);
+ }
if(portStatsService != null){
sendAllNodeConnectorsStatisticsRequest(targetNodeRef);
}
sendMeterConfigStatisticsRequest(targetNodeRef);
}
if(queueStatsService != null){
- sendAllQueueStatsFromAllNodeConnector (targetNodeRef);
+ sendAllQueueStatsFromAllNodeConnector(targetNodeRef);
}
}catch(Exception e){
// NOTE(review): with SLF4J, pairing a "{}" placeholder with the
// exception as its argument suppresses the stack trace; prefer
// spLogger.error("Exception occured while sending statistics requests", e).
spLogger.error("Exception occured while sending statistics requests : {}", e);
}
- private void sendAggregateFlowsStatsFromAllTablesRequest(final NodeKey nodeKey) throws InterruptedException, ExecutionException{
- FlowCapableNode node = (FlowCapableNode)dps.readOperationalData(
- InstanceIdentifier.builder(Nodes.class).child(Node.class,nodeKey).augmentation(FlowCapableNode.class).build());
- if (node != null) {
- final List<Table> tables = node.getTable();
- if (tables != null) {
- spLogger.debug("Node {} supports {} table(s)", nodeKey, tables.size());
- for(Table table : tables) {
- sendAggregateFlowsStatsFromTableRequest(nodeKey, table.getId());
- }
- } else {
- spLogger.debug("Node {} has no associated tables", nodeKey);
- }
- } else {
- spLogger.debug("Node {} not found", nodeKey);
+ private void sendAggregateFlowsStatsFromAllTablesRequest(final NodeStatisticsHandler h) throws InterruptedException, ExecutionException{
+ final Collection<TableKey> tables = h.getKnownTables();
+ spLogger.debug("Node {} supports {} table(s)", h, tables.size());
+
+ for (TableKey key : h.getKnownTables()) {
+ sendAggregateFlowsStatsFromTableRequest(h.getTargetNodeKey(), key.getId().shortValue());
}
}
spLogger.debug("Started node handler for {}", key.getId());
// FIXME: this should be in the NodeStatisticsHandler itself
- sendStatisticsRequestsToNode(key);
+ sendStatisticsRequestsToNode(h);
}
}