.settings\r
MANIFEST.MF\r
xtend-gen\r
+yang-gen-sal\r
+yang-gen-config\r
.externalToolBuilders\r
maven-eclipse.xml\r
-\r
+.metadata/\r
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>forwardingrules-manager</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-service</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-broker-impl</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Bundle-Activator>org.opendaylight.controller.frm.FRMActivator</Bundle-Activator>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</developerConnection>
+ <tag>HEAD</tag>
+ </scm>
+</project>
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.frm;
+
+import org.opendaylight.controller.frm.impl.ForwardingRulesManagerImpl;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
+import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Forwarding Rules Manager Activator
+ *
+ * Activator for {@link ForwardingRulesManager}.
+ * It registers all listeners (DataChangeEvent, ReconcileNotification)
+ * during the Session Initialization phase.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ */
+public class FRMActivator extends AbstractBindingAwareProvider {
+
+ private final static Logger LOG = LoggerFactory.getLogger(FRMActivator.class);
+
+ private ForwardingRulesManager manager;
+
+ @Override
+ public void onSessionInitiated(ProviderContext session) {
+ LOG.info("FRMActivator initialization.");
+ try {
+ final DataBroker dataBroker = session.getSALService(DataBroker.class);
+ this.manager = new ForwardingRulesManagerImpl(dataBroker, session);
+ this.manager.start();
+ LOG.info("FRMActivator initialization successfull.");
+ }
+ catch (Exception e) {
+ LOG.error("Unexpected error by FRM initialization!", e);
+ this.stopImpl(null);
+ }
+ }
+
+ @Override
+ protected void stopImpl(final BundleContext context) {
+ if (manager != null) {
+ try {
+ manager.close();
+ } catch (Exception e) {
+ LOG.error("Unexpected error by stopping FRMActivator", e);
+ }
+ manager = null;
+ LOG.info("FRMActivator stopped.");
+ }
+ }
+ }
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.frm;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * forwardingrules-manager
+ * org.opendaylight.controller.frm
+ *
+ * FlowNodeReconciliation
+ * It represents the reconciliation functionality for every new device.
+ * So we have to read all possible pre-configured Flows, Meters and Groups from
+ * Config/DS and add all to new device.
+ * New device is represented by new {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+ * in Operational/DS. So we have to add listener for Wildcarded path in base data change scope.
+ *
+ * WildCarded InstanceIdentifier:
+ * {@code
+ *
+ * InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class)
+ *
+ * }
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 26, 2014
+ */
+public interface FlowNodeReconciliation extends DataChangeListener, AutoCloseable {
+
+ /**
+ * Method registers the Node with the {@link ForwardingRulesManager} so that
+ * a validation check against the Operational/DS is not needed to distinguish
+ * a pre-configure transaction from a real device commit in every transaction.
+ *
+ * The second part of the functionality is the reconciliation of pre-configured
+ * Flows, Meters and Groups.
+ *
+ * @param connectedNode - {@link org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier} to the new Node
+ */
+ void flowNodeConnected(InstanceIdentifier<FlowCapableNode> connectedNode);
+
+ /**
+ * Method removes a registered Node {@link FlowCapableNode}
+ * from the {@link ForwardingRulesManager}.
+ *
+ * @param disconnectedNode - {@link org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier} to the removed Node
+ */
+ void flowNodeDisconnected(InstanceIdentifier<FlowCapableNode> disconnectedNode);
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.frm;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * forwardingrules-manager
+ * org.opendaylight.controller.frm
+ *
+ * ForwardingRulesCommiter
+ * It represents a contract between a DataStore DataChangeEvent and the relevant
+ * SalRpcService for a device. Every implementation has to be registered for
+ * a Configuration/DS tree path.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 25, 2014
+ */
+public interface ForwardingRulesCommiter <D extends DataObject> extends AutoCloseable, DataChangeListener {
+
+ /**
+ * Method removes DataObject which is identified by InstanceIdentifier
+ * from device.
+ *
+ * @param InstanceIdentifier identifier - the whole path to DataObject
+ * @param DataObject remove - DataObject for removing
+ * @param InstanceIdentifier<FlowCapableNode> parent Node InstanceIdentifier
+ */
+ void remove(InstanceIdentifier<D> identifier, D del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent);
+
+ /**
+ * Method updates the original DataObject to the update DataObject
+ * in device. Both are identified by same InstanceIdentifier
+ *
+ * @param InstanceIdentifier identifier - the whole path to DataObject
+ * @param DataObject original - original DataObject (for update)
+ * @param DataObject update - changed DataObject (contain updates)
+ */
+ void update(InstanceIdentifier<D> identifier, D original, D update,
+ InstanceIdentifier<FlowCapableNode> nodeIdent);
+
+ /**
+ * Method adds the DataObject which is identified by InstanceIdentifier
+ * to device.
+ *
+ * @param InstanceIdentifier identifier - the whole path to new DataObject
+ * @param DataObject add - new DataObject
+ */
+ void add(InstanceIdentifier<D> identifier, D add,
+ InstanceIdentifier<FlowCapableNode> nodeIdent);
+
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.frm;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * forwardingrules-manager
+ * org.opendaylight.controller.frm
+ *
+ * ForwardingRulesManager
+ * It represents a central point for the whole module. The implementing
+ * Flow Provider registers the {@code FlowChangeListener} and holds all
+ * services needed by that listener.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 25, 2014
+ */
+public interface ForwardingRulesManager extends AutoCloseable {
+
+ public void start();
+
+ /**
+ * Method returns the information:
+ * "is the Node with the given InstanceIdentifier connected?"
+ *
+ * @param InstanceIdentifier<FlowCapableNode> ident - the key of the node
+ * @return boolean - is device connected
+ */
+ public boolean isNodeActive(InstanceIdentifier<FlowCapableNode> ident);
+
+ /**
+ * Method adds a new {@link FlowCapableNode} to the active Node Holder.
+ * The ActiveNodeHolder prevents unnecessary Operational/DS reads to identify
+ * pre-configure and real Configure/DS transactions.
+ *
+ * @param InstanceIdentifier<FlowCapableNode> ident - the key of the node
+ */
+ public void registrateNewNode(InstanceIdentifier<FlowCapableNode> ident);
+
+ /**
+ * Method removes a disconnected {@link FlowCapableNode} from the active Node
+ * Holder. All subsequent flows, groups and meters will stay in Config/DS
+ * only.
+ *
+ * @param InstanceIdentifier<FlowCapableNode> ident - the key of the node
+ */
+ public void unregistrateNode(InstanceIdentifier<FlowCapableNode> ident);
+
+ /**
+ * Method returns generated transaction ID, which is unique for
+ * every transaction. ID is composite from prefix ("DOM") and unique number.
+ *
+ * @return String transactionID for RPC transaction identification
+ */
+ public String getNewTransactionId();
+
+ /**
+ * Method returns a read transaction. It is needed for Node reconciliation only.
+ *
+ * @return ReadOnlyTransaction
+ */
+ public ReadOnlyTransaction getReadTranaction();
+
+ /**
+ * Flow RPC service
+ *
+ * @return
+ */
+ public SalFlowService getSalFlowService();
+
+ /**
+ * Group RPC service
+ *
+ * @return
+ */
+ public SalGroupService getSalGroupService();
+
+ /**
+ * Meter RPC service
+ *
+ * @return
+ */
+ public SalMeterService getSalMeterService();
+
+ /**
+ * Content definition method; prevents code duplication in reconciliation.
+ * @return ForwardingRulesCommiter<Flow>
+ */
+ public ForwardingRulesCommiter<Flow> getFlowCommiter();
+
+ /**
+ * Content definition method; prevents code duplication in reconciliation.
+ * @return ForwardingRulesCommiter<Group>
+ */
+ public ForwardingRulesCommiter<Group> getGroupCommiter();
+
+ /**
+ * Content definition method; prevents code duplication.
+ * @return ForwardingRulesCommiter<Meter>
+ */
+ public ForwardingRulesCommiter<Meter> getMeterCommiter();
+
+ /**
+ * Content definition method
+ * @return FlowNodeReconciliation
+ */
+ public FlowNodeReconciliation getFlowNodeReconciliation();
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.frm.impl;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.frm.ForwardingRulesCommiter;
+import org.opendaylight.controller.frm.ForwardingRulesManager;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * AbstractChangeListener implements basic {@link AsyncDataChangeEvent} processing for
+ * flow node sub-DataObjects (flows, groups and meters).
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public abstract class AbstractListeningCommiter <T extends DataObject> implements ForwardingRulesCommiter<T> {
+
+ protected ForwardingRulesManager provider;
+
+ protected final Class<T> clazz;
+
+ public AbstractListeningCommiter (ForwardingRulesManager provider, Class<T> clazz) {
+ this.provider = Preconditions.checkNotNull(provider, "ForwardingRulesManager can not be null!");
+ this.clazz = Preconditions.checkNotNull(clazz, "Class can not be null!");
+ }
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+
+ /* All DataObjects for create */
+ final Map<InstanceIdentifier<?>, DataObject> createdData = changeEvent.getCreatedData() != null
+ ? changeEvent.getCreatedData() : Collections.<InstanceIdentifier<?>, DataObject> emptyMap();
+ /* All DataObjects for remove */
+ final Set<InstanceIdentifier<?>> removeData = changeEvent.getRemovedPaths() != null
+ ? changeEvent.getRemovedPaths() : Collections.<InstanceIdentifier<?>> emptySet();
+ /* All DataObjects for updates */
+ final Map<InstanceIdentifier<?>, DataObject> updateData = changeEvent.getUpdatedData() != null
+ ? changeEvent.getUpdatedData() : Collections.<InstanceIdentifier<?>, DataObject> emptyMap();
+ /* All Original DataObjects */
+ final Map<InstanceIdentifier<?>, DataObject> originalData = changeEvent.getOriginalData() != null
+ ? changeEvent.getOriginalData() : Collections.<InstanceIdentifier<?>, DataObject> emptyMap();
+
+ this.createData(createdData);
+ this.updateData(updateData, originalData);
+ this.removeData(removeData, originalData);
+ }
+
+ /**
+ * Method returns the wildcard path for listener registration
+ * and for identifying the correct KeyedInstanceIdentifier from data.
+ */
+ protected abstract InstanceIdentifier<T> getWildCardPath();
+
+
+
+ @SuppressWarnings("unchecked")
+ private void createData(final Map<InstanceIdentifier<?>, DataObject> createdData) {
+ final Set<InstanceIdentifier<?>> keys = createdData.keySet() != null
+ ? createdData.keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+ for (InstanceIdentifier<?> key : keys) {
+ if (clazz.equals(key.getTargetType())) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent =
+ key.firstIdentifierOf(FlowCapableNode.class);
+ if (preConfigurationCheck(nodeIdent)) {
+ InstanceIdentifier<T> createKeyIdent = key.firstIdentifierOf(clazz);
+ final Optional<DataObject> value = Optional.of(createdData.get(key));
+ if (value.isPresent()) {
+ this.add(createKeyIdent, (T)value.get(), nodeIdent);
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void updateData(final Map<InstanceIdentifier<?>, DataObject> updateData,
+ final Map<InstanceIdentifier<?>, DataObject> originalData) {
+
+ final Set<InstanceIdentifier<?>> keys = updateData.keySet() != null
+ ? updateData.keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+ for (InstanceIdentifier<?> key : keys) {
+ if (clazz.equals(key.getTargetType())) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent =
+ key.firstIdentifierOf(FlowCapableNode.class);
+ if (preConfigurationCheck(nodeIdent)) {
+ InstanceIdentifier<T> updateKeyIdent = key.firstIdentifierOf(clazz);
+ final Optional<DataObject> value = Optional.of(updateData.get(key));
+ final Optional<DataObject> original = Optional.of(originalData.get(key));
+ if (value.isPresent() && original.isPresent()) {
+ this.update(updateKeyIdent, (T)original.get(), (T)value.get(), nodeIdent);
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void removeData(final Set<InstanceIdentifier<?>> removeData,
+ final Map<InstanceIdentifier<?>, DataObject> originalData) {
+
+ for (InstanceIdentifier<?> key : removeData) {
+ if (clazz.equals(key.getTargetType())) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent =
+ key.firstIdentifierOf(FlowCapableNode.class);
+ if (preConfigurationCheck(nodeIdent)) {
+ final InstanceIdentifier<T> ident = key.firstIdentifierOf(clazz);
+ final DataObject removeValue = originalData.get(key);
+ this.remove(ident, (T)removeValue, nodeIdent);
+ }
+ }
+ }
+ }
+
+ private boolean preConfigurationCheck(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ Preconditions.checkNotNull(nodeIdent, "FlowCapableNode ident can not be null!");
+ return provider.isNodeActive(nodeIdent);
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.frm.impl;
+
+import org.opendaylight.controller.frm.ForwardingRulesManager;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowTableRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.flow.update.OriginalFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.flow.update.UpdatedFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FlowForwarder
+ * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}
+ * for a wildcarded path to {@link Flow} and the ForwardingRulesCommiter interface for
+ * add, update and remove {@link Flow} processing of
+ * {@link org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent}.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class FlowForwarder extends AbstractListeningCommiter<Flow> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(FlowForwarder.class);
+
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ public FlowForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ super(manager, Flow.class);
+ Preconditions.checkNotNull(db, "DataBroker can not be null!");
+ registrationListener(db, 5);
+ }
+
+ private void registrationListener(final DataBroker db, int i) {
+ try {
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+ } catch (final Exception e) {
+ if (i >= 1) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e1) {
+ LOG.error("Thread interrupted '{}'", e1);
+ Thread.currentThread().interrupt();
+ }
+ registrationListener(db, --i);
+ } else {
+ LOG.error("FRM Flow DataChange listener registration fail!", e);
+ throw new IllegalStateException("FlowForwarder registration Listener fail! System needs restart.", e);
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (final Exception e) {
+ LOG.error("Error by stop FRM FlowChangeListener.", e);
+ }
+ listenerRegistration = null;
+ }
+ }
+
+ @Override
+ public void remove(final InstanceIdentifier<Flow> identifier,
+ final Flow removeDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
+ if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
+ final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj);
+ builder.setFlowRef(new FlowRef(identifier));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
+
+ // This method is called only when a given flow object has been
+ // removed from datastore. So FRM always needs to set strict flag
+ // into remove-flow input so that only a flow entry associated with
+ // a given flow object is removed.
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId())).
+ setStrict(Boolean.TRUE);
+ provider.getSalFlowService().removeFlow(builder.build());
+ }
+ }
+
+ @Override
+ public void update(final InstanceIdentifier<Flow> identifier,
+ final Flow original, final Flow update,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
+ if (tableIdValidationPrecondition(tableKey, update)) {
+ final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder();
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setFlowRef(new FlowRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+
+ // This method is called only when a given flow object in datastore
+ // has been updated. So FRM always needs to set strict flag into
+ // update-flow input so that only a flow entry associated with
+ // a given flow object is updated.
+ builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).setStrict(Boolean.TRUE).build());
+ builder.setOriginalFlow((new OriginalFlowBuilder(original)).setStrict(Boolean.TRUE).build());
+
+ provider.getSalFlowService().updateFlow(builder.build());
+ }
+ }
+
+ @Override
+ public void add(final InstanceIdentifier<Flow> identifier,
+ final Flow addDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
+ if (tableIdValidationPrecondition(tableKey, addDataObj)) {
+ final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj);
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setFlowRef(new FlowRef(identifier));
+ builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ provider.getSalFlowService().addFlow(builder.build());
+ }
+ }
+
+ @Override
+ protected InstanceIdentifier<Flow> getWildCardPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Table.class).child(Flow.class);
+ }
+
+ private boolean tableIdValidationPrecondition (final TableKey tableKey, final Flow flow) {
+ Preconditions.checkNotNull(tableKey, "TableKey can not be null or empty!");
+ Preconditions.checkNotNull(flow, "Flow can not be null or empty!");
+ if (! tableKey.getId().equals(flow.getTableId())) {
+ LOG.error("TableID in URI tableId={} and in palyload tableId={} is not same.",
+ flow.getTableId(), tableKey.getId());
+ return false;
+ }
+ return true;
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.frm.impl;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import org.opendaylight.controller.frm.FlowNodeReconciliation;
+import org.opendaylight.controller.frm.ForwardingRulesManager;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * forwardingrules-manager
+ * org.opendaylight.controller.frm
+ *
+ * FlowNode Reconciliation Listener
+ * Reconciliation for a new FlowNode
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Jun 13, 2014
+ */
+public class FlowNodeReconciliationImpl implements FlowNodeReconciliation {
+
+ private static final Logger LOG = LoggerFactory.getLogger(FlowNodeReconciliationImpl.class);
+
+ private final ForwardingRulesManager provider;
+
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ public FlowNodeReconciliationImpl (final ForwardingRulesManager manager, final DataBroker db) {
+ this.provider = Preconditions.checkNotNull(manager, "ForwardingRulesManager can not be null!");
+ Preconditions.checkNotNull(db, "DataBroker can not be null!");
+ /* Build Path */
+ InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class).augmentation(FlowCapableNode.class);
+ this.listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ flowNodeWildCardIdentifier, FlowNodeReconciliationImpl.this, DataChangeScope.BASE);
+ }
+
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (Exception e) {
+ LOG.error("Error by stop FRM FlowNodeReconilListener.", e);
+ }
+ listenerRegistration = null;
+ }
+ }
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+ /* All DataObjects for create */
+ final Set<InstanceIdentifier<?>> createdData = changeEvent.getCreatedData() != null
+ ? changeEvent.getCreatedData().keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+ /* All DataObjects for remove */
+ final Set<InstanceIdentifier<?>> removeData = changeEvent.getRemovedPaths() != null
+ ? changeEvent.getRemovedPaths() : Collections.<InstanceIdentifier<?>> emptySet();
+
+ for (InstanceIdentifier<?> entryKey : removeData) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent = entryKey
+ .firstIdentifierOf(FlowCapableNode.class);
+ if ( ! nodeIdent.isWildcarded()) {
+ flowNodeDisconnected(nodeIdent);
+ }
+ }
+ for (InstanceIdentifier<?> entryKey : createdData) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent = entryKey
+ .firstIdentifierOf(FlowCapableNode.class);
+ if ( ! nodeIdent.isWildcarded()) {
+ flowNodeConnected(nodeIdent);
+ }
+ }
+ }
+
+ @Override
+ public void flowNodeDisconnected(InstanceIdentifier<FlowCapableNode> disconnectedNode) {
+ provider.unregistrateNode(disconnectedNode);
+ }
+
+ @Override
+ public void flowNodeConnected(InstanceIdentifier<FlowCapableNode> connectedNode) {
+ if ( ! provider.isNodeActive(connectedNode)) {
+ provider.registrateNewNode(connectedNode);
+ reconciliation(connectedNode);
+ }
+ }
+
+ /**
+ * Reads the node's configuration from the Config datastore and pushes all of its
+ * groups, meters and flows to the device via the registered commiters. Groups are
+ * pushed first because flows and meters may reference them.
+ *
+ * The read transaction is now closed in a finally block: previously, an exception
+ * thrown by any commiter {@code add(...)} call left the transaction unclosed.
+ *
+ * @param nodeIdent path to the freshly connected FlowCapableNode
+ */
+ private void reconciliation(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final ReadOnlyTransaction trans = provider.getReadTranaction();
+ try {
+ Optional<FlowCapableNode> flowNode = Optional.absent();
+
+ try {
+ flowNode = trans.read(LogicalDatastoreType.CONFIGURATION, nodeIdent).get();
+ }
+ catch (Exception e) {
+ LOG.error("Fail with read Config/DS for Node {} !", nodeIdent, e);
+ }
+
+ if (flowNode.isPresent()) {
+ /* Groups - have to be first */
+ List<Group> groups = flowNode.get().getGroup() != null
+ ? flowNode.get().getGroup() : Collections.<Group> emptyList();
+ for (Group group : groups) {
+ final KeyedInstanceIdentifier<Group, GroupKey> groupIdent =
+ nodeIdent.child(Group.class, group.getKey());
+ this.provider.getGroupCommiter().add(groupIdent, group, nodeIdent);
+ }
+ /* Meters */
+ List<Meter> meters = flowNode.get().getMeter() != null
+ ? flowNode.get().getMeter() : Collections.<Meter> emptyList();
+ for (Meter meter : meters) {
+ final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent =
+ nodeIdent.child(Meter.class, meter.getKey());
+ this.provider.getMeterCommiter().add(meterIdent, meter, nodeIdent);
+ }
+ /* Flows */
+ List<Table> tables = flowNode.get().getTable() != null
+ ? flowNode.get().getTable() : Collections.<Table> emptyList();
+ for (Table table : tables) {
+ final KeyedInstanceIdentifier<Table, TableKey> tableIdent =
+ nodeIdent.child(Table.class, table.getKey());
+ List<Flow> flows = table.getFlow() != null ? table.getFlow() : Collections.<Flow> emptyList();
+ for (Flow flow : flows) {
+ final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent =
+ tableIdent.child(Flow.class, flow.getKey());
+ this.provider.getFlowCommiter().add(flowIdent, flow, nodeIdent);
+ }
+ }
+ }
+ } finally {
+ /* clean transaction - released even if a commiter throws */
+ trans.close();
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.frm.impl;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.opendaylight.controller.frm.FlowNodeReconciliation;
+import org.opendaylight.controller.frm.ForwardingRulesCommiter;
+import org.opendaylight.controller.frm.ForwardingRulesManager;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+
+/**
+ * forwardingrules-manager
+ * org.opendaylight.controller.frm.impl
+ *
+ * Manager and middle point for whole module.
+ * It contains the active-node holder and provides all RPC services.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 25, 2014
+ */
+public class ForwardingRulesManagerImpl implements ForwardingRulesManager {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ForwardingRulesManagerImpl.class);
+
+ // Monotonic counter backing getNewTransactionId(); never reset.
+ private final AtomicLong txNum = new AtomicLong();
+ // Guards the copy-on-write replacement of activeNodes below.
+ private final Object lockObj = new Object();
+ // Immutable snapshot of currently registered nodes; readers access it lock-free,
+ // writers replace the whole set under lockObj (copy-on-write).
+ private Set<InstanceIdentifier<FlowCapableNode>> activeNodes = Collections.emptySet();
+
+ private final DataBroker dataService;
+ private final SalFlowService salFlowService;
+ private final SalGroupService salGroupService;
+ private final SalMeterService salMeterService;
+
+ // Per-entity commiters / listeners; created in start(), released in close().
+ private ForwardingRulesCommiter<Flow> flowListener;
+ private ForwardingRulesCommiter<Group> groupListener;
+ private ForwardingRulesCommiter<Meter> meterListener;
+ private FlowNodeReconciliation nodeListener;
+
+ // Resolves the three SAL RPC services from the registry; fails fast (NPE with the
+ // given message) if the broker or any RPC service is missing.
+ public ForwardingRulesManagerImpl(final DataBroker dataBroker,
+ final RpcConsumerRegistry rpcRegistry) {
+ this.dataService = Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
+
+ Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
+
+ this.salFlowService = Preconditions.checkNotNull(rpcRegistry.getRpcService(SalFlowService.class),
+ "RPC SalFlowService not found.");
+ this.salGroupService = Preconditions.checkNotNull(rpcRegistry.getRpcService(SalGroupService.class),
+ "RPC SalGroupService not found.");
+ this.salMeterService = Preconditions.checkNotNull(rpcRegistry.getRpcService(SalMeterService.class),
+ "RPC SalMeterService not found.");
+ }
+
+ // Instantiates the flow/group/meter forwarders and the node-reconciliation listener;
+ // each registers its own datastore change listener against dataService.
+ @Override
+ public void start() {
+ this.flowListener = new FlowForwarder(this, dataService);
+ this.groupListener = new GroupForwarder(this, dataService);
+ this.meterListener = new MeterForwarder(this, dataService);
+ this.nodeListener = new FlowNodeReconciliationImpl(this, dataService);
+ LOG.info("ForwardingRulesManager has started successfull.");
+ }
+
+ // Closes and nulls each listener; safe to call repeatedly because every field is
+ // null-checked before closing.
+ @Override
+ public void close() throws Exception {
+ if(this.flowListener != null) {
+ this.flowListener.close();
+ this.flowListener = null;
+ }
+ if (this.groupListener != null) {
+ this.groupListener.close();
+ this.groupListener = null;
+ }
+ if (this.meterListener != null) {
+ this.meterListener.close();
+ this.meterListener = null;
+ }
+ if (this.nodeListener != null) {
+ this.nodeListener.close();
+ this.nodeListener = null;
+ }
+ }
+
+ // Hands out a fresh read-only transaction; the caller owns it and must close it.
+ @Override
+ public ReadOnlyTransaction getReadTranaction() {
+ return dataService.newReadOnlyTransaction();
+ }
+
+ // Returns a process-unique transaction URI value ("DOM-0", "DOM-1", ...).
+ @Override
+ public String getNewTransactionId() {
+ return "DOM-" + txNum.getAndIncrement();
+ }
+
+ @Override
+ public boolean isNodeActive(InstanceIdentifier<FlowCapableNode> ident) {
+ return activeNodes.contains(ident);
+ }
+
+ // Double-checked add: cheap lock-free pre-check, then re-check under the lock and
+ // publish a new unmodifiable copy of the set containing the node.
+ @Override
+ public void registrateNewNode(InstanceIdentifier<FlowCapableNode> ident) {
+ if ( ! activeNodes.contains(ident)) {
+ synchronized (lockObj) {
+ if ( ! activeNodes.contains(ident)) {
+ Set<InstanceIdentifier<FlowCapableNode>> set =
+ Sets.newHashSet(activeNodes);
+ set.add(ident);
+ activeNodes = Collections.unmodifiableSet(set);
+ }
+ }
+ }
+ }
+
+ // Mirror of registrateNewNode: copy-on-write removal of the node from the set.
+ @Override
+ public void unregistrateNode(InstanceIdentifier<FlowCapableNode> ident) {
+ if (activeNodes.contains(ident)) {
+ synchronized (lockObj) {
+ if (activeNodes.contains(ident)) {
+ Set<InstanceIdentifier<FlowCapableNode>> set =
+ Sets.newHashSet(activeNodes);
+ set.remove(ident);
+ activeNodes = Collections.unmodifiableSet(set);
+ }
+ }
+ }
+ }
+
+ @Override
+ public SalFlowService getSalFlowService() {
+ return salFlowService;
+ }
+
+ @Override
+ public SalGroupService getSalGroupService() {
+ return salGroupService;
+ }
+
+ @Override
+ public SalMeterService getSalMeterService() {
+ return salMeterService;
+ }
+
+ @Override
+ public ForwardingRulesCommiter<Flow> getFlowCommiter() {
+ return flowListener;
+ }
+
+ @Override
+ public ForwardingRulesCommiter<Group> getGroupCommiter() {
+ return groupListener;
+ }
+
+ @Override
+ public ForwardingRulesCommiter<Meter> getMeterCommiter() {
+ return meterListener;
+ }
+
+ @Override
+ public FlowNodeReconciliation getFlowNodeReconciliation() {
+ return nodeListener;
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.frm.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.frm.ForwardingRulesManager;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.group.update.OriginalGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.group.update.UpdatedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * GroupForwarder
+ * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}
+ * for WildCardedPath to {@link Group} and ForwardingRulesCommiter interface for methods:
+ * add, update and remove {@link Group} processing for
+ * {@link org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent}.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class GroupForwarder extends AbstractListeningCommiter<Group> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(GroupForwarder.class);
+
+ // Handle for the config-datastore subtree listener; nulled once closed.
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ // Subscribes itself to CONFIGURATION changes under the wildcarded Group path.
+ public GroupForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ super(manager, Group.class);
+ Preconditions.checkNotNull(db, "DataBroker can not be null!");
+ this.listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath(), GroupForwarder.this, DataChangeScope.SUBTREE);
+ }
+
+ // Unregisters the change listener; failures are logged, not propagated.
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (Exception e) {
+ LOG.error("Error by stop FRM GroupChangeListener.", e);
+ }
+ listenerRegistration = null;
+ }
+ }
+
+ // Wildcarded path: /nodes/node/* -> FlowCapableNode augmentation -> group/*.
+ @Override
+ protected InstanceIdentifier<Group> getWildCardPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Group.class);
+ }
+
+ // Translates a deleted config Group into a RemoveGroup RPC for the owning node.
+ @Override
+ public void remove(final InstanceIdentifier<Group> identifier, final Group removeDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final Group group = (removeDataObj);
+ final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setGroupRef(new GroupRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ this.provider.getSalGroupService().removeGroup(builder.build());
+ }
+
+ // Translates an updated config Group into an UpdateGroup RPC carrying both the
+ // original and the updated group bodies.
+ @Override
+ public void update(final InstanceIdentifier<Group> identifier,
+ final Group original, final Group update,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final Group originalGroup = (original);
+ final Group updatedGroup = (update);
+ final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setGroupRef(new GroupRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
+ builder.setOriginalGroup((new OriginalGroupBuilder(originalGroup)).build());
+
+ this.provider.getSalGroupService().updateGroup(builder.build());
+ }
+
+ // Translates a created config Group into an AddGroup RPC for the owning node.
+ @Override
+ public void add(final InstanceIdentifier<Group> identifier, final Group addDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final Group group = (addDataObj);
+ final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setGroupRef(new GroupRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ this.provider.getSalGroupService().addGroup(builder.build());
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.frm.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.frm.ForwardingRulesManager;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.meter.update.OriginalMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.meter.update.UpdatedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterRef;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * MeterForwarder
+ * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}
+ * for WildCardedPath to {@link Meter} and ForwardingRulesCommiter interface for methods:
+ * add, update and remove {@link Meter} processing for
+ * {@link org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent}.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class MeterForwarder extends AbstractListeningCommiter<Meter> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(MeterForwarder.class);
+
+ // Handle for the config-datastore subtree listener; nulled once closed.
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ // Subscribes itself to CONFIGURATION changes under the wildcarded Meter path.
+ public MeterForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ super(manager, Meter.class);
+ Preconditions.checkNotNull(db, "DataBroker can not be null!");
+ this.listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath(), MeterForwarder.this, DataChangeScope.SUBTREE);
+ }
+
+ // Unregisters the change listener; failures are logged, not propagated.
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (Exception e) {
+ LOG.error("Error by stop FRM MeterChangeListener.", e);
+ }
+ listenerRegistration = null;
+ }
+ }
+
+ // Wildcarded path: /nodes/node/* -> FlowCapableNode augmentation -> meter/*.
+ @Override
+ protected InstanceIdentifier<Meter> getWildCardPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Meter.class);
+ }
+
+ // Translates a deleted config Meter into a RemoveMeter RPC for the owning node.
+ @Override
+ public void remove(final InstanceIdentifier<Meter> identifier, final Meter removeDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setMeterRef(new MeterRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ this.provider.getSalMeterService().removeMeter(builder.build());
+ }
+
+ // Translates an updated config Meter into an UpdateMeter RPC carrying both the
+ // original and the updated meter bodies.
+ @Override
+ public void update(final InstanceIdentifier<Meter> identifier,
+ final Meter original, final Meter update,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setMeterRef(new MeterRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
+ builder.setOriginalMeter((new OriginalMeterBuilder(original)).build());
+
+ this.provider.getSalMeterService().updateMeter(builder.build());
+ }
+
+ // Translates a created config Meter into an AddMeter RPC for the owning node.
+ @Override
+ public void add(final InstanceIdentifier<Meter> identifier, final Meter addDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+
+ final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
+
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
+ builder.setMeterRef(new MeterRef(identifier));
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+ this.provider.getSalMeterService().addMeter(builder.build());
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock;
+
+import org.junit.Test;
+import org.opendaylight.controller.frm.impl.ForwardingRulesManagerImpl;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Dscp;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatchBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.FRMTest;
+import test.mock.util.RpcProviderRegistryMock;
+import test.mock.util.SalFlowServiceMock;
+
+import java.util.Collections;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+// Integration-style tests for the FRM flow path: each test writes Flow config into the
+// datastore and asserts the SalFlowServiceMock observed the matching add/update/remove RPC.
+public class FlowListenerTest extends FRMTest {
+ RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
+ NodeKey s1Key = new NodeKey(new NodeId("S1"));
+ TableKey tableKey = new TableKey((short) 2);
+
+ // Writing two flows (one together with its table, one afterwards) must yield two
+ // addFlow RPC calls with sequential transaction URIs DOM-0 and DOM-1.
+ @Test
+ public void addTwoFlowsTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey);
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
+ Flow flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, tableII, table);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ SalFlowServiceMock salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<AddFlowInput> addFlowCalls = salFlowService.getAddFlowCalls();
+ assertEquals(1, addFlowCalls.size());
+ assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
+
+ // Second flow in the same table; expect a second addFlow call with DOM-1.
+ flowKey = new FlowKey(new FlowId("test_Flow2"));
+ flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ addFlowCalls = salFlowService.getAddFlowCalls();
+ assertEquals(2, addFlowCalls.size());
+ assertEquals("DOM-1", addFlowCalls.get(1).getTransactionUri().getValue());
+ assertEquals(2, addFlowCalls.get(1).getTableId().intValue());
+ assertEquals(flowII, addFlowCalls.get(1).getFlowRef().getValue());
+
+ forwardingRulesManager.close();
+ }
+
+ // Re-writing an existing flow (same key, changed out-group) must trigger an
+ // updateFlow RPC with strict original/updated flow bodies.
+ @Test
+ public void updateFlowTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey);
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
+ Flow flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, tableII, table);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ SalFlowServiceMock salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<AddFlowInput> addFlowCalls = salFlowService.getAddFlowCalls();
+ assertEquals(1, addFlowCalls.size());
+ assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
+
+ // Same flow key, modified body -> should surface as an update, not an add.
+ flowKey = new FlowKey(new FlowId("test_Flow"));
+ flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).setOutGroup((long) 5).build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<UpdateFlowInput> updateFlowCalls = salFlowService.getUpdateFlowCalls();
+ assertEquals(1, updateFlowCalls.size());
+ assertEquals("DOM-1", updateFlowCalls.get(0).getTransactionUri().getValue());
+ assertEquals(flowII, updateFlowCalls.get(0).getFlowRef().getValue());
+ assertEquals(Boolean.TRUE, updateFlowCalls.get(0).getOriginalFlow().isStrict());
+ assertEquals(Boolean.TRUE, updateFlowCalls.get(0).getUpdatedFlow().isStrict());
+
+ forwardingRulesManager.close();
+ }
+
+ // Changing a flow's match (IP DSCP 4 -> 5) must propagate the new match into the
+ // updated flow of the updateFlow RPC.
+ @Test
+ public void updateFlowScopeTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey);
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
+ IpMatch ipMatch = new IpMatchBuilder().setIpDscp(new Dscp((short)4)).build();
+ Match match = new MatchBuilder().setIpMatch(ipMatch).build();
+ Flow flow = new FlowBuilder().setMatch(match).setKey(flowKey).setTableId((short) 2).build();
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, tableII, table);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ SalFlowServiceMock salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<AddFlowInput> addFlowCalls = salFlowService.getAddFlowCalls();
+ assertEquals(1, addFlowCalls.size());
+ assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
+
+ flowKey = new FlowKey(new FlowId("test_Flow"));
+ flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ ipMatch = new IpMatchBuilder().setIpDscp(new Dscp((short)5)).build();
+ match = new MatchBuilder().setIpMatch(ipMatch).build();
+ flow = new FlowBuilder().setMatch(match).setKey(flowKey).setTableId((short) 2).build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<UpdateFlowInput> updateFlowCalls = salFlowService.getUpdateFlowCalls();
+ assertEquals(1, updateFlowCalls.size());
+ assertEquals("DOM-1", updateFlowCalls.get(0).getTransactionUri().getValue());
+ assertEquals(flowII, updateFlowCalls.get(0).getFlowRef().getValue());
+ assertEquals(ipMatch, updateFlowCalls.get(0).getUpdatedFlow().getMatch().getIpMatch());
+ forwardingRulesManager.close();
+ }
+
+ // Deleting a flow from config must trigger a strict removeFlow RPC referencing it.
+ @Test
+ public void deleteFlowTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey);
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
+ Flow flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, tableII, table);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ assertCommit(writeTx.submit());
+ SalFlowServiceMock salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<AddFlowInput> addFlowCalls = salFlowService.getAddFlowCalls();
+ assertEquals(1, addFlowCalls.size());
+ assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
+
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, flowII);
+ assertCommit(writeTx.submit());
+ salFlowService = (SalFlowServiceMock) forwardingRulesManager.getSalFlowService();
+ List<RemoveFlowInput> removeFlowCalls = salFlowService.getRemoveFlowCalls();
+ assertEquals(1, removeFlowCalls.size());
+ assertEquals("DOM-1", removeFlowCalls.get(0).getTransactionUri().getValue());
+ assertEquals(flowII, removeFlowCalls.get(0).getFlowRef().getValue());
+ assertEquals(Boolean.TRUE, removeFlowCalls.get(0).isStrict());
+
+ forwardingRulesManager.close();
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock;
+
+import org.junit.Test;
+import org.opendaylight.controller.frm.impl.ForwardingRulesManagerImpl;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.FRMTest;
+import test.mock.util.RpcProviderRegistryMock;
+import test.mock.util.SalGroupServiceMock;
+
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verifies that the Forwarding Rules Manager observes group changes in the
+ * CONFIGURATION datastore and translates them into addGroup / updateGroup /
+ * removeGroup RPC calls. The RPC layer is backed by
+ * {@link SalGroupServiceMock}, which records every invocation so the tests
+ * can assert on the captured inputs (including the sequential "DOM-n"
+ * transaction URIs the FRM assigns).
+ */
+public class GroupListenerTest extends FRMTest {
+ RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
+ // Inventory key of the single test switch shared by all test cases.
+ NodeKey s1Key = new NodeKey(new NodeId("S1"));
+
+ @Test
+ public void addTwoGroupsTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ GroupKey groupKey = new GroupKey(new GroupId((long) 255));
+ InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, groupKey);
+ Group group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
+
+ // Writing the first group to CONFIG must trigger exactly one addGroup RPC.
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ assertCommit(writeTx.submit());
+ SalGroupServiceMock salGroupService = (SalGroupServiceMock) forwardingRulesManager.getSalGroupService();
+ List<AddGroupInput> addGroupCalls = salGroupService.getAddGroupCalls();
+ assertEquals(1, addGroupCalls.size());
+ assertEquals("DOM-0", addGroupCalls.get(0).getTransactionUri().getValue());
+
+ // A second, distinct group yields a second addGroup call with the next tx URI.
+ groupKey = new GroupKey(new GroupId((long) 256));
+ groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, groupKey);
+ group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ assertCommit(writeTx.submit());
+ salGroupService = (SalGroupServiceMock) forwardingRulesManager.getSalGroupService();
+ addGroupCalls = salGroupService.getAddGroupCalls();
+ assertEquals(2, addGroupCalls.size());
+ assertEquals("DOM-1", addGroupCalls.get(1).getTransactionUri().getValue());
+
+ forwardingRulesManager.close();
+ }
+
+ @Test
+ public void updateGroupTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ GroupKey groupKey = new GroupKey(new GroupId((long) 255));
+ InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, groupKey);
+ Group group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
+
+ // Initial write: one addGroup call expected.
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ assertCommit(writeTx.submit());
+ SalGroupServiceMock salGroupService = (SalGroupServiceMock) forwardingRulesManager.getSalGroupService();
+ List<AddGroupInput> addGroupCalls = salGroupService.getAddGroupCalls();
+ assertEquals(1, addGroupCalls.size());
+ assertEquals("DOM-0", addGroupCalls.get(0).getTransactionUri().getValue());
+
+ // Overwriting the same identifier with changed content must be reported
+ // as an update (updateGroup), not another add.
+ group = new GroupBuilder().setKey(groupKey).setGroupName("Group2").build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ assertCommit(writeTx.submit());
+ salGroupService = (SalGroupServiceMock) forwardingRulesManager.getSalGroupService();
+ List<UpdateGroupInput> updateGroupCalls = salGroupService.getUpdateGroupCalls();
+ assertEquals(1, updateGroupCalls.size());
+ assertEquals("DOM-1", updateGroupCalls.get(0).getTransactionUri().getValue());
+
+ forwardingRulesManager.close();
+ }
+
+ @Test
+ public void removeGroupTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ GroupKey groupKey = new GroupKey(new GroupId((long) 255));
+ InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, groupKey);
+ Group group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
+
+ // Initial write: one addGroup call expected.
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ assertCommit(writeTx.submit());
+ SalGroupServiceMock salGroupService = (SalGroupServiceMock) forwardingRulesManager.getSalGroupService();
+ List<AddGroupInput> addGroupCalls = salGroupService.getAddGroupCalls();
+ assertEquals(1, addGroupCalls.size());
+ assertEquals("DOM-0", addGroupCalls.get(0).getTransactionUri().getValue());
+
+ // Deleting the group from CONFIG must trigger exactly one removeGroup RPC.
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, groupII);
+ assertCommit(writeTx.submit());
+ salGroupService = (SalGroupServiceMock) forwardingRulesManager.getSalGroupService();
+ List<RemoveGroupInput> removeGroupCalls = salGroupService.getRemoveGroupCalls();
+ assertEquals(1, removeGroupCalls.size());
+ assertEquals("DOM-1", removeGroupCalls.get(0).getTransactionUri().getValue());
+
+ forwardingRulesManager.close();
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock;
+
+import org.junit.Test;
+import org.opendaylight.controller.frm.impl.ForwardingRulesManagerImpl;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.FRMTest;
+import test.mock.util.RpcProviderRegistryMock;
+import test.mock.util.SalMeterServiceMock;
+
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verifies that the Forwarding Rules Manager observes meter changes in the
+ * CONFIGURATION datastore and translates them into addMeter / updateMeter /
+ * removeMeter RPC calls. The RPC layer is backed by
+ * {@link SalMeterServiceMock}, which records every invocation so the tests
+ * can assert on the captured inputs, including the meter reference and the
+ * sequential "DOM-n" transaction URIs the FRM assigns.
+ */
+public class MeterListenerTest extends FRMTest {
+ RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
+ // Inventory key of the single test switch shared by all test cases.
+ NodeKey s1Key = new NodeKey(new NodeId("S1"));
+
+ @Test
+ public void addTwoMetersTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ MeterKey meterKey = new MeterKey(new MeterId((long) 2000));
+ InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
+ Meter meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_one").build();
+
+ // Writing the first meter to CONFIG must trigger exactly one addMeter RPC.
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ assertCommit(writeTx.submit());
+ SalMeterServiceMock salMeterService = (SalMeterServiceMock) forwardingRulesManager.getSalMeterService();
+ List<AddMeterInput> addMeterCalls = salMeterService.getAddMeterCalls();
+ assertEquals(1, addMeterCalls.size());
+ assertEquals("DOM-0", addMeterCalls.get(0).getTransactionUri().getValue());
+
+ // A second, distinct meter yields a second addMeter call whose MeterRef
+ // must point at the instance identifier just written.
+ meterKey = new MeterKey(new MeterId((long) 2001));
+ meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
+ meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_two").setBarrier(true).build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ assertCommit(writeTx.submit());
+ salMeterService = (SalMeterServiceMock) forwardingRulesManager.getSalMeterService();
+ addMeterCalls = salMeterService.getAddMeterCalls();
+ assertEquals(2, addMeterCalls.size());
+ assertEquals("DOM-1", addMeterCalls.get(1).getTransactionUri().getValue());
+ assertEquals(meterII, addMeterCalls.get(1).getMeterRef().getValue());
+
+ forwardingRulesManager.close();
+ }
+
+ @Test
+ public void updateMeterTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ MeterKey meterKey = new MeterKey(new MeterId((long) 2000));
+ InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
+ Meter meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_one").setBarrier(false).build();
+
+ // Initial write: one addMeter call expected.
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ assertCommit(writeTx.submit());
+ SalMeterServiceMock salMeterService = (SalMeterServiceMock) forwardingRulesManager.getSalMeterService();
+ List<AddMeterInput> addMeterCalls = salMeterService.getAddMeterCalls();
+ assertEquals(1, addMeterCalls.size());
+ assertEquals("DOM-0", addMeterCalls.get(0).getTransactionUri().getValue());
+
+ // Overwriting the same identifier with changed content must be reported
+ // as an update (updateMeter), not another add.
+ meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_two").setBarrier(true).build();
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ assertCommit(writeTx.submit());
+ salMeterService = (SalMeterServiceMock) forwardingRulesManager.getSalMeterService();
+ List<UpdateMeterInput> updateMeterCalls = salMeterService.getUpdateMeterCalls();
+ assertEquals(1, updateMeterCalls.size());
+ assertEquals("DOM-1", updateMeterCalls.get(0).getTransactionUri().getValue());
+ assertEquals(meterII, updateMeterCalls.get(0).getMeterRef().getValue());
+
+ forwardingRulesManager.close();
+ }
+
+ @Test
+ public void removeMeterTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ MeterKey meterKey = new MeterKey(new MeterId((long) 2000));
+ InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
+ Meter meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_one").build();
+
+ // Initial write: one addMeter call expected.
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ assertCommit(writeTx.submit());
+ SalMeterServiceMock salMeterService = (SalMeterServiceMock) forwardingRulesManager.getSalMeterService();
+ List<AddMeterInput> addMeterCalls = salMeterService.getAddMeterCalls();
+ assertEquals(1, addMeterCalls.size());
+ assertEquals("DOM-0", addMeterCalls.get(0).getTransactionUri().getValue());
+
+ // Deleting the meter from CONFIG must trigger exactly one removeMeter RPC
+ // carrying a reference to the deleted meter.
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, meterII);
+ assertCommit(writeTx.submit());
+ salMeterService = (SalMeterServiceMock) forwardingRulesManager.getSalMeterService();
+ List<RemoveMeterInput> removeMeterCalls = salMeterService.getRemoveMeterCalls();
+ assertEquals(1, removeMeterCalls.size());
+ assertEquals("DOM-1", removeMeterCalls.get(0).getTransactionUri().getValue());
+ assertEquals(meterII, removeMeterCalls.get(0).getMeterRef().getValue());
+
+ forwardingRulesManager.close();
+ }
+
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock;
+
+import org.junit.Test;
+import org.opendaylight.controller.frm.impl.ForwardingRulesManagerImpl;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.FRMTest;
+import test.mock.util.RpcProviderRegistryMock;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Verifies that the Forwarding Rules Manager tracks node liveness: a node is
+ * reported active after its FlowCapableNode augmentation is written to the
+ * datastore, and inactive again once the node has been removed.
+ */
+public class NodeListenerTest extends FRMTest {
+
+ RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
+ NodeKey s1Key = new NodeKey(new NodeId("S1"));
+
+ @Test
+ public void addRemoveNodeTest() throws Exception {
+ ForwardingRulesManagerImpl forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock);
+ forwardingRulesManager.start();
+
+ addFlowCapableNode(s1Key);
+
+ InstanceIdentifier<FlowCapableNode> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class);
+
+ // The node was just written, so the FRM must consider it active.
+ boolean nodeActive = forwardingRulesManager.isNodeActive(nodeII);
+ assertTrue(nodeActive);
+
+ removeNode(s1Key);
+
+ // After removal the FRM must no longer report the node as active.
+ nodeActive = forwardingRulesManager.isNodeActive(nodeII);
+ assertFalse(nodeActive);
+
+ // Release the FRM's listener registrations; every sibling test
+ // (Flow/Group/MeterListenerTest) closes the manager the same way.
+ forwardingRulesManager.close();
+ }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Base class for tests that need a working binding-aware {@link DataBroker}
+ * and the underlying {@link DOMDataBroker}. Both brokers are created by a
+ * {@link DataBrokerTestCustomizer} once the schema context is available.
+ */
+public class AbstractDataBrokerTest extends AbstractSchemaAwareTest {
+
+ private DataBrokerTestCustomizer testCustomizer;
+ private DataBroker dataBroker;
+ private DOMDataBroker domBroker;
+
+
+ @Override
+ protected void setupWithSchema(final SchemaContext context) {
+ // Brokers are created first and then fed the schema so their
+ // schema-context listeners (registered by the customizer) fire.
+ testCustomizer = createDataBrokerTestCustomizer();
+ dataBroker = testCustomizer.createDataBroker();
+ domBroker = testCustomizer.createDOMDataBroker();
+ testCustomizer.updateSchema(context);
+ setupWithDataBroker(dataBroker);
+ }
+
+ protected void setupWithDataBroker(final DataBroker dataBroker) {
+ // Intentionally left No-op, subclasses may customize it
+ }
+
+ // Hook for subclasses that need a customized broker wiring.
+ protected DataBrokerTestCustomizer createDataBrokerTestCustomizer() {
+ return new DataBrokerTestCustomizer();
+ }
+
+ public DataBroker getDataBroker() {
+ return dataBroker;
+ }
+
+ public DOMDataBroker getDomBroker() {
+ return domBroker;
+ }
+
+ /**
+ * Waits up to 500 ms for the submitted transaction to commit; any failure
+ * or timeout is rethrown unchecked so the calling test fails immediately.
+ */
+ protected static final void assertCommit(final ListenableFuture<Void> commit) {
+ try {
+ commit.get(500, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException | ExecutionException | TimeoutException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.junit.Before;
+import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Base class that assembles a {@link SchemaContext} from the YANG module
+ * infos discoverable on the classpath and hands it to the concrete test
+ * before each test method runs.
+ */
+public abstract class AbstractSchemaAwareTest {
+
+ private Iterable<YangModuleInfo> moduleInfos;
+ private SchemaContext schemaContext;
+
+
+ // Subclasses may override to restrict the set of modules used.
+ protected Iterable<YangModuleInfo> getModuleInfos() {
+ return BindingReflections.loadModuleInfos();
+ }
+
+
+ @Before
+ public final void setup() {
+ moduleInfos = getModuleInfos();
+ ModuleInfoBackedContext moduleContext = ModuleInfoBackedContext.create();
+ moduleContext.addModuleInfos(moduleInfos);
+ // tryToCreateSchemaContext() returns an Optional-like holder; .get()
+ // fails fast here if the schema cannot be assembled.
+ schemaContext = moduleContext.tryToCreateSchemaContext().get();
+ setupWithSchema(schemaContext);
+ }
+
+ /**
+ * Sets up the concrete test with the freshly built schema context.
+ * Called from {@link #setup()} once the context has been assembled from
+ * the classpath module infos.
+ *
+ * @param context the global schema context for this test run
+ */
+ protected abstract void setupWithSchema(SchemaContext context);
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import javassist.ClassPool;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBindingDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.DataObjectSerializerGenerator;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
+import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
+import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
+import org.opendaylight.yangtools.sal.binding.generator.impl.RuntimeGeneratedMappingServiceImpl;
+import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Assembles the full in-memory broker stack used by the tests: two
+ * InMemoryDOMDataStores (OPERATIONAL + CONFIGURATION), a serialized DOM
+ * broker on top of them, and a binding-aware broker bridged through a
+ * runtime-generated binding/normalized-node codec. All executors are
+ * same-thread, so commits complete synchronously within the test thread.
+ */
+public class DataBrokerTestCustomizer {
+
+ private DOMDataBroker domDataBroker;
+ private final RuntimeGeneratedMappingServiceImpl mappingService;
+ private final MockSchemaService schemaService;
+ private ImmutableMap<LogicalDatastoreType, DOMStore> datastores;
+ private final BindingToNormalizedNodeCodec bindingToNormalized ;
+
+ public ImmutableMap<LogicalDatastoreType, DOMStore> createDatastores() {
+ return ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
+ .put(LogicalDatastoreType.OPERATIONAL, createOperationalDatastore())
+ .put(LogicalDatastoreType.CONFIGURATION,createConfigurationDatastore())
+ .build();
+ }
+
+ public DataBrokerTestCustomizer() {
+ schemaService = new MockSchemaService();
+ ClassPool pool = ClassPool.getDefault();
+ mappingService = new RuntimeGeneratedMappingServiceImpl(pool);
+ DataObjectSerializerGenerator generator = StreamWriterGenerator.create(JavassistUtils.forClassPool(pool));
+ BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
+ GeneratedClassLoadingStrategy loading = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
+ bindingToNormalized = new BindingToNormalizedNodeCodec(loading, mappingService, codecRegistry);
+ // The codec must track schema changes pushed via updateSchema().
+ schemaService.registerSchemaContextListener(bindingToNormalized);
+ }
+
+ public DOMStore createConfigurationDatastore() {
+ // sameThreadExecutor keeps datastore notifications synchronous in tests.
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ schemaService.registerSchemaContextListener(store);
+ return store;
+ }
+
+ public DOMStore createOperationalDatastore() {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+ schemaService.registerSchemaContextListener(store);
+ return store;
+ }
+
+ public DOMDataBroker createDOMDataBroker() {
+ return new SerializedDOMDataBroker(getDatastores(), getCommitCoordinatorExecutor());
+ }
+
+ public ListeningExecutorService getCommitCoordinatorExecutor() {
+ return MoreExecutors.sameThreadExecutor();
+ }
+
+ public DataBroker createDataBroker() {
+ return new ForwardedBindingDataBroker(getDOMDataBroker(), bindingToNormalized, schemaService );
+ }
+
+ public ForwardedBackwardsCompatibleDataBroker createBackwardsCompatibleDataBroker() {
+ return new ForwardedBackwardsCompatibleDataBroker(getDOMDataBroker(), bindingToNormalized, getSchemaService(), MoreExecutors.sameThreadExecutor());
+ }
+
+ private SchemaService getSchemaService() {
+ return schemaService;
+ }
+
+ // Lazily created so createDataBroker()/createDOMDataBroker() share one instance.
+ private DOMDataBroker getDOMDataBroker() {
+ if(domDataBroker == null) {
+ domDataBroker = createDOMDataBroker();
+ }
+ return domDataBroker;
+ }
+
+ private synchronized ImmutableMap<LogicalDatastoreType, DOMStore> getDatastores() {
+ if (datastores == null) {
+ datastores = createDatastores();
+ }
+ return datastores;
+ }
+
+ /**
+ * Pushes a new schema context to every registered listener (stores, codec)
+ * and to the legacy mapping service.
+ */
+ public void updateSchema(final SchemaContext ctx) {
+ schemaService.changeSchema(ctx);
+ mappingService.onGlobalContextUpdated(ctx);
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import java.util.Collections;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Shared base class for the FRM listener tests: provides helpers that
+ * populate and tear down the inventory node the tests operate on.
+ */
+public abstract class FRMTest extends AbstractDataBrokerTest {
+
+ /**
+ * Writes an empty Nodes container plus one node carrying an empty
+ * FlowCapableNode augmentation into both the OPERATIONAL and
+ * CONFIGURATION datastores and waits for the commit to complete.
+ */
+ public void addFlowCapableNode(NodeKey nodeKey) throws ExecutionException, InterruptedException {
+ Nodes nodes = new NodesBuilder().setNode(Collections.<Node>emptyList()).build();
+ InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, nodeKey);
+
+ FlowCapableNodeBuilder fcnBuilder = new FlowCapableNodeBuilder();
+ NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(nodeKey);
+ nodeBuilder.addAugmentation(FlowCapableNode.class, fcnBuilder.build());
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier, nodeBuilder.build());
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowNodeIdentifier, nodeBuilder.build());
+ assertCommit(writeTx.submit());
+ }
+
+ /**
+ * Deletes the given node from the OPERATIONAL datastore. Uses
+ * assertCommit (bounded 500 ms wait) instead of the previous unbounded
+ * submit().get(), so a hung commit fails the test quickly and matches
+ * the commit style used by addFlowCapableNode.
+ */
+ public void removeNode(NodeKey nodeKey) throws ExecutionException, InterruptedException {
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey));
+ assertCommit(writeTx.submit());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.ListenerRegistry;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+
+/**
+ * Minimal {@link SchemaService} for tests: holds a single mutable schema
+ * context and fans out {@link #changeSchema(SchemaContext)} updates to all
+ * registered listeners. Module add/remove is unsupported.
+ */
+public final class MockSchemaService implements SchemaService, SchemaContextProvider {
+
+ private SchemaContext schemaContext;
+
+ ListenerRegistry<SchemaContextListener> listeners = ListenerRegistry.create();
+
+ @Override
+ public void addModule(final Module module) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized SchemaContext getGlobalContext() {
+ return schemaContext;
+ }
+
+ // Session context is not distinguished from the global one in this mock.
+ @Override
+ public synchronized SchemaContext getSessionContext() {
+ return schemaContext;
+ }
+
+ @Override
+ public ListenerRegistration<SchemaContextListener> registerSchemaContextListener(
+ final SchemaContextListener listener) {
+ return listeners.register(listener);
+ }
+
+ @Override
+ public void removeModule(final Module module) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized SchemaContext getSchemaContext() {
+ return schemaContext;
+ }
+
+ /**
+ * Replaces the held schema context and synchronously notifies every
+ * registered listener of the new context.
+ */
+ public synchronized void changeSchema(final SchemaContext newContext) {
+ schemaContext = newContext;
+ for (ListenerRegistration<SchemaContextListener> listener : listeners) {
+ listener.getInstance().onGlobalContextUpdated(schemaContext);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.RpcService;
+
+/**
+ * Registry stub that hands out recording mocks for the three SAL services
+ * the FRM consumes. Registration methods return null; the tests never use
+ * the returned registrations.
+ */
+public class RpcProviderRegistryMock implements RpcProviderRegistry {
+ @Override
+ public <T extends RpcService> BindingAwareBroker.RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ return null;
+ }
+
+ @Override
+ public <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ return null;
+ }
+
+ @Override
+ public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(L listener) {
+ return null;
+ }
+
+ /**
+ * Returns a fresh recording mock for the requested SAL service, or null
+ * for any other service interface. Each call creates a new mock instance.
+ * The casts are safe because each branch returns the mock matching the
+ * compared interface class.
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public <T extends RpcService> T getRpcService(Class<T> serviceInterface) {
+ if (serviceInterface.equals(SalFlowService.class)) {
+ return (T) new SalFlowServiceMock();
+ } else if (serviceInterface.equals(SalGroupService.class)) {
+ return (T) new SalGroupServiceMock();
+ } else if (serviceInterface.equals(SalMeterService.class)) {
+ return (T) new SalMeterServiceMock();
+ } else {
+ return null;
+ }
+ }
+}
--- /dev/null
+/*
+ * Recording stand-in for the SAL flow RPC service. No RPC is actually
+ * executed: each request object is captured in a list so tests can assert
+ * on exactly what the Forwarding Rules Manager sent.
+ */
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowOutput;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+
+public class SalFlowServiceMock implements SalFlowService {
+
+ private final List<AddFlowInput> addFlowCalls = new ArrayList<>();
+ private final List<RemoveFlowInput> removeFlowCalls = new ArrayList<>();
+ private final List<UpdateFlowInput> updateFlowCalls = new ArrayList<>();
+
+ // Each RPC method records its input and returns null; the tests under
+ // test/mock never inspect the returned future.
+
+ @Override
+ public Future<RpcResult<AddFlowOutput>> addFlow(AddFlowInput input) {
+ this.addFlowCalls.add(input);
+ return null;
+ }
+
+ public List<AddFlowInput> getAddFlowCalls() {
+ return this.addFlowCalls;
+ }
+
+ @Override
+ public Future<RpcResult<RemoveFlowOutput>> removeFlow(RemoveFlowInput input) {
+ this.removeFlowCalls.add(input);
+ return null;
+ }
+
+ public List<RemoveFlowInput> getRemoveFlowCalls() {
+ return this.removeFlowCalls;
+ }
+
+ @Override
+ public Future<RpcResult<UpdateFlowOutput>> updateFlow(UpdateFlowInput input) {
+ this.updateFlowCalls.add(input);
+ return null;
+ }
+
+ public List<UpdateFlowInput> getUpdateFlowCalls() {
+ return this.updateFlowCalls;
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupOutput;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+
+/**
+ * Test double for {@link SalGroupService} that records every RPC input it
+ * receives instead of programming any groups. Tests inspect the recorded
+ * inputs through the getter methods.
+ *
+ * NOTE(review): all RPC methods return {@code null} instead of a completed
+ * Future — confirm no test dereferences the returned Future.
+ */
+public class SalGroupServiceMock implements SalGroupService {
+ // One recording list per RPC; populated in invocation order.
+ private List<AddGroupInput> addGroupCalls = new ArrayList<>();
+ private List<RemoveGroupInput> removeGroupCalls = new ArrayList<>();
+ private List<UpdateGroupInput> updateGroupCalls = new ArrayList<>();
+
+ /** Records the input and returns {@code null} (no real RPC is made). */
+ @Override
+ public Future<RpcResult<AddGroupOutput>> addGroup(AddGroupInput input) {
+ addGroupCalls.add(input);
+ return null;
+ }
+
+ /** Records the input and returns {@code null} (no real RPC is made). */
+ @Override
+ public Future<RpcResult<RemoveGroupOutput>> removeGroup(RemoveGroupInput input) {
+ removeGroupCalls.add(input);
+ return null;
+ }
+
+ /** Records the input and returns {@code null} (no real RPC is made). */
+ @Override
+ public Future<RpcResult<UpdateGroupOutput>> updateGroup(UpdateGroupInput input) {
+ updateGroupCalls.add(input);
+ return null;
+ }
+
+ /** @return all inputs passed to {@link #addGroup}, in call order. */
+ public List<AddGroupInput> getAddGroupCalls() {
+ return addGroupCalls;
+ }
+
+ /** @return all inputs passed to {@link #removeGroup}, in call order. */
+ public List<RemoveGroupInput> getRemoveGroupCalls() {
+ return removeGroupCalls;
+ }
+
+ /** @return all inputs passed to {@link #updateGroup}, in call order. */
+ public List<UpdateGroupInput> getUpdateGroupCalls() {
+ return updateGroupCalls;
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterOutput;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+
+/**
+ * Test double for {@link SalMeterService} that records every RPC input it
+ * receives instead of programming any meters. Tests inspect the recorded
+ * inputs through the getter methods.
+ *
+ * NOTE(review): all RPC methods return {@code null} instead of a completed
+ * Future — confirm no test dereferences the returned Future.
+ */
+public class SalMeterServiceMock implements SalMeterService {
+ // One recording list per RPC; populated in invocation order.
+ private List<AddMeterInput> addMeterCalls = new ArrayList<>();
+ private List<RemoveMeterInput> removeMeterCalls = new ArrayList<>();
+ private List<UpdateMeterInput> updateMeterCalls = new ArrayList<>();
+
+ /** Records the input and returns {@code null} (no real RPC is made). */
+ @Override
+ public Future<RpcResult<AddMeterOutput>> addMeter(AddMeterInput input) {
+ addMeterCalls.add(input);
+ return null;
+ }
+
+ /** Records the input and returns {@code null} (no real RPC is made). */
+ @Override
+ public Future<RpcResult<RemoveMeterOutput>> removeMeter(RemoveMeterInput input) {
+ removeMeterCalls.add(input);
+ return null;
+ }
+
+ /** Records the input and returns {@code null} (no real RPC is made). */
+ @Override
+ public Future<RpcResult<UpdateMeterOutput>> updateMeter(UpdateMeterInput input) {
+ updateMeterCalls.add(input);
+ return null;
+ }
+
+ /** @return all inputs passed to {@link #addMeter}, in call order. */
+ public List<AddMeterInput> getAddMeterCalls() {
+ return addMeterCalls;
+ }
+
+ /** @return all inputs passed to {@link #removeMeter}, in call order. */
+ public List<RemoveMeterInput> getRemoveMeterCalls() {
+ return removeMeterCalls;
+ }
+
+ /** @return all inputs passed to {@link #updateMeter}, in call order. */
+ public List<UpdateMeterInput> getUpdateMeterCalls() {
+ return updateMeterCalls;
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>inventory-manager</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-service</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-inventory</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Bundle-Activator>org.opendaylight.controller.md.inventory.manager.InventoryActivator</Bundle-Activator>
+ <Private-Package>org.opendaylight.controller.md.inventory.manager</Private-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
+</project>
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.inventory.manager;
+
+import java.util.ArrayList;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingDeque;
+
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Single-threaded exporter of flow-capable inventory into the OPERATIONAL
+ * datastore. Inventory notifications are translated by
+ * {@link NodeChangeCommiter} into {@link InventoryOperation}s, queued here,
+ * and applied in batches onto transactions from a {@link BindingTransactionChain}.
+ * Implements {@link TransactionChainListener} so a broken chain can be
+ * replaced transparently.
+ */
+class FlowCapableInventoryProvider implements AutoCloseable, Runnable, TransactionChainListener {
+ private static final Logger LOG = LoggerFactory.getLogger(FlowCapableInventoryProvider.class);
+ // Bound on queued operations; enqueue() blocks once this is reached.
+ private static final int QUEUE_DEPTH = 500;
+ // Maximum number of operations folded into a single transaction.
+ private static final int MAX_BATCH = 100;
+
+ private final BlockingQueue<InventoryOperation> queue = new LinkedBlockingDeque<>(QUEUE_DEPTH);
+ private final NotificationProviderService notificationService;
+
+ private final DataBroker dataBroker;
+ // Current chain; null means "broken, allocate a new one lazily".
+ // Guarded by synchronized methods newEmptyTransaction()/failCurrentChain().
+ private BindingTransactionChain txChain;
+ private ListenerRegistration<?> listenerRegistration;
+ // Daemon worker running run(); created in start(), joined in close().
+ private Thread thread;
+
+ FlowCapableInventoryProvider(final DataBroker dataBroker, final NotificationProviderService notificationService) {
+ this.dataBroker = Preconditions.checkNotNull(dataBroker);
+ this.notificationService = Preconditions.checkNotNull(notificationService);
+ }
+
+ /**
+ * Registers the notification listener, allocates the first transaction
+ * chain and starts the daemon processing thread.
+ */
+ void start() {
+ final NodeChangeCommiter changeCommiter = new NodeChangeCommiter(FlowCapableInventoryProvider.this);
+ this.listenerRegistration = this.notificationService.registerNotificationListener(changeCommiter);
+
+ this.txChain = (dataBroker.createTransactionChain(this));
+ thread = new Thread(this);
+ thread.setDaemon(true);
+ thread.setName("FlowCapableInventoryProvider");
+ thread.start();
+
+ LOG.info("Flow Capable Inventory Provider started.");
+ }
+
+ /**
+ * Queues an operation for the processing thread; blocks when the queue
+ * holds QUEUE_DEPTH entries.
+ *
+ * NOTE(review): on InterruptedException the operation is silently dropped
+ * and the interrupt status is NOT restored via
+ * Thread.currentThread().interrupt() — confirm this is intentional.
+ */
+ void enqueue(final InventoryOperation op) {
+ try {
+ queue.put(op);
+ } catch (final InterruptedException e) {
+ LOG.warn("Failed to enqueue operation {}", op, e);
+ }
+ }
+
+ /**
+ * Worker loop: blocks for the first operation, then drains up to
+ * MAX_BATCH operations into a single transaction per iteration.
+ * Terminates when interrupted (see {@link #close()}).
+ */
+ @Override
+ public void run() {
+ try {
+ for (; ; ) {
+ InventoryOperation op = queue.take();
+ int ops = 0;
+ final ArrayList<InventoryOperation> opsToApply = new ArrayList<>(MAX_BATCH);
+ // Greedily collect whatever is already queued, capped at MAX_BATCH.
+ do {
+ opsToApply.add(op);
+ ops++;
+ if (ops < MAX_BATCH) {
+ op = queue.poll();
+ } else {
+ op = null;
+ }
+ } while (op != null);
+ submitOperations(opsToApply);
+ }
+ } catch (final InterruptedException e) {
+ LOG.info("Processing interrupted, terminating", e);
+ }
+
+ // Drain all events, making sure any blocked threads are unblocked
+ while (!queue.isEmpty()) {
+ queue.poll();
+ }
+ }
+
+ /**
+ * Starts new empty transaction, customizes it with the submitted operations
+ * and submits it to the data broker.
+ *
+ * If transaction chain failed during customization of transaction
+ * it allocates new chain and empty transaction and customizes it
+ * with submitted operations.
+ *
+ * This does not retry failed transaction. It only retries it when
+ * chain failed during customization of transaction chain.
+ *
+ * @param opsToApply operations to fold into the transaction, in order
+ */
+ private void submitOperations(final ArrayList<InventoryOperation> opsToApply) {
+ final ReadWriteTransaction tx = createCustomizedTransaction(opsToApply);
+ LOG.debug("Processed {} operations, submitting transaction {}", opsToApply.size(), tx.getIdentifier());
+ try {
+ tx.submit();
+ } catch (final IllegalStateException e) {
+ /*
+ * Transaction chain failed during doing batch, so we need to null
+ * tx chain and continue processing queue.
+ *
+ * We fail current txChain which was allocated with createTransaction.
+ */
+ failCurrentChain(txChain);
+ /*
+ * We will retry transaction once in order to not lose any data.
+ *
+ */
+ final ReadWriteTransaction retryTx = createCustomizedTransaction(opsToApply);
+ retryTx.submit();
+ }
+ }
+
+ /**
+ * Creates new empty ReadWriteTransaction. If transaction chain
+ * was failed, it will allocate new transaction chain
+ * and assign it with this Operation Executor.
+ *
+ * This call is synchronized to prevent race with {@link #failCurrentChain(TransactionChain)}.
+ *
+ * @return New Empty ReadWrite transaction, which continues this chain or starts new transaction
+ * chain.
+ */
+ private synchronized ReadWriteTransaction newEmptyTransaction() {
+ try {
+ if(txChain == null) {
+ // Chain was broken so we need to replace it.
+ txChain = dataBroker.createTransactionChain(this);
+ }
+ return txChain.newReadWriteTransaction();
+ } catch (final IllegalStateException e) {
+ LOG.debug("Chain is broken, need to allocate new transaction chain.",e);
+ /*
+ * Chain was broken by previous transaction,
+ * but there was race between this.
+ * Chain will be closed by #onTransactionChainFailed method.
+ */
+ txChain = dataBroker.createTransactionChain(this);
+ return txChain.newReadWriteTransaction();
+ }
+ }
+
+ /**
+ * Creates customized not-submitted transaction, which is ready to be submitted.
+ *
+ * @param opsToApply Operations which are used to customize transaction.
+ * @return Non-empty transaction.
+ */
+ private ReadWriteTransaction createCustomizedTransaction(final ArrayList<InventoryOperation> opsToApply) {
+ final ReadWriteTransaction tx = newEmptyTransaction();
+ for(final InventoryOperation op : opsToApply) {
+ op.applyOperation(tx);
+ }
+ return tx;
+ }
+
+ /**
+ * Nulls the current chain so the next newEmptyTransaction() allocates a
+ * fresh one — but only if the failed chain is still the current one.
+ */
+ private synchronized void failCurrentChain(final TransactionChain<?, ?> chain) {
+ if(txChain == chain) {
+ txChain = null;
+ }
+ }
+
+ @Override
+ public void onTransactionChainFailed(final TransactionChain<?, ?> chain, final AsyncTransaction<?, ?> transaction,
+ final Throwable cause) {
+ LOG.error("Failed to export Flow Capable Inventory, Transaction {} failed.", transaction.getIdentifier(), cause);
+ chain.close();
+ // NOTE(review): this txChain read is unsynchronized; it is only an
+ // optimization — failCurrentChain() re-checks under the lock.
+ if(txChain == chain) {
+ // Current chain is broken, so we will null it, in order to not use it.
+ failCurrentChain(chain);
+ }
+ }
+
+ @Override
+ public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
+ // NOOP
+ }
+
+ /**
+ * Stops the provider: unregisters the listener, interrupts and joins the
+ * worker thread, then closes the transaction chain.
+ *
+ * @throws InterruptedException if interrupted while joining the worker
+ */
+ @Override
+ public void close() throws InterruptedException {
+ LOG.info("Flow Capable Inventory Provider stopped.");
+ if (this.listenerRegistration != null) {
+ try {
+ this.listenerRegistration.close();
+ } catch (final Exception e) {
+ LOG.error("Failed to stop inventory provider", e);
+ }
+ listenerRegistration = null;
+ }
+
+ if (thread != null) {
+ thread.interrupt();
+ thread.join();
+ thread = null;
+ }
+ if (txChain != null) {
+ try {
+ txChain.close();
+ } catch (final IllegalStateException e) {
+ // It is possible chain failed and was closed by #onTransactionChainFailed
+ LOG.debug("Chain was already closed.");
+ }
+ txChain = null;
+ }
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.inventory.manager;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * OSGi bundle activator that owns the lifecycle of a
+ * {@link FlowCapableInventoryProvider}: created when the binding-aware
+ * session is initiated, closed when the bundle stops.
+ */
+public class InventoryActivator extends AbstractBindingAwareProvider {
+ private static final Logger LOG = LoggerFactory.getLogger(InventoryActivator.class);
+ // Created in onSessionInitiated(), disposed in stopImpl().
+ private FlowCapableInventoryProvider provider;
+
+ /**
+ * Wires the provider to the broker's DataBroker and notification service
+ * and starts it.
+ */
+ @Override
+ public void onSessionInitiated(final ProviderContext session) {
+ DataBroker dataBroker = session.getSALService(DataBroker.class);
+ NotificationProviderService salNotifiService =
+ session.getSALService(NotificationProviderService.class);
+
+ provider = new FlowCapableInventoryProvider(dataBroker, salNotifiService);
+ provider.start();
+ }
+
+ /** Shuts the provider down, tolerating interruption during the join. */
+ @Override
+ protected void stopImpl(final BundleContext context) {
+ if (provider != null) {
+ try {
+ provider.close();
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while waiting for shutdown", e);
+ }
+ provider = null;
+ }
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.inventory.manager;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowNodeConnector;
+
+/**
+ * Static conversions from the notification-side flow-node views
+ * ({@link FlowNode}, {@link FlowNodeConnector}) to the datastore augmentation
+ * types ({@link FlowCapableNode}, {@link FlowCapableNodeConnector}).
+ */
+public class InventoryMapping {
+
+ /**
+ * Returns the connector as a {@link FlowCapableNodeConnector}: the
+ * instance itself if it already implements the target type, otherwise a
+ * new instance with every field copied over individually.
+ */
+ public static FlowCapableNodeConnector toInventoryAugment(final FlowNodeConnector updated) {
+ if ((updated instanceof FlowCapableNodeConnector)) {
+ return ((FlowCapableNodeConnector) updated);
+ }
+ final FlowCapableNodeConnectorBuilder builder = new FlowCapableNodeConnectorBuilder();
+ builder.setAdvertisedFeatures(updated.getAdvertisedFeatures());
+ builder.setConfiguration(updated.getConfiguration());
+ builder.setCurrentFeature(updated.getCurrentFeature());
+ builder.setCurrentSpeed(updated.getCurrentSpeed());
+ builder.setHardwareAddress(updated.getHardwareAddress());
+ builder.setMaximumSpeed(updated.getMaximumSpeed());
+ builder.setName(updated.getName());
+ builder.setPeerFeatures(updated.getPeerFeatures());
+ builder.setPortNumber(updated.getPortNumber());
+ builder.setState(updated.getState());
+ builder.setSupported(updated.getSupported());
+ return builder.build();
+ }
+
+ /**
+ * Returns the node as a {@link FlowCapableNode}: the instance itself if it
+ * already implements the target type, otherwise a builder-based copy.
+ */
+ public static FlowCapableNode toInventoryAugment(final FlowNode source) {
+ if ((source instanceof FlowCapableNode)) {
+ return ((FlowCapableNode) source);
+ }
+ return (new FlowCapableNodeBuilder(source)).build();
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.inventory.manager;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+
+/**
+ * A unit of datastore work queued on {@code FlowCapableInventoryProvider};
+ * batches of these are folded into a single transaction before submission.
+ */
+interface InventoryOperation {
+
+ /**
+ * Applies this operation's reads/writes to the supplied, not-yet-submitted
+ * transaction; invoked from the provider's single processing thread.
+ */
+ void applyOperation(ReadWriteTransaction tx);
+
+}
--- /dev/null
+/**
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.inventory.manager;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.OpendaylightInventoryListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.InstanceIdentifierBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Listens to inventory notifications and translates each one into an
+ * {@link InventoryOperation} enqueued on the owning
+ * {@link FlowCapableInventoryProvider}, which applies them to the
+ * OPERATIONAL datastore. Listener methods are synchronized, so
+ * notifications are enqueued one at a time.
+ */
+class NodeChangeCommiter implements OpendaylightInventoryListener {
+ private static final Logger LOG = LoggerFactory.getLogger(NodeChangeCommiter.class);
+
+ private final FlowCapableInventoryProvider manager;
+
+ public NodeChangeCommiter(final FlowCapableInventoryProvider manager) {
+ this.manager = Preconditions.checkNotNull(manager);
+ }
+
+ /** Deletes the removed connector's subtree from the OPERATIONAL store. */
+ @Override
+ public synchronized void onNodeConnectorRemoved(final NodeConnectorRemoved connector) {
+ LOG.debug("Node connector removed notification received.");
+ manager.enqueue(new InventoryOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final NodeConnectorRef ref = connector.getNodeConnectorRef();
+ LOG.debug("removing node connector {} ", ref.getValue());
+ tx.delete(LogicalDatastoreType.OPERATIONAL, ref.getValue());
+ }
+ });
+ }
+
+ /**
+ * Merges the updated connector (plus its flow-capable augmentation, when
+ * present) into the OPERATIONAL store.
+ */
+ @Override
+ public synchronized void onNodeConnectorUpdated(final NodeConnectorUpdated connector) {
+ LOG.debug("Node connector updated notification received.");
+ manager.enqueue(new InventoryOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final NodeConnectorRef ref = connector.getNodeConnectorRef();
+ final NodeConnectorBuilder data = new NodeConnectorBuilder(connector);
+ data.setKey(new NodeConnectorKey(connector.getId()));
+
+ final FlowCapableNodeConnectorUpdated flowConnector = connector
+ .getAugmentation(FlowCapableNodeConnectorUpdated.class);
+ if (flowConnector != null) {
+ final FlowCapableNodeConnector augment = InventoryMapping.toInventoryAugment(flowConnector);
+ data.addAugmentation(FlowCapableNodeConnector.class, augment);
+ }
+ // Unchecked: the ref is assumed to point at a NodeConnector.
+ InstanceIdentifier<NodeConnector> value = (InstanceIdentifier<NodeConnector>) ref.getValue();
+ LOG.debug("updating node connector : {}.", value);
+ NodeConnector build = data.build();
+ // merge with createMissingParents=true so the parent node need not exist yet
+ tx.merge(LogicalDatastoreType.OPERATIONAL, value, build, true);
+ }
+ });
+ }
+
+ /** Deletes the removed node's subtree from the OPERATIONAL store. */
+ @Override
+ public synchronized void onNodeRemoved(final NodeRemoved node) {
+ LOG.debug("Node removed notification received.");
+ manager.enqueue(new InventoryOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final NodeRef ref = node.getNodeRef();
+ LOG.debug("removing node : {}", ref.getValue());
+ tx.delete(LogicalDatastoreType.OPERATIONAL, ref.getValue());
+ }
+ });
+ }
+
+ /**
+ * Reads the node's existing FlowCapableNode data; always re-enqueues a
+ * write of the updated node data, and additionally seeds table 0 when the
+ * node was not present before (or when the read fails). Notifications
+ * without the flow-capable augmentation are ignored.
+ */
+ @Override
+ public synchronized void onNodeUpdated(final NodeUpdated node) {
+ final FlowCapableNodeUpdated flowNode = node.getAugmentation(FlowCapableNodeUpdated.class);
+ if (flowNode == null) {
+ return;
+ }
+ LOG.debug("Node updated notification received.");
+ manager.enqueue(new InventoryOperation() {
+ @Override
+ public void applyOperation(ReadWriteTransaction tx) {
+ final NodeRef ref = node.getNodeRef();
+ @SuppressWarnings("unchecked")
+ InstanceIdentifierBuilder<Node> builder = ((InstanceIdentifier<Node>) ref.getValue()).builder();
+ InstanceIdentifierBuilder<FlowCapableNode> augmentation = builder.augmentation(FlowCapableNode.class);
+ final InstanceIdentifier<FlowCapableNode> path = augmentation.build();
+ CheckedFuture<Optional<FlowCapableNode>, ?> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL, path);
+ Futures.addCallback(readFuture, new FutureCallback<Optional<FlowCapableNode>>() {
+ @Override
+ public void onSuccess(Optional<FlowCapableNode> optional) {
+ enqueueWriteNodeDataTx(node, flowNode, path);
+ if (!optional.isPresent()) {
+ // First time we see this node: create its table 0 entry.
+ enqueuePutTable0Tx(ref);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable throwable) {
+ // NOTE(review): eager String.format inside LOG.debug — SLF4J
+ // parameterized logging ("{}") would defer formatting; also the
+ // throwable is not passed to the logger.
+ LOG.debug(String.format("Can't retrieve node data for node %s. Writing node data with table0.", node));
+ enqueueWriteNodeDataTx(node, flowNode, path);
+ enqueuePutTable0Tx(ref);
+ }
+ });
+ }
+ });
+ }
+
+ /** Enqueues a merge of the node's flow-capable data at {@code path}. */
+ private void enqueueWriteNodeDataTx(final NodeUpdated node, final FlowCapableNodeUpdated flowNode, final InstanceIdentifier<FlowCapableNode> path) {
+ manager.enqueue(new InventoryOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final FlowCapableNode augment = InventoryMapping.toInventoryAugment(flowNode);
+ LOG.debug("updating node :{} ", path);
+ tx.merge(LogicalDatastoreType.OPERATIONAL, path, augment, true);
+ }
+ });
+ }
+
+ /** Enqueues creation of an empty table 0 under the referenced node. */
+ private void enqueuePutTable0Tx(final NodeRef ref) {
+ manager.enqueue(new InventoryOperation() {
+ @Override
+ public void applyOperation(ReadWriteTransaction tx) {
+ final TableKey tKey = new TableKey((short) 0);
+ // NOTE(review): new TableKey(tKey) is a redundant copy of tKey —
+ // passing tKey directly would be equivalent.
+ final InstanceIdentifier<Table> tableIdentifier =
+ ((InstanceIdentifier<Node>) ref.getValue()).augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tKey));
+ TableBuilder tableBuilder = new TableBuilder();
+ Table table0 = tableBuilder.setId((short) 0).build();
+ LOG.debug("writing table :{} ", tableIdentifier);
+ tx.put(LogicalDatastoreType.OPERATIONAL, tableIdentifier, table0, true);
+ }
+ });
+ }
+}
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>lldp-speaker</artifactId>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>flow-model-parent</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>model-flow-base</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>model-inventory</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>opendaylight-l2-types</artifactId>
+ </dependency>
+ </dependencies>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.CommonPort.PortNumber;
+
+
+/**
+ * Hand-written companion to the generated {@code CommonPort.PortNumber}
+ * union type: builds the union from its string default value.
+ */
+public class PortNumberBuilder {
+
+ /**
+ * Parses {@code defaultValue} as a long and returns the numeric variant of
+ * the union; if parsing fails, falls back to the string variant.
+ *
+ * NOTE(review): the parsed long's range is not checked here — presumably
+ * the PortNumber constructor enforces uint32 bounds; confirm.
+ */
+ public static PortNumber getDefaultInstance(java.lang.String defaultValue) {
+ try {
+ long uint32 = Long.parseLong(defaultValue);
+ return new PortNumber(uint32);
+ } catch(NumberFormatException e){
+ // Not a number — treat the value as the string member of the union.
+ return new PortNumber(defaultValue);
+ }
+ }
+
+}
--- /dev/null
+module opendaylight-action-types {
+ namespace "urn:opendaylight:action:types";
+ prefix action;
+
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+ import opendaylight-l2-types {prefix l2t; revision-date "2013-08-27";}
+ import opendaylight-match-types {prefix match; revision-date "2013-10-26";}
+
+ revision "2013-11-12" {
+ description "Initial revision of action service";
+ }
+
+ typedef vlan-cfi {
+ type int32;
+ }
+
+ grouping address {
+ choice address {
+ case ipv4 {
+ leaf ipv4-address {
+ type inet:ipv4-prefix;
+ }
+ }
+ case ipv6 {
+ leaf ipv6-address {
+ type inet:ipv6-prefix;
+ }
+ }
+ }
+ }
+
+ container action-types {
+ uses action-list;
+ }
+
+ grouping ordered {
+ leaf order {
+ type int32;
+ }
+ }
+
+ grouping action-list {
+ list action {
+ key "order";
+ uses ordered;
+ uses action;
+ }
+ }
+
+ grouping action {
+ choice action {
+ case output-action-case {
+ container output-action {
+ leaf output-node-connector {
+ type inet:uri;
+ }
+
+ leaf max-length {
+ type uint16;
+ }
+ }
+ }
+
+ case controller-action-case {
+ container controller-action {
+ leaf max-length {
+ type uint16;
+ }
+ }
+ }
+
+
+
+ case set-field-case {
+ container set-field {
+ uses match:match;
+ }
+ }
+
+
+ case set-queue-action-case {
+ container set-queue-action {
+ leaf queue {
+ type string;
+ }
+ leaf queue-id {
+ type uint32;
+ }
+ }
+ }
+
+ case pop-mpls-action-case {
+ container pop-mpls-action {
+ leaf ethernet-type {
+ type uint16; // TODO: define ethertype type
+ }
+ }
+ }
+
+
+ case set-mpls-ttl-action-case {
+ container set-mpls-ttl-action {
+ leaf mpls-ttl {
+ type uint8;
+ }
+ }
+ }
+
+
+ case set-nw-ttl-action-case {
+ container set-nw-ttl-action {
+ leaf nw-ttl {
+ type uint8;
+ }
+ }
+ }
+
+
+ case push-pbb-action-case {
+ container push-pbb-action {
+ leaf ethernet-type {
+ type uint16; // TODO: define ethertype type
+ }
+ }
+ }
+
+
+ case pop-pbb-action-case {
+ container pop-pbb-action {
+
+ }
+ }
+
+
+ case push-mpls-action-case {
+ container push-mpls-action {
+ leaf ethernet-type {
+ type uint16; // TODO: define ethertype type
+ }
+ }
+ }
+
+
+ case dec-mpls-ttl-case {
+ container dec-mpls-ttl {
+ }
+ }
+
+
+ case dec-nw-ttl-case {
+ container dec-nw-ttl {
+ }
+ }
+
+
+ case drop-action-case {
+ container drop-action {
+ }
+ }
+
+
+ case flood-action-case {
+ container flood-action {
+ }
+ }
+
+
+ case flood-all-action-case {
+ container flood-all-action {
+ }
+ }
+
+
+ case hw-path-action-case {
+ container hw-path-action {
+ }
+ }
+
+
+ case loopback-action-case {
+ container loopback-action {
+ }
+ }
+
+
+ case pop-vlan-action-case {
+ container pop-vlan-action {
+ }
+ }
+
+
+
+ case push-vlan-action-case {
+ container push-vlan-action {
+ leaf ethernet-type {
+ type uint16; // TODO: define ethertype type
+ }
+ leaf tag { // TPID - 16 bits
+ type int32;
+ }
+ leaf pcp { // PCP - 3 bits
+ type int32;
+ }
+ leaf cfi { // CFI - 1 bit (drop eligible)
+ type vlan-cfi;
+ }
+ leaf vlan-id { // VID - 12 bits
+ type l2t:vlan-id;
+ }
+// leaf tci { //TCI = [PCP + CFI + VID]
+// }
+// leaf header { //header = [TPID + TCI]
+// }
+ }
+ }
+
+ case copy-ttl-out-case {
+ container copy-ttl-out {
+ }
+ }
+
+
+ case copy-ttl-in-case {
+ container copy-ttl-in {
+ }
+ }
+
+
+ case set-dl-dst-action-case {
+ container set-dl-dst-action {
+ leaf address {
+ type yang:mac-address;
+ }
+ }
+ }
+
+
+ case set-dl-src-action-case {
+ container set-dl-src-action {
+ leaf address {
+ type yang:mac-address;
+ }
+ }
+
+ }
+ case group-action-case {
+ container group-action {
+ leaf group {
+ type string;
+ }
+
+ leaf group-id {
+ type uint32;
+ }
+ }
+ }
+
+ case set-dl-type-action-case {
+ container set-dl-type-action {
+ leaf dl-type {
+ type l2t:ether-type;
+ }
+ }
+ }
+
+
+ case set-next-hop-action-case {
+ container set-next-hop-action {
+ uses address;
+ }
+ }
+
+
+ case set-nw-dst-action-case {
+ container set-nw-dst-action {
+ uses address;
+ }
+ }
+
+
+ case set-nw-src-action-case {
+ container set-nw-src-action {
+ uses address;
+ }
+ }
+
+
+ case set-nw-tos-action-case {
+ container set-nw-tos-action {
+ leaf tos {
+ type int32;
+ }
+ }
+ }
+
+
+ case set-tp-dst-action-case {
+ container set-tp-dst-action {
+ leaf port {
+ type inet:port-number;
+ }
+ }
+
+ }
+ case set-tp-src-action-case {
+ container set-tp-src-action {
+ leaf port {
+ type inet:port-number;
+ }
+ }
+
+ }
+ case set-vlan-cfi-action-case {
+ container set-vlan-cfi-action {
+ leaf vlan-cfi {
+ type vlan-cfi;
+ }
+ }
+ }
+
+
+ case set-vlan-id-action-case {
+ container set-vlan-id-action {
+ leaf vlan-id {
+ type l2t:vlan-id;
+ }
+ }
+ }
+
+
+ case set-vlan-pcp-action-case {
+ container set-vlan-pcp-action {
+ leaf vlan-pcp {
+ type l2t:vlan-pcp;
+ }
+ }
+ }
+
+ case strip-vlan-action-case {
+ container strip-vlan-action {
+ }
+ }
+
+ case sw-path-action-case {
+ container sw-path-action {
+ }
+ }
+ }
+ }
+}
--- /dev/null
+// Common OpenFlow flow structures: flow-mod attributes, instructions and
+// flow/table statistics groupings shared by the MD-SAL flow services.
+module opendaylight-flow-types {
+ namespace "urn:opendaylight:flow:types";
+ prefix flow;
+
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+ import opendaylight-match-types {prefix match; revision-date "2013-10-26";}
+ import opendaylight-action-types {prefix action;}
+ import opendaylight-meter-types {prefix meter; revision-date "2013-09-18";}
+
+ revision "2013-10-26" {
+ description "Initial revision of flow service";
+ }
+
+ // Reference (instance-identifier) to a flow in the data tree.
+ typedef flow-ref {
+ type instance-identifier;
+ }
+
+ typedef flow-cookie {
+ description "openflow specific type - flow cookie / flow cookie mask";
+ type uint64;
+ }
+
+ // Reserved/logical output ports. NOTE(review): the values 1..10 are local
+ // enum values, not the OpenFlow wire encodings of OFPP_* — confirm before
+ // mapping these directly onto wire port numbers.
+ typedef output-port-values {
+ type enumeration {
+ enum MAX {
+ value 1;
+ }
+ enum IN_PORT {
+ value 2;
+ }
+ enum TABLE {
+ value 3;
+ }
+ enum NORMAL {
+ value 4;
+ }
+ enum FLOOD {
+ value 5;
+ }
+ enum ALL {
+ value 6;
+ }
+ enum CONTROLLER {
+ value 7;
+ }
+ enum LOCAL {
+ value 8;
+ }
+ enum ANY {
+ value 9;
+ }
+ enum NONE {
+ value 10;
+ }
+
+ }
+ }
+ // Ordered list of instructions; the "order" key comes from action:ordered.
+ grouping instruction-list {
+ list instruction {
+ key "order";
+ uses action:ordered;
+ uses instruction;
+ }
+ }
+
+ // A single OpenFlow instruction; exactly one case of the choice is present.
+ grouping instruction {
+ choice instruction {
+ case go-to-table-case {
+ container go-to-table {
+ leaf table_id {
+ type uint8;
+ }
+ }
+ }
+
+ case write-metadata-case {
+ container write-metadata {
+ leaf metadata {
+ type uint64;
+ }
+
+ leaf metadata-mask {
+ type uint64;
+ }
+ }
+ }
+
+ case write-actions-case {
+ container write-actions {
+ uses action:action-list;
+ }
+ }
+
+ case apply-actions-case {
+ container apply-actions {
+ uses action:action-list;
+ }
+ }
+
+ case clear-actions-case {
+ container clear-actions {
+ uses action:action-list;
+ }
+ }
+
+ case meter-case {
+ container meter {
+ leaf meter-id {
+ type meter:meter-id;
+ }
+ }
+ }
+ }
+ }
+
+ typedef flow-mod-flags {
+ type bits {
+ bit CHECK_OVERLAP;
+ bit RESET_COUNTS;
+ bit NO_PKT_COUNTS;
+ bit NO_BYT_COUNTS;
+ bit SEND_FLOW_REM;
+ }
+ }
+
+ // Reason bits carried in a flow-removed notification.
+ typedef removed_reason_flags {
+ type bits {
+ bit IDLE_TIMEOUT;
+ bit HARD_TIMEOUT;
+ bit DELETE;
+ bit GROUP_DELETE;
+ }
+ }
+
+ // Attributes common to flow-mod and flow-removed messages.
+ grouping generic_flow_attributes {
+ leaf priority {
+ type uint16;
+ }
+
+ leaf idle-timeout {
+ type uint16;
+ }
+
+ leaf hard-timeout {
+ type uint16;
+ }
+
+ leaf cookie {
+ type flow-cookie;
+ }
+
+ leaf table_id {
+ type uint8;
+ }
+ }
+
+ // Full flow description used by the flow add/update/remove services.
+ // NOTE(review): naming mixes hyphen and underscore styles (flow-name vs
+ // cookie_mask); kept as-is because the names are part of the generated API.
+ grouping flow {
+ container match {
+ uses match:match;
+ }
+
+ container instructions {
+ uses instruction-list;
+ }
+
+ uses generic_flow_attributes;
+
+ leaf container-name {
+ type string;
+ }
+
+ leaf cookie_mask {
+ type flow-cookie;
+ }
+
+ leaf buffer_id {
+ type uint32;
+ }
+
+ leaf out_port {
+ type uint64;
+ }
+
+ leaf out_group {
+ type uint32;
+ }
+
+ leaf flags {
+ type flow-mod-flags;
+ }
+
+ leaf flow-name {
+ type string;
+ }
+
+ leaf installHw {
+ type boolean;
+ }
+
+ leaf barrier {
+ type boolean;
+ }
+
+ // presumably selects strict (OFPFC_*_STRICT) modify/delete semantics —
+ // TODO confirm against the service implementation.
+ leaf strict {
+ type boolean;
+ default "false";
+ }
+
+ }
+
+ // Per-flow counters and duration, as reported by flow statistics.
+ grouping flow-statistics {
+ leaf packet-count {
+ type yang:counter64;
+ }
+
+ leaf byte-count {
+ type yang:counter64;
+ }
+
+ container duration {
+ leaf second {
+ type yang:counter64;
+ }
+ leaf nanosecond {
+ type yang:counter64;
+ }
+ }
+ }
+
+ // Per-table counters (active entries, lookups, matches).
+ grouping flow-table-statistics {
+ leaf active {
+ type yang:counter64;
+ }
+
+ leaf lookup {
+ type yang:counter64;
+ }
+
+ leaf matched {
+ type yang:counter64;
+ }
+ }
+
+ // Payload of a flow-removed message.
+ grouping flow-mod-removed {
+ uses generic_flow_attributes;
+
+ leaf removed_reason {
+ type removed_reason_flags;
+ }
+
+ leaf duration_nsec {
+ type uint32;
+ }
+
+ leaf duration_sec {
+ type uint32;
+ }
+
+ leaf packet_count {
+ type uint64;
+ }
+
+ leaf byte_count {
+ type uint64;
+ }
+
+ container match {
+ uses match:match;
+ }
+ }
+}
--- /dev/null
+// OpenFlow group structures: group description, buckets, statistics and
+// feature groupings shared by the MD-SAL group services.
+module opendaylight-group-types {
+ namespace "urn:opendaylight:group:types";
+ prefix group;
+
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+ import opendaylight-action-types {prefix action;}
+
+ revision "2013-10-18" {
+ description "Initial revision of group service";
+ }
+
+ typedef group-id {
+ type uint32;
+ }
+
+ typedef bucket-id {
+ type uint32;
+ }
+
+ // NOTE(review): group kinds are modeled twice — as this enumeration and as
+ // the identity hierarchy below (base group-type); both are kept because the
+ // generated APIs expose both.
+ typedef group-types {
+ type enumeration {
+ enum group-all;
+ enum group-select;
+ enum group-indirect;
+ enum group-ff;
+ }
+ }
+
+ typedef group-capabilities {
+ type enumeration {
+ enum select-weight;
+ enum select-liveness;
+ enum chaining;
+ enum chaining-checks;
+ }
+ }
+
+ identity group-type {
+ description "Base identity for all the available group types";
+ }
+
+ identity group-all {
+ base group-type;
+ description "All (multicast/broadcast) group";
+ }
+
+ identity group-select {
+ base group-type;
+ description "Select group";
+ }
+
+ identity group-indirect {
+ base group-type;
+ description "Indirect group";
+ }
+
+ identity group-ff {
+ base group-type;
+ description "Fast failover group";
+ }
+
+ identity group-capability {
+ description "Base identity for all the supported group capabilities";
+ }
+
+ identity select-weight{
+ base group-capability;
+ description "Support weight for select groups";
+ }
+
+ identity select-liveness{
+ base group-capability;
+ description "Support liveness for select groups";
+ }
+
+ identity chaining{
+ base group-capability;
+ description "Support chaining groups";
+ }
+
+ identity chaining-checks{
+ base group-capability;
+ description "Check chaining for loops and delete";
+ }
+
+ // Reference (instance-identifier) to a group in the data tree.
+ typedef group-ref {
+ type instance-identifier;
+ }
+
+ // Full group description: type, id, and the list of action buckets.
+ grouping group {
+
+ leaf group-type {
+ type group-types;
+ }
+
+ leaf group-id {
+ type group-id;
+ }
+
+ leaf group-name {
+ type string;
+ }
+
+ leaf container-name {
+ type string;
+ }
+
+ leaf barrier {
+ type boolean;
+ }
+
+ container buckets {
+ list bucket {
+ key "bucket-id";
+ leaf bucket-id {
+ type bucket-id;
+ }
+
+ leaf weight {
+ type uint16;
+ }
+
+ leaf watch_port {
+ type uint32;
+ }
+
+ leaf watch_group {
+ type uint32;
+ }
+
+ uses action:action-list;
+ }
+ }
+ }
+
+ // Per-group counters, duration, and per-bucket counters.
+ grouping group-statistics {
+
+ leaf group-id {
+ type group-id;
+ }
+
+ leaf ref-count {
+ type yang:counter32;
+ }
+
+ leaf packet-count {
+ type yang:counter64;
+ }
+
+ leaf byte-count {
+ type yang:counter64;
+ }
+
+ container duration {
+ leaf second {
+ type yang:counter32;
+ }
+ leaf nanosecond {
+ type yang:counter32;
+ }
+ }
+
+ container buckets {
+ list bucket-counter {
+ key "bucket-id";
+ leaf bucket-id {
+ type bucket-id;
+ }
+
+ leaf packet-count {
+ type yang:counter64;
+ }
+
+ leaf byte-count {
+ type yang:counter64;
+ }
+ }
+ }
+ }
+
+ // Switch-reported group feature capabilities.
+ grouping group-features {
+
+ leaf-list group-types-supported {
+ type identityref {
+ base group-type;
+ }
+ }
+
+ leaf-list group-capabilities-supported {
+ type identityref {
+ base group-capability;
+ }
+ }
+
+ leaf-list max-groups {
+ type uint32;
+ description "Maximum number of groups for each type";
+ max-elements 4;
+ }
+
+ leaf-list actions {
+ type uint32;
+ description "Bitmap number OFPAT_* that are supported";
+ max-elements 4;
+ }
+ }
+
+ grouping group-statistics-request {
+ list group-stats {
+ key "group-id";
+ leaf group-id {
+ type group-id;
+ }
+ }
+ }
+
+
+ grouping group-statistics-reply {
+
+ list group-stats {
+ key "group-id";
+ uses group-statistics;
+ }
+ }
+
+ grouping group-desc-stats-reply {
+
+ list group-desc-stats {
+ key "group-id";
+ uses group;
+ }
+ }
+
+ grouping group-features-reply {
+ uses group-features;
+ }
+
+ grouping groups {
+ list group {
+ key "group-id";
+
+ uses group;
+ }
+ }
+
+}
--- /dev/null
+// OpenFlow match structures (OXM fields) used by the flow services.
+// Fix: the stray grouping-level `description "ICMP code.";` statements in
+// icmpv4-match-fields / icmpv6-match-fields were clearly intended for the
+// icmpv4-code / icmpv6-code leaves (parallel to the -type leaves) and have
+// been moved inside them; this changes model metadata only, not the data tree.
+module opendaylight-match-types {
+ namespace "urn:opendaylight:model:match:types";
+ prefix "match";
+
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+ import opendaylight-l2-types {prefix l2t;revision-date "2013-08-27";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+
+ revision "2013-10-26" {
+ description "Initial revision of match types";
+ }
+
+ // MAC address plus optional mask, shared by source/destination matches.
+ grouping "mac-address-filter" {
+ leaf address {
+ mandatory true;
+ type yang:mac-address;
+ }
+ leaf mask {
+ type yang:mac-address;
+ }
+ }
+
+ grouping "of-metadata" {
+ leaf metadata {
+ type uint64;
+ }
+
+ leaf metadata-mask {
+ type uint64;
+ }
+ }
+
+ /** Match Groupings **/
+ grouping "ethernet-match-fields" {
+ container ethernet-source {
+ description "Ethernet source address.";
+ presence "Match field is active and set";
+ uses mac-address-filter;
+ }
+ container ethernet-destination {
+ description "Ethernet destination address.";
+ presence "Match field is active and set";
+ uses mac-address-filter;
+ }
+ container ethernet-type {
+ description "Ethernet frame type.";
+ presence "Match field is active and set";
+
+ leaf type {
+ mandatory true;
+ type l2t:ether-type; // Needs to define that as general model
+ }
+ }
+ }
+
+ grouping "vlan-match-fields" {
+ container vlan-id {
+ description "VLAN id.";
+ presence "Match field is active and set";
+
+ leaf vlan-id-present {
+ type boolean;
+ }
+
+ leaf vlan-id {
+ type l2t:vlan-id;
+ }
+ }
+ leaf vlan-pcp {
+ description "VLAN priority.";
+ type l2t:vlan-pcp;
+ }
+ }
+
+ grouping "ip-match-fields" {
+ leaf ip-protocol {
+ description "IP protocol.";
+ type uint8;
+ }
+
+ leaf ip-dscp {
+ description "IP DSCP (6 bits in ToS field).";
+ type inet:dscp;
+ }
+
+ leaf ip-ecn {
+ description "IP ECN (2 bits in ToS field).";
+ type uint8;
+ }
+
+ leaf ip-proto {
+ description "IP Proto (IPv4 or IPv6 Protocol Number).";
+ type inet:ip-version;
+ }
+ }
+
+ grouping "ipv4-match-fields" {
+ leaf ipv4-source {
+ description "IPv4 source address.";
+ type inet:ipv4-prefix;
+ }
+
+ leaf ipv4-destination {
+ description "IPv4 destination address.";
+ type inet:ipv4-prefix;
+ }
+
+ }
+
+ grouping "ipv6-match-fields" {
+ leaf ipv6-source {
+ description "IPv6 source address.";
+ type inet:ipv6-prefix;
+ }
+
+ leaf ipv6-destination {
+ description "IPv6 destination address.";
+ type inet:ipv6-prefix;
+ }
+
+ leaf ipv6-nd-target {
+ description "IPv6 target address for neighbour discovery message";
+ type inet:ipv6-address;
+ }
+
+ container "ipv6-label" {
+ leaf ipv6-flabel {
+ type inet:ipv6-flow-label;
+ }
+
+ leaf flabel-mask {
+ type inet:ipv6-flow-label;
+ }
+ }
+
+ leaf ipv6-nd-sll {
+ description "Link layer source address for neighbour discovery message";
+ type yang:mac-address;
+ }
+
+ leaf ipv6-nd-tll {
+ description "Link layer target address for neighbour discovery message";
+ type yang:mac-address;
+ }
+
+ container "ipv6-ext-header" {
+ leaf ipv6-exthdr {
+ description "IPv6 Extension Header field";
+ type uint16;
+ }
+
+ // NOTE(review): the nine OFPIEH_* flag bits give a maximum of 511, so
+ // the upper bound 512 looks off by one — confirm against the OpenFlow
+ // 1.3 spec before tightening the range (tightening would not be
+ // backward compatible, so it is left unchanged here).
+ leaf ipv6-exthdr-mask {
+ type uint16 {
+ range "0..512";
+ }
+ }
+ }
+ }
+
+ grouping "udp-match-fields" {
+ leaf udp-source-port {
+ description "UDP source port.";
+ type inet:port-number;
+ }
+ leaf udp-destination-port {
+ description "UDP destination port.";
+ type inet:port-number;
+ }
+ }
+
+ grouping "protocol-match-fields" {
+ leaf mpls-label {
+ description "Label in the first MPLS shim header";
+ type uint32;
+ }
+
+ leaf mpls-tc {
+ description "TC in the first MPLS shim header";
+ type uint8;
+ }
+
+ leaf mpls-bos {
+ description "BoS bit in the first MPLS shim header";
+ type uint8;
+ }
+
+ container "pbb" {
+ leaf pbb-isid {
+ description "I-SID in the first PBB service instance tag";
+ type uint32;
+ }
+
+ // NOTE(review): an I-SID is 24 bits, so the maximum mask should likely
+ // be 16777215 (2^24 - 1), not 16777216 — confirm before changing
+ // (narrowing the range would not be backward compatible).
+ leaf pbb-mask {
+ type uint32 {
+ range "0..16777216";
+ }
+ }
+ }
+ }
+
+ grouping "tcp-match-fields" {
+ leaf tcp-source-port {
+ description "TCP source port.";
+ type inet:port-number;
+ }
+ leaf tcp-destination-port {
+ description "TCP destination port.";
+ type inet:port-number;
+ }
+ }
+
+ grouping "sctp-match-fields" {
+ leaf sctp-source-port {
+ description "SCTP source port.";
+ type inet:port-number;
+ }
+ leaf sctp-destination-port {
+ description "SCTP destination port.";
+ type inet:port-number;
+ }
+ }
+
+ grouping "icmpv4-match-fields" {
+ leaf icmpv4-type {
+ description "ICMP type.";
+ type uint8; // Define ICMP Type
+ }
+ leaf icmpv4-code {
+ description "ICMP code.";
+ type uint8; // Define ICMP Code
+ }
+ }
+
+ grouping "icmpv6-match-fields" {
+ leaf icmpv6-type {
+ description "ICMP type.";
+ type uint8; // Define ICMP Type
+ }
+ leaf icmpv6-code {
+ description "ICMP code.";
+ type uint8; // Define ICMP Code
+ }
+ }
+
+ grouping "arp-match-fields" {
+ leaf arp-op {
+ type uint16;
+ }
+
+ leaf arp-source-transport-address {
+ description "ARP source IPv4 address.";
+ type inet:ipv4-prefix;
+ }
+
+ leaf arp-target-transport-address {
+ description "ARP target IPv4 address.";
+ type inet:ipv4-prefix;
+ }
+ container arp-source-hardware-address {
+ description "ARP source hardware address.";
+ presence "Match field is active and set";
+ uses mac-address-filter;
+ }
+ container arp-target-hardware-address {
+ description "ARP target hardware address.";
+ presence "Match field is active and set";
+ uses mac-address-filter;
+ }
+ }
+
+ grouping "tcp-flag-match-fields" {
+ leaf tcp-flag {
+ type uint16;
+ }
+ }
+
+ grouping "tunnel-ipv4-match-fields" {
+ leaf tunnel-ipv4-source {
+ description "IPv4 source tunnel endpoint address.";
+ type inet:ipv4-prefix;
+ }
+ leaf tunnel-ipv4-destination {
+ description "IPv4 destination tunnel endpoint address.";
+ type inet:ipv4-prefix;
+ }
+ }
+
+ // Top-level match container: one grouping per protocol layer, with
+ // layer-3 and layer-4 alternatives modeled as choices.
+ grouping match {
+ leaf in-port {
+ type inv:node-connector-id;
+ }
+
+ leaf in-phy-port {
+ type inv:node-connector-id;
+ }
+
+ container "metadata" {
+ uses of-metadata;
+ }
+
+ container "tunnel" {
+ leaf tunnel-id {
+ description "Metadata associated in the logical port";
+ type uint64;
+ }
+
+ leaf tunnel-mask {
+ type uint64;
+ }
+ }
+
+ container "ethernet-match" {
+ uses "ethernet-match-fields";
+ }
+
+ container "vlan-match" {
+ uses "vlan-match-fields";
+ }
+
+ container "ip-match" {
+ uses "ip-match-fields";
+ }
+
+ choice layer-3-match {
+ case "ipv4-match" {
+ uses "ipv4-match-fields";
+ }
+ case "ipv6-match" {
+ uses "ipv6-match-fields";
+ }
+ case "arp-match" {
+ uses "arp-match-fields";
+ }
+ case "tunnel-ipv4-match" {
+ uses "tunnel-ipv4-match-fields";
+ }
+ }
+
+ choice layer-4-match {
+ case "udp-match" {
+ uses "udp-match-fields";
+ }
+ case "tcp-match" {
+ uses "tcp-match-fields";
+ }
+ case "sctp-match" {
+ uses "sctp-match-fields";
+ }
+ }
+
+ container "icmpv4-match" {
+ uses "icmpv4-match-fields";
+ }
+
+ container "icmpv6-match" {
+ uses "icmpv6-match-fields";
+ }
+
+ container "protocol-match-fields" {
+ uses "protocol-match-fields";
+ }
+
+ container tcp-flag-match {
+ uses "tcp-flag-match-fields";
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+// OpenFlow meter structures: meter configuration, band types, statistics
+// and feature groupings shared by the MD-SAL meter services.
+module opendaylight-meter-types {
+ namespace "urn:opendaylight:meter:types";
+ prefix meter;
+
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+
+
+ revision "2013-09-18" {
+ description "Initial revision of meter service";
+ }
+
+ typedef meter-id {
+ type uint32;
+ }
+ typedef band-id {
+ type uint32;
+ }
+
+ // NOTE(review): meter flags/capabilities are modeled twice — as these bits
+ // and as the identity hierarchy below (base meter-capability); both are
+ // kept because the generated APIs expose both.
+ typedef meter-flags {
+ type bits {
+ bit meter-kbps;
+ bit meter-pktps;
+ bit meter-burst;
+ bit meter-stats;
+ }
+ }
+
+ identity meter-capability {
+ description "Base identity for all the supported meter capabilities/flags";
+ }
+ identity meter-kbps {
+ base meter-capability;
+ description "Rate value in kb/s (kilo-bit per second)";
+ }
+ identity meter-pktps {
+ base meter-capability;
+ description "Rate value in packet/sec.";
+ }
+ identity meter-burst {
+ base meter-capability;
+ description "Do burst size.";
+ }
+ identity meter-stats {
+ base meter-capability;
+ description "Collect statistics.";
+ }
+
+ typedef meter-band-type {
+ type bits {
+ bit ofpmbt-drop;
+ bit ofpmbt-dscp-remark;
+ bit ofpmbt-experimenter;
+ }
+ }
+
+ identity meter-band {
+ description "Base identity for all the band type available";
+ }
+ identity meter-band-drop {
+ base meter-band;
+ description "Drop packet";
+ }
+ identity meter-band-dscp-remark {
+ base meter-band;
+ description "Remark DSCP in the IP header";
+ }
+ identity meter-band-experimenter {
+ base meter-band;
+ description "Experimenter meter band";
+ }
+
+ // Band payload: exactly one of drop / dscp-remark / experimenter.
+ grouping band-type {
+ choice band-type {
+ case drop {
+ leaf drop-rate {
+ description "Rate for dropping packets";
+ type uint32;
+ }
+
+ leaf drop-burst-size {
+ description "Size of bursts";
+ type uint32;
+ }
+ }
+
+ case dscp-remark {
+ leaf dscp-remark-rate {
+ description "Rate for remarking packets";
+ type uint32;
+ }
+
+ leaf dscp-remark-burst-size {
+ description "Size of bursts";
+ type uint32;
+ }
+
+ leaf prec_level {
+ description "Number of drop precedence level to add";
+ type uint8;
+ }
+ }
+
+ case experimenter {
+ leaf experimenter-rate {
+ description "Rate for remarking packets";
+ type uint32;
+ }
+
+ leaf experimenter-burst-size {
+ description "Size of bursts";
+ type uint32;
+ }
+
+ leaf experimenter {
+ description "Experimenter id";
+ type uint32;
+ }
+ }
+ }
+ }
+
+ // Reference (instance-identifier) to a meter in the data tree.
+ typedef meter-ref {
+ type instance-identifier;
+ }
+
+ // Full meter description used by the meter add/update/remove services.
+ grouping meter {
+
+ leaf flags {
+ description "Meter configuration flags";
+ type meter-flags;
+ }
+
+ leaf meter-id {
+ description "Meter instance";
+ type meter-id;
+ }
+
+ leaf barrier {
+ description "If true, barrier message is sent";
+ type boolean;
+ }
+
+ leaf meter-name {
+ description "Name of meter instance";
+ type string;
+ }
+
+ leaf container-name {
+ description "Name of container";
+ type string;
+ }
+
+ container meter-band-headers {
+ list meter-band-header {
+ key "band-id";
+ leaf band-id {
+ description "Meter band id";
+ type band-id;
+ }
+
+ container meter-band-types {
+ leaf flags {
+ description "Meter band flags";
+ type meter-band-type;
+ }
+ }
+
+ leaf band-rate {
+ description "Rate for this band";
+ type uint32;
+ }
+
+ leaf band-burst-size {
+ description "Size of bursts";
+ type uint32;
+ }
+ uses band-type;
+ }
+ }
+ }
+
+ // Per-meter counters, duration, and per-band counters.
+ grouping meter-statistics {
+
+ leaf meter-id {
+ type meter-id;
+ }
+
+ leaf flow-count {
+ type yang:counter32;
+ }
+
+ leaf packet-in-count {
+ type yang:counter64;
+ }
+
+ leaf byte-in-count {
+ type yang:counter64;
+ }
+
+ container duration {
+ leaf second {
+ type yang:counter32;
+ }
+ leaf nanosecond {
+ type yang:counter32;
+ }
+ }
+
+ container meter-band-stats {
+ list band-stat {
+ key "band-id";
+ leaf band-id {
+ type band-id;
+ }
+
+ leaf packet-band-count {
+ type yang:counter64;
+ }
+
+ leaf byte-band-count {
+ type yang:counter64;
+ }
+ }
+ }
+ }
+
+ // Switch-reported meter feature capabilities.
+ grouping meter-features {
+
+ leaf max_meter {
+ type yang:counter32;
+ }
+
+ leaf-list meter-band-supported {
+ type identityref {
+ base meter-band;
+ }
+ }
+
+ leaf-list meter-capabilities-supported {
+ type identityref {
+ base meter-capability;
+ }
+ }
+
+ leaf max_bands {
+ type uint8;
+ }
+
+ leaf max_color {
+ type uint8;
+ }
+ }
+
+ grouping meter-stats-config-request {
+ list meter-stats {
+ key "meter-id";
+ leaf meter-id {
+ type meter-id;
+ }
+ }
+ }
+
+ grouping meter-statistics-reply {
+ list meter-stats {
+ key "meter-id";
+ uses meter-statistics;
+ }
+ }
+
+ grouping meter-config-stats-reply {
+ list meter-config-stats {
+ key "meter-id";
+ uses meter;
+ }
+ }
+
+ grouping meter-features-reply {
+ uses meter-features;
+ }
+
+}
--- /dev/null
+// OpenFlow port structures: port configuration, state, features and the
+// port-mod grouping used by the MD-SAL port services.
+module opendaylight-port-types {
+ namespace "urn:opendaylight:flow:types:port";
+ prefix port-types;
+
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+ import opendaylight-queue-types {prefix queue-types; revision-date "2013-09-25";}
+
+ revision "2013-09-25" {
+ description "Initial revision of Port Inventory model";
+ }
+
+ // Reason carried in a port-status notification.
+ typedef port-reason {
+ type enumeration {
+ enum add;
+ enum delete;
+ enum update;
+ }
+ }
+
+ typedef port-config {
+ type bits {
+ bit PORT-DOWN;
+ bit NO-RECV;
+ bit NO-FWD;
+ bit NO-PACKET-IN;
+ }
+ }
+
+ grouping port-state {
+ leaf link-down {
+ type boolean;
+ }
+ leaf blocked {
+ type boolean;
+ }
+ leaf live {
+ type boolean;
+ }
+ }
+
+ // NOTE(review): "autoeng" is probably a typo for "autoneg"
+ // (auto-negotiation), but renaming the bit would break the generated API,
+ // so it is documented rather than changed.
+ typedef port-features {
+ type bits {
+ bit ten-mb-hd;
+ bit ten-mb-fd;
+ bit hundred-mb-hd;
+ bit hundred-mb-fd;
+ bit one-gb-hd;
+ bit one-gb-fd;
+ bit ten-gb-fd;
+ bit forty-gb-fd;
+ bit hundred-gb-fd;
+ bit one-tb-fd;
+ bit other;
+ bit copper;
+ bit fiber;
+ bit autoeng;
+ bit pause;
+ bit pause-asym;
+ }
+ }
+
+ // Attributes shared by port descriptions and port-mod requests.
+ grouping common-port {
+
+ leaf port-number {
+ type union {
+ type uint32;
+ type string;
+ }
+ }
+
+ leaf hardware-address {
+ type yang:mac-address;
+ description "MAC Address of the port";
+
+ }
+
+ leaf configuration {
+ type port-config;
+ description "Bit map of OFPPC-* flags";
+ }
+
+ leaf advertised-features {
+ type port-features;
+ description "Features being advertised by the port";
+ }
+ }
+
+ // Payload of a port-status notification; flow-capable-port is defined
+ // below (forward references between groupings are legal in YANG).
+ grouping flow-port-status {
+ leaf reason {
+ type port-reason;
+ }
+
+ uses flow-capable-port;
+ }
+
+ grouping queues {
+ list queue {
+ key "queue-id";
+ uses queue-types:queue-packet;
+ }
+ }
+
+ // Full description of a flow-capable port, including state and queues.
+ grouping flow-capable-port {
+
+ uses common-port;
+
+ leaf name {
+ type string;
+ description "Human readable name of the port";
+ }
+
+ container state {
+ uses port-state;
+ description "Description of state of port";
+ }
+
+ leaf current-feature {
+ type port-features;
+ description "Bit map of OFPPF-* flags";
+ }
+
+ leaf supported {
+ type port-features;
+ description "Features supported by the port";
+ }
+
+ leaf peer-features {
+ type port-features;
+ description "Features advertised by peer";
+ }
+
+ leaf current-speed {
+ type uint32;
+ units "kbps";
+ description "Current port bit rate in kbps";
+ }
+
+ leaf maximum-speed {
+ type uint32;
+ units "kbps";
+ description "Max port bit rate in kbps";
+ }
+
+ uses queues;
+ }
+
+ // Ordered list of port modification requests.
+ grouping port-mod {
+ container port {
+ list port {
+ key "port-mod-order";
+ leaf port-mod-order {
+ type uint32;
+ }
+
+ uses common-port;
+
+ leaf mask {
+ type port-config;
+ description "Bitmap of OFPPC-* flags to be changed";
+ }
+
+ leaf container-name {
+ type string;
+ }
+
+ leaf port-name {
+ type string;
+ }
+
+ leaf barrier {
+ type boolean;
+ }
+ }
+ }
+ }
+}
--- /dev/null
+// OpenFlow queue structures: queue identifiers, property kinds and the
+// groupings used when reporting or querying per-port queues.
+module opendaylight-queue-types {
+ namespace "urn:opendaylight:flow:types:queue";
+ prefix queue-types;
+
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+
+ revision "2013-09-25" {
+ description "Initial revision of Queue Inventory model";
+ }
+
+ // Identifier of a single queue.
+ typedef queue-id {
+ type yang:counter32;
+ description "id for the specific queue.";
+ }
+
+ // Queue property kinds (OFPQT_*).
+ typedef queue-properties {
+ type enumeration {
+ enum min_rate;
+ enum max_rate;
+ }
+ }
+
+ // Field shared by every queue property record.
+ grouping common-queue {
+ leaf property {
+ type uint16;
+ description "One of OFPQT_.";
+ }
+ }
+
+ // Minimum-rate queue property.
+ grouping queue-prop-min-rate {
+ uses common-queue;
+ leaf rate {
+ type uint16;
+ description "OFPQT_MIN, len: 16";
+ }
+ }
+
+ // Maximum-rate queue property.
+ grouping queue-prop-max-rate {
+ uses common-queue;
+ leaf rate {
+ type uint16;
+ description "OFPQT_MAX, len: 16";
+ }
+ }
+
+ // A queue as attached to a port.
+ grouping queue-packet {
+ leaf queue-id {
+ type queue-id;
+ description "id for the specific queue.";
+ }
+ leaf port {
+ type uint32;
+ description "Port this queue is attached to.";
+ }
+ uses common-queue;
+ }
+
+ // Request body for a queue configuration query.
+ grouping queue-config-request {
+ leaf port {
+ type uint32;
+ description "Port to be queried.";
+ }
+ }
+}
--- /dev/null
+// OpenFlow table structures: table configuration and the table-features
+// property groupings (OpenFlow 1.3 OFPMP_TABLE_FEATURES).
+module opendaylight-table-types {
+ namespace "urn:opendaylight:table:types";
+ prefix table;
+
+ import opendaylight-flow-types {prefix flow;revision-date "2013-10-26";}
+ import opendaylight-action-types {prefix action;}
+
+ revision "2013-10-26" {
+ description "Initial revision of table service";
+ }
+
+ typedef table-id {
+ type uint8;
+ }
+
+ // Reference (instance-identifier) to a table in the data tree.
+ typedef table-ref {
+ type instance-identifier;
+ }
+
+ typedef table-config {
+ type bits {
+ bit DEPRECATED-MASK;
+ }
+ }
+
+ // field types
+ // One identity per match field that can appear in a table-features
+ // set-field property list.
+ identity match-field {
+ description "Base identity for match Fields";
+ }
+
+ identity in_port {
+ base match-field;
+ description "Match for Switch input port.";
+ }
+ identity in_phy_port {
+ base match-field;
+ description "Match for Switch physical input port.";
+ }
+ identity metadata {
+ base match-field;
+ description "Match for Metadata passed between tables.";
+ }
+ identity eth_dst {
+ base match-field;
+ description "Match for Ethernet destination address.";
+ }
+ identity eth_src {
+ base match-field;
+ description "Match for Ethernet source address.";
+ }
+ identity eth_type {
+ base match-field;
+ description "Match for Ethernet frame type.";
+ }
+ identity vlan_vid {
+ base match-field;
+ description "Match for VLAN id.";
+ }
+ identity vlan_pcp {
+ base match-field;
+ description "Match for VLAN priority.";
+ }
+ identity ip_dscp {
+ base match-field;
+ description "Match for IP DSCP (6 bits in ToS field).";
+ }
+ identity ip_ecn {
+ base match-field;
+ description "Match for IP ECN (2 bits in ToS field).";
+ }
+ identity ip_proto {
+ base match-field;
+ description "Match for IP protocol.";
+ }
+ identity ipv4_src {
+ base match-field;
+ description "Match for IPv4 source address.";
+ }
+ identity ipv4_dst {
+ base match-field;
+ description "Match for IPv4 destination address.";
+ }
+ identity tcp_src {
+ base match-field;
+ description "Match for TCP source port.";
+ }
+ identity tcp_dst {
+ base match-field;
+ description "Match for TCP destination port.";
+ }
+ identity udp_src {
+ base match-field;
+ description "Match for UDP source port.";
+ }
+ identity udp_dst {
+ base match-field;
+ description "Match for UDP destination port.";
+ }
+ identity sctp_src {
+ base match-field;
+ description "Match for SCTP source port.";
+ }
+ identity sctp_dst {
+ base match-field;
+ description "Match for SCTP destination port.";
+ }
+ identity icmpv4_type {
+ base match-field;
+ description "Match for ICMP type.";
+ }
+ identity icmpv4_code {
+ base match-field;
+ description "Match for ICMP code.";
+ }
+ identity arp_op {
+ base match-field;
+ description "Match for ARP opcode.";
+ }
+ identity arp_spa {
+ base match-field;
+ description "Match for ARP source IPv4 address.";
+ }
+ identity arp_tpa {
+ base match-field;
+ description "Match for ARP target IPv4 address.";
+ }
+ identity arp_sha {
+ base match-field;
+ description "Match for ARP source hardware address.";
+ }
+ identity arp_tha {
+ base match-field;
+ description "Match for ARP target hardware address.";
+ }
+ identity ipv6_src {
+ base match-field;
+ description "Match for IPv6 source address.";
+ }
+ identity ipv6_dst {
+ base match-field;
+ description "Match for IPv6 destination address.";
+ }
+ identity ipv6_flabel {
+ base match-field;
+ description "Match for IPv6 Flow Label";
+ }
+ identity icmpv6_type {
+ base match-field;
+ description "Match for ICMPv6 type.";
+ }
+ identity icmpv6_code {
+ base match-field;
+ description "Match for ICMPv6 code.";
+ }
+ identity ipv6_nd_target {
+ base match-field;
+ description "Match for Target address for ND.";
+ }
+ identity ipv6_nd_sll {
+ base match-field;
+ description "Match for Source link-layer for ND.";
+ }
+ identity ipv6_nd_tll {
+ base match-field;
+ description "Match for Target link-layer for ND.";
+ }
+ identity mpls_label {
+ base match-field;
+ description "Match for MPLS label.";
+ }
+ identity mpls_tc {
+ base match-field;
+ description "Match for MPLS TC.";
+ }
+ identity mpls_bos {
+ base match-field;
+ description "Match for MPLS BoS bit.";
+ }
+ identity pbb_isid {
+ base match-field;
+ description "Match for PBB I-SID.";
+ }
+ identity tunnel_id {
+ base match-field;
+ description "Match for Logical Port Metadata";
+ }
+ identity ipv6_exthdr {
+ base match-field;
+ description "Match for IPv6 Extension Header pseudo-field";
+ }
+ identity tcp_flag {
+ base match-field;
+ description "TCP Flag Match";
+ }
+ identity tunnel_ipv4_dst {
+ base match-field;
+ description "IPv4 destination tunnel endpoint address.";
+ }
+ identity tunnel_ipv4_src {
+ base match-field;
+ description "IPv4 source tunnel endpoint address.";
+ }
+
+ // List of (match-field, has-mask) pairs used by set-field properties.
+ grouping set-field-match {
+ list set-field-match {
+ key "match-type";
+ leaf match-type {
+ type identityref {
+ base match-field;
+ }
+ }
+ leaf has-mask {
+ type boolean;
+ }
+ }
+ }
+
+ // One table-features property (OFPTFPT_*); exactly one case is present.
+ grouping table-feature-prop-type {
+ choice table-feature-prop-type {
+ case instructions {
+ container instructions {
+ uses flow:instruction-list;
+ }
+ }
+
+ case instructions-miss {
+ container instructions-miss {
+ uses flow:instruction-list;
+ }
+ }
+
+ case next-table {
+ container tables {
+ leaf-list table-ids {
+ type uint8;
+ }
+ }
+ }
+
+ case next-table-miss {
+ container tables-miss {
+ leaf-list table-ids {
+ type uint8;
+ }
+ }
+ }
+
+ case write-actions {
+ container write-actions {
+ uses action:action-list;
+ }
+ }
+
+ case write-actions-miss {
+ container write-actions-miss {
+ uses action:action-list;
+ }
+ }
+
+ case apply-actions {
+ container apply-actions {
+ uses action:action-list;
+ }
+ }
+
+ case apply-actions-miss {
+ container apply-actions-miss {
+ uses action:action-list;
+ }
+ }
+
+ case match {
+ container match-setfield {
+ uses set-field-match;
+ }
+ }
+
+ case wildcards {
+ container wildcard-setfield {
+ uses set-field-match;
+ }
+ }
+
+ case write-setfield {
+ container write-setfield {
+ uses set-field-match;
+ }
+ }
+
+ case write-setfield-miss {
+ container write-setfield-miss {
+ uses set-field-match;
+ }
+ }
+
+ case apply-setfield {
+ container apply-setfield {
+ uses set-field-match;
+ }
+ }
+
+ case apply-setfield-miss {
+ container apply-setfield-miss {
+ uses set-field-match;
+ }
+ }
+ }
+ }
+
+ // Per-table feature description, keyed by table id.
+ grouping table-features {
+ list table-features {
+ key "table-id";
+
+ leaf table-id {
+ type uint8;
+ }
+
+ leaf name {
+ description "Name of the table";
+ type string;
+ }
+
+ leaf metadata-match {
+ description "Bits of metadata table can match";
+ type uint64;
+ }
+
+ leaf metadata-write {
+ description "Bits of metadata table can write";
+ type uint64;
+ }
+
+ leaf max-entries {
+ description "Max number of entries supported";
+ type uint32;
+ }
+
+ leaf config {
+ description "Bitmap of OFPTC_ values";
+ type table-config;
+ }
+
+ container table-properties {
+ list table-feature-properties {
+ key "order";
+ uses action:ordered;
+ uses table-feature-prop-type;
+ }
+ }
+ }
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+
+ <!-- OSGi bundle packaging the MD-SAL flow service YANG models; depends on
+ the base flow and inventory models plus the shared L2 types. -->
+ <parent>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>flow-model-parent</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>model-flow-service</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>model-flow-base</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>model-inventory</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>opendaylight-l2-types</artifactId>
+ </dependency>
+ </dependencies>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
+</project>
--- /dev/null
+// Transaction tracking for flow-capable nodes: transaction ids handed out
+// per node, plus RPCs to obtain the next id and to finish (barrier) one.
+module flow-capable-transaction {
+ namespace "urn:opendaylight:flow:transaction";
+ prefix type;
+
+ import opendaylight-inventory {prefix inv; revision-date "2013-08-19";}
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+
+ revision "2013-11-03" {
+ description "Initial revision";
+ }
+
+ typedef transaction-id {
+ type uint64;
+ }
+ // This refers to MD-SAL transaction reference.
+ grouping transaction-metadata {
+ leaf transaction-uri {
+ type inet:uri;
+ }
+ }
+
+ // Mixed into RPC outputs that carry a transaction id.
+ grouping transaction-aware {
+ leaf transaction-id {
+ type transaction-id;
+ }
+ }
+
+ // Variant for multipart replies: moreReplies is true while further
+ // parts of the same transaction are still outstanding.
+ grouping multipart-transaction-aware {
+ uses transaction-aware;
+
+ leaf moreReplies {
+ type boolean;
+ default false;
+ }
+ }
+
+ rpc get-next-transaction-id {
+ input {
+ leaf node {
+ ext:context-reference "inv:node-context";
+ type inv:node-ref;
+ }
+ }
+ output {
+ uses transaction-aware;
+ }
+ }
+
+ // Barrier request?
+ rpc finish-transaction {
+ input {
+ leaf node {
+ ext:context-reference "inv:node-context";
+ type inv:node-ref;
+ }
+ leaf transaction-id {
+ type transaction-id;
+ }
+ }
+ }
+}
--- /dev/null
+// Error-message groupings reused by the node-error and sal-flow models.
+module flow-errors {
+ namespace "urn:opendaylight:flow:errors";
+ prefix error;
+
+ revision "2013-11-16" {
+ description "Initial revision of error";
+ }
+
+ // Error categories; the enum order appears to mirror the OpenFlow
+ // ofp_error_type values 0..13, with experimenter pinned to 0xFFFF — confirm
+ // against the OpenFlow 1.3 spec before relying on implicit enum values.
+ typedef error-type {
+ type enumeration {
+ enum hello-failed;
+ enum bad-request;
+ enum bad-action;
+ enum bad-instruction;
+ enum bad-match;
+ enum flow-mod-failed;
+ enum group-mod-failed;
+ enum port-mod-failed;
+ enum table-mod-failed;
+ enum queue-op-failed;
+ enum switch-config-failed;
+ enum role-request-failed;
+ enum meter-mod-failed;
+ enum table-features-failed;
+ enum experimenter {
+ value "65535";
+ }
+ }
+ }
+
+ // Standard error payload: type + numeric code + opaque data string.
+ grouping error-message {
+ leaf type {
+ type error-type;
+ }
+
+ leaf code {
+ type uint16;
+ }
+
+ leaf data {
+ type string;
+ }
+ }
+
+ // Experimenter (vendor) error payload: carries exp-type and experimenter-id
+ // instead of a plain code.
+ grouping experimenter-error-message {
+ leaf type {
+ type error-type;
+ }
+
+ leaf exp-type {
+ type uint16;
+ }
+
+ leaf experimenter-id {
+ type uint32;
+ }
+
+ leaf data {
+ type string;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+// Augments the opendaylight-inventory node/node-connector trees with
+// flow-capable (OpenFlow switch) data: tables, flows, groups, meters,
+// switch features and port details.
+module flow-node-inventory {
+ namespace "urn:opendaylight:flow:inventory";
+ prefix flownode;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import opendaylight-port-types {prefix port;revision-date "2013-09-25";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-table-types {prefix table;revision-date "2013-10-26";}
+ import opendaylight-flow-types {prefix flow;revision-date "2013-10-26";}
+ import opendaylight-group-types {prefix group;revision-date "2013-10-18";}
+ import opendaylight-meter-types {prefix meter;revision-date "2013-09-18";}
+
+ revision "2013-08-19" {
+ description "Flow Capable Node extensions to the Inventory model";
+ }
+
+ // Base identity for switch capability flags; the derived identities below
+ // correspond to the capability bits advertised in the switch features reply.
+ identity feature-capability {
+ }
+
+ identity flow-feature-capability-flow-stats {
+ description "Flow statistics";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-table-stats {
+ description "Table statistics";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-port-stats {
+ description "Port statistics";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-stp {
+ description "802.1d spanning tree";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-reserved {
+ description "Reserved, must be zero";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-ip-reasm {
+ description "Can reassemble IP fragments";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-queue-stats {
+ description "Queue statistics";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-arp-match-ip {
+ description "Match IP addresses in ARP pkts";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-group-stats {
+ description "Group statistics";
+ base feature-capability;
+ }
+
+ identity flow-feature-capability-port-blocked {
+ description "Switch will block looping ports";
+ base feature-capability;
+ }
+
+ // Support level of an individual match/instruction/action feature.
+ grouping feature {
+ leaf support-state {
+ type inv:support-type;
+ }
+ }
+
+ grouping queue {
+ leaf queue-id {
+ type uint32;
+ description "id for the specific queue";
+ mandatory true;
+ }
+ container properties {
+ leaf minimum-rate {
+ type uint32;
+ }
+ leaf maximum-rate{
+ type uint32;
+ }
+ }
+ }
+
+ // Controller-assigned flow identifier (a URI, not the device's view of the flow).
+ typedef flow-id {
+ type inet:uri;
+ }
+
+ // Flow tables of a node; each table keys its flows by the controller flow-id.
+ grouping tables {
+ list table {
+ key "id";
+
+ leaf id {
+ type uint8;
+ }
+
+ uses table:table-features;
+
+ list flow {
+ key "id";
+
+ leaf id {
+ type flow-id;
+ }
+
+ uses flow:flow;
+ }
+ }
+ }
+
+ grouping meters {
+ list meter {
+ key "meter-id";
+ uses meter:meter;
+ }
+ }
+
+ grouping ip-address-grouping {
+ leaf ip-address {
+ description "IP address of a flow capable node.";
+ type inet:ip-address;
+ }
+ }
+
+ // Full flow-capable-node payload: device description strings plus the
+ // tables/groups/meters content and advertised feature support.
+ grouping flow-node {
+ leaf manufacturer {
+ type string;
+ }
+ leaf hardware {
+ type string;
+ }
+ leaf software {
+ type string;
+ }
+ leaf serial-number {
+ type string;
+ }
+ leaf description {
+ type string;
+ }
+
+ uses tables;
+ uses group:groups;
+ uses meters;
+ uses ip-address-grouping;
+ // TODO: ports
+
+ container supported-match-types {
+ list match-type {
+ key "match";
+ uses feature;
+ leaf match {
+ type string; // FIXME: Add identity
+ }
+
+ }
+ }
+
+ container supported-instructions {
+ list instruction-type {
+ key "instruction";
+ uses feature;
+ leaf instruction {
+ type string; // FIXME: Add identity
+ }
+ }
+ }
+
+ container supported-actions {
+ list action-type {
+ key "action";
+ uses feature;
+
+ leaf action {
+ type string; // FIXME: Add identity
+ }
+ }
+ }
+
+ container switch-features {
+
+ leaf max_buffers {
+ type uint32;
+ }
+
+ leaf max_tables {
+ type uint8;
+ }
+
+ leaf-list capabilities {
+ type identityref {
+ base feature-capability;
+ }
+ }
+
+ }
+ }
+
+ grouping flow-node-connector {
+
+ uses port:flow-capable-port;
+ }
+
+ augment "/inv:nodes/inv:node" {
+ ext:augment-identifier "flow-capable-node";
+ uses flow-node;
+ }
+
+ augment "/inv:nodes/inv:node/inv:node-connector" {
+ ext:augment-identifier "flow-capable-node-connector";
+ uses flow-node-connector;
+ }
+
+ augment "/inv:node-updated" {
+ ext:augment-identifier "flow-capable-node-updated";
+ uses flow-node;
+ }
+
+ augment "/inv:node-updated/inv:node-connector" {
+ //ext:identical-augment "flow-capable-node-connector";
+ ext:augment-identifier "flow-capable-node-connector-update-fields";
+ uses flow-node-connector;
+ }
+
+ augment "/inv:node-connector-updated" {
+ ext:augment-identifier "flow-capable-node-connector-updated";
+ uses flow-node-connector;
+ }
+
+ // NOTE: the prefix-less "table" here resolves to this module's own table
+ // list (added to inv:node by the flow-capable-node augment above).
+ augment "/inv:nodes/inv:node/table" {
+ ext:augment-identifier "flow-hash-id-mapping";
+ description "Flow is identified by match and priority on device. So Operational/DS
+ has to simulate that behavior and contract between FlowId and match+priority
+ identification should represent Flow hashCode. Flow has to contain only
+ match priority and flowCookie for create a hashCode";
+ list flow-hash-id-map {
+ key "hash";
+ leaf hash {
+ type string;
+ }
+ leaf flow-id {
+ type flow-id;
+ }
+ }
+ }
+}
--- /dev/null
+// Link-discovery notifications between inventory node connectors, plus a
+// solicit-refresh RPC to trigger re-discovery.
+module flow-topology-discovery {
+ namespace "urn:opendaylight:flow:topology:discovery";
+ prefix flow-node-topo;
+
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+
+ revision "2013-08-19" {
+ description "Flow Capable Node extensions to the Inventory model";
+ }
+
+
+ // A directed link between two node connectors.
+ grouping link {
+ leaf source {
+ type inv:node-connector-ref;
+ }
+ leaf destination {
+ type inv:node-connector-ref;
+ }
+ }
+
+
+ notification link-discovered {
+ uses link;
+ }
+
+ notification link-overutilized {
+ uses link;
+ }
+
+ notification link-removed {
+ uses link;
+ }
+
+ notification link-utilization-normal {
+ uses link;
+ }
+
+ // Requests a fresh round of link discovery; no input or output.
+ rpc solicit-refresh {
+
+ }
+
+}
--- /dev/null
+// Switch configuration service: pushes config flags (fragment handling) and
+// miss-search length to a node.
+module node-config {
+ namespace "urn:opendaylight:module:config";
+ prefix node-config;
+
+ import flow-capable-transaction {prefix tr;}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+
+ revision "2014-10-15" {
+ description "Initial revision of node configuration service";
+ }
+
+ grouping node-ref {
+ uses "inv:node-context-ref";
+ }
+
+
+
+ /** Base configuration structure **/
+ grouping node-config {
+ leaf flag {
+ type string;
+ description "Switch config flag. Expected values FRAGNORMAL, OFPCFRAGDROP, OFPCFRAGREASM, OFPCFRAGMASK";
+ }
+ // Max bytes of a packet sent to the controller on table miss — confirm.
+ leaf miss-search-length{
+ type uint16;
+ }
+ }
+
+ rpc set-config {
+ input {
+ uses node-config;
+ uses tr:transaction-aware;
+ uses node-ref;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+}
--- /dev/null
+// Per-error-type notifications for errors received from a node. Every
+// notification below follows the same shape: the flow-errors payload, the
+// transaction groupings, and the node/object references from sal-flow —
+// one notification per OpenFlow ofp_error type (0..13 and experimenter).
+module node-error {
+ namespace "urn:opendaylight:node:error:service";
+ prefix node-error;
+
+ import sal-flow {prefix flow; revision-date "2013-08-19";}
+ import flow-errors {prefix error;}
+ import flow-capable-transaction {prefix tr;}
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-flow-types {prefix types;revision-date "2013-10-26";}
+ import opendaylight-group-types {prefix group-type;revision-date 2013-10-18;}
+ import opendaylight-meter-types {prefix meter-type;revision-date "2013-09-18";}
+
+ revision "2014-04-10" {
+ description "Initial revision of errors received from a node";
+ }
+
+ notification hello-failed-error-notification {
+ description "Model for ofp_error-Type=0, Hello protocol failed";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification bad-request-error-notification {
+ description "Model for ofp_error-Type=1, Request was not understood.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification bad-action-error-notification {
+ description "Model for ofp_error-Type=2, Error in action description.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification bad-instruction-error-notification {
+ description "Model for ofp_error-Type=3, Error in instruction list.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification bad-match-error-notification {
+ description "Model for ofp_error-Type=4, Error in match.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification flow-mod-error-notification {
+ description "Model for ofp_error-Type=5 - Problem modifying flow entry.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification group-mod-error-notification {
+ description "Model for ofp_error-Type=6 - Problem modifying group entry.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification port-mod-error-notification {
+ description "Model for ofp_error-Type=7 - Port mod request failed.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification table-mod-error-notification {
+ description "Model for ofp_error-Type=8 - Table mod request failed.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification queue-op-error-notification {
+ description "Model for ofp_error-Type=9 - Queue operation failed.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification switch-config-error-notification {
+ description "Model for ofp_error-Type=10 - Switch Config request failed.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification role-request-error-notification {
+ description "Model for ofp_error-Type=11 - Controller Role request failed.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification meter-mod-error-notification {
+ description "Model for ofp_error-Type=12 - Error in meter.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification table-features-error-notification {
+ description "Model for ofp_error-Type=13 - Setting table features failed.";
+
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+
+ notification experimenter-error-notification {
+ description "Model for ofp_error-Type=65535 - Experimenter Error Messages";
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses flow:base-node-error-notification;
+ uses flow:node-error-reference;
+ }
+}
+
--- /dev/null
+// Packet-in notification and packet-out (transmit) RPC for flow-capable nodes.
+// NOTE(review): the ext, yang, types and action-type/table-type imports are
+// only partially referenced below — verify which are actually needed.
+module packet-processing {
+ namespace "urn:opendaylight:packet:service";
+ prefix flow;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import ietf-yang-types {prefix yang;revision-date "2010-09-24";}
+ import opendaylight-l2-types {prefix types;revision-date "2013-08-27";}
+ import opendaylight-match-types {prefix match-type;revision-date "2013-10-26";}
+ import opendaylight-table-types {prefix table-type;revision-date "2013-10-26";}
+ import opendaylight-action-types {prefix action-type;revision-date "2013-11-12";}
+ import opendaylight-flow-types {prefix flow-type;revision-date "2013-10-26";}
+
+
+ revision "2013-07-09" {
+ description "";
+ }
+
+ // Opaque cookie identifying the connection a packet arrived on / should
+ // leave through.
+ typedef connection-cookie {
+ type uint32;
+ }
+
+ // Raw packet bytes plus the connector they were received on.
+ grouping raw-packet {
+ leaf ingress {
+ type inv:node-connector-ref;
+ }
+ leaf payload {
+ type binary;
+ }
+ }
+
+ grouping ethernet-packet {
+ leaf source {
+ type yang:mac-address;
+ }
+
+ leaf destination {
+ type yang:mac-address;
+ }
+ }
+
+ identity packet-in-reason {
+ description "Base identity for all the available packet in reason";
+ }
+
+ identity no-match {
+ base packet-in-reason;
+ description "No matching flow in the classifier";
+ }
+
+ identity send-to-controller {
+ base packet-in-reason;
+ description "Explicit instruction to send packet to controller";
+ }
+
+ identity invalid-ttl {
+ base packet-in-reason;
+ description "Packet with invalid TTL";
+ }
+
+ // Packet-in: the switch forwards a packet (and the matched context) to the
+ // controller, tagged with the reason it was sent up.
+ notification packet-received {
+ leaf connection-cookie {
+ type connection-cookie;
+ }
+
+ leaf flow-cookie {
+ type flow-type:flow-cookie;
+ }
+
+ leaf table-id {
+ type table-type:table-id;
+ }
+
+ leaf packet-in-reason {
+ type identityref {
+ base packet-in-reason;
+ }
+ }
+
+ container match {
+ uses match-type:match;
+ }
+
+ uses raw-packet;
+ }
+
+ // Packet-out: send the given payload out of a node, optionally through a
+ // specific egress connector or buffer, applying the supplied actions.
+ rpc transmit-packet {
+ input {
+ uses inv:node-context-ref;
+
+ leaf connection-cookie {
+ type connection-cookie;
+ }
+
+ leaf egress {
+ type inv:node-connector-ref;
+ }
+ leaf buffer-id {
+ type uint32;
+ }
+
+ uses raw-packet;
+ uses action-type:action-list;
+ }
+ }
+}
--- /dev/null
+// Flow programming service: add/remove/update RPCs, the matching lifecycle
+// notifications, and the error-reference groupings reused by node-error.
+module sal-flow {
+ namespace "urn:opendaylight:flow:service";
+ prefix flow;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-flow-types {prefix types;revision-date "2013-10-26";}
+ import opendaylight-group-types {prefix group-type;revision-date 2013-10-18;}
+ import opendaylight-meter-types {prefix meter-type;revision-date "2013-09-18";}
+ import flow-capable-transaction {prefix tr;}
+ import flow-errors {prefix error;}
+
+ revision "2013-08-19" {
+ description "Initial revision of flow service";
+ }
+
+ // Instance identifier pointing at a flow table in the inventory tree.
+ typedef flow-table-ref {
+ type instance-identifier;
+ }
+
+ // Payload of a switch-originated flow-removed message.
+ grouping node-flow-removed {
+ leaf node {
+ ext:context-reference "inv:node-context";
+ type inv:node-ref;
+ }
+ leaf flow-table {
+ type flow-table-ref;
+ }
+ uses types:flow-mod-removed;
+ }
+
+ // A flow plus the node and table it belongs to.
+ grouping node-flow {
+ uses "inv:node-context-ref";
+
+ leaf flow-table {
+ type flow-table-ref;
+ }
+ uses types:flow;
+ }
+
+ grouping base-node-error-notification {
+ leaf node {
+ ext:context-reference "inv:node-context";
+ type inv:node-ref;
+ }
+ }
+
+ // Identifies the object (flow, group or meter) an error refers to.
+ grouping node-error-reference {
+ choice object-reference {
+ case flow-ref{
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ }
+ case group-ref{
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ }
+ case meter-ref{
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ }
+ }
+ }
+
+ /** Base configuration structure **/
+ grouping flow-update {
+ uses "inv:node-context-ref";
+
+ container original-flow {
+ uses types:flow;
+ }
+ container updated-flow {
+ uses types:flow;
+ }
+ }
+
+ rpc add-flow {
+ input {
+ uses tr:transaction-metadata;
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ uses node-flow;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc remove-flow {
+ input {
+ uses tr:transaction-metadata;
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ uses node-flow;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc update-flow {
+ input {
+ uses tr:transaction-metadata;
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ uses flow-update;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ notification flow-added {
+ uses tr:transaction-metadata;
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ uses node-flow;
+ uses tr:transaction-aware;
+ }
+
+ notification flow-updated {
+ uses tr:transaction-metadata;
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ uses node-flow;
+ uses tr:transaction-aware;
+ }
+
+ notification flow-removed {
+ uses tr:transaction-metadata;
+ leaf flow-ref {
+ type types:flow-ref;
+ }
+ uses node-flow;
+ uses tr:transaction-aware;
+ }
+
+ // Emitted when the switch itself removes a flow (idle/hard timeout, delete).
+ notification switch-flow-removed {
+ uses node-flow-removed;
+ }
+
+ notification node-error-notification {
+ uses error:error-message;
+ uses tr:transaction-aware;
+ uses tr:transaction-metadata;
+ uses node-error-reference;
+ uses base-node-error-notification;
+ }
+
+ notification node-experimenter-error-notification {
+ uses error:experimenter-error-message;
+ uses tr:transaction-aware;
+ }
+}
--- /dev/null
+// Group programming service: add/remove/update RPCs plus lifecycle
+// notifications, mirroring the structure of sal-flow.
+module sal-group {
+ namespace "urn:opendaylight:group:service";
+ prefix group;
+
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-group-types {prefix group-type;revision-date 2013-10-18;}
+ import flow-capable-transaction {prefix tr;}
+
+ revision "2013-09-18" {
+ description "Initial revision of group service";
+ }
+
+ // A group plus the node it belongs to.
+ grouping node-group {
+ uses "inv:node-context-ref";
+ uses group-type:group;
+ }
+
+ /** Base configuration structure **/
+ grouping group-update {
+ uses "inv:node-context-ref";
+
+ container original-group {
+ uses group-type:group;
+ }
+ container updated-group {
+ uses group-type:group;
+ }
+ }
+
+ rpc add-group {
+ input {
+ uses tr:transaction-metadata;
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ uses node-group;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc remove-group {
+ input {
+ uses tr:transaction-metadata;
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ uses node-group;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc update-group {
+ input {
+ uses tr:transaction-metadata;
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ uses group-update;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ notification group-added {
+ uses tr:transaction-metadata;
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ uses node-group;
+ uses tr:transaction-aware;
+ }
+
+ notification group-updated {
+ uses tr:transaction-metadata;
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ uses node-group;
+ uses tr:transaction-aware;
+ }
+
+ notification group-removed {
+ uses tr:transaction-metadata;
+ leaf group-ref {
+ type group-type:group-ref;
+ }
+ uses node-group;
+ uses tr:transaction-aware;
+ }
+}
--- /dev/null
+// Meter programming service: add/remove/update RPCs plus lifecycle
+// notifications, mirroring sal-flow and sal-group.
+module sal-meter {
+ namespace "urn:opendaylight:meter:service";
+ prefix meter;
+
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-meter-types {prefix meter-type;revision-date "2013-09-18";}
+ import flow-capable-transaction {prefix tr;}
+
+ revision "2013-09-18" {
+ description "Initial revision of meter service";
+ }
+
+ // A meter plus the node it belongs to.
+ grouping node-meter {
+ uses "inv:node-context-ref";
+
+ uses meter-type:meter;
+ }
+
+ /** Base configuration structure **/
+ grouping meter-update {
+ uses "inv:node-context-ref";
+
+ container original-meter {
+ uses meter-type:meter;
+ }
+ container updated-meter {
+ uses meter-type:meter;
+ }
+ }
+
+ rpc add-meter {
+ input {
+ uses tr:transaction-metadata;
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses node-meter;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc remove-meter {
+ input {
+ uses tr:transaction-metadata;
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+
+ uses node-meter;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc update-meter {
+ input {
+ uses tr:transaction-metadata;
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+
+ uses meter-update;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ notification meter-added {
+ uses tr:transaction-metadata;
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses node-meter;
+ uses tr:transaction-aware;
+ }
+
+ notification meter-updated {
+ uses tr:transaction-metadata;
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses node-meter;
+ uses tr:transaction-aware;
+ }
+
+ notification meter-removed {
+ uses tr:transaction-metadata;
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses node-meter;
+ uses tr:transaction-aware;
+ }
+}
--- /dev/null
+// Port service: update-port RPC and port status notifications.
+// NOTE(review): the yang-ext import (ext) appears unused here — verify.
+module sal-port {
+ namespace "urn:opendaylight:port:service";
+ prefix port;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-port-types {prefix port-type;revision-date "2013-09-25";}
+ import flow-capable-transaction {prefix tr;}
+
+ revision "2013-11-07" {
+ description "Initial revision of port service";
+ }
+
+ // Port status plus the node it belongs to.
+ grouping node-port {
+ uses "inv:node-context-ref";
+
+ uses port-type:flow-port-status;
+ }
+
+ /** Base configuration structure **/
+ grouping port-update {
+ uses "inv:node-context-ref";
+
+ container original-port {
+ uses port-type:port-mod;
+ }
+ container updated-port {
+ uses port-type:port-mod;
+ }
+ }
+
+ rpc update-port {
+ input {
+ uses port-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ notification port-updated {
+ uses port-update;
+ uses tr:transaction-aware;
+ }
+
+ notification port-removed {
+ uses node-port;
+ }
+}
--- /dev/null
+// Queue service: queue-config retrieval RPC and reply notification.
+module sal-queue {
+ namespace "urn:opendaylight:queue:service";
+ prefix queue;
+
+ import opendaylight-inventory {prefix inv; revision-date "2013-08-19";}
+ import opendaylight-queue-types {prefix queue-type; revision-date "2013-09-25";}
+
+ revision "2013-11-07" {
+ description "Initial revision of queue service";
+ }
+
+ grouping node-queue {
+ uses "inv:node-context-ref";
+
+ uses queue-type:queue-packet;
+ }
+
+
+ // NOTE(review): get-queue takes no input (no node/queue reference) —
+ // verify whether an input node-context-ref was intended.
+ rpc get-queue {
+ output {
+ uses queue-type:queue-packet;
+ }
+ }
+
+ notification queue-get-config-reply {
+ uses node-queue;
+ }
+}
--- /dev/null
+// Table service: update-table RPC (table features) and update notification.
+module sal-table {
+ namespace "urn:opendaylight:table:service";
+ prefix table;
+
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-table-types {prefix table-type;revision-date "2013-10-26";}
+ import flow-capable-transaction {prefix tr;}
+
+ revision "2013-10-26" {
+ description "Initial revision of table service";
+ }
+
+ /** Base configuration structure **/
+ grouping table-update {
+ uses "inv:node-context-ref";
+ container original-table {
+ uses table-type:table-features;
+ }
+ container updated-table {
+ uses table-type:table-features;
+ }
+ }
+
+ rpc update-table {
+ input {
+ uses table-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+
+ // Multipart-aware: table-features replies may arrive in several parts.
+ notification table-updated {
+ uses "inv:node-context-ref";
+ uses tr:multipart-transaction-aware;
+ uses table-type:table-features;
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Maven module bundling the OpenFlow statistics YANG models
+     (flow/table/group/meter statistics) as an OSGi bundle. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>flow-model-parent</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>model-flow-statistics</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>model-flow-service</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>model-inventory</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>opendaylight-l2-types</artifactId>
+ </dependency>
+ </dependencies>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
+</project>
--- /dev/null
+// Flow statistics: augments per-flow and per-table statistics onto the
+// inventory tree and defines the statistics-request RPCs and update
+// notifications.
+module opendaylight-flow-statistics {
+ namespace "urn:opendaylight:flow:statistics";
+ prefix flowstat;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-flow-types {prefix flow-types;revision-date "2013-10-26";}
+ import opendaylight-statistics-types {prefix stat-types;revision-date "2013-09-25";}
+ import opendaylight-table-types {prefix table-types;revision-date "2013-10-26";}
+ import flow-node-inventory {prefix flow-node;revision-date "2013-08-19";}
+ import flow-capable-transaction {prefix tr;}
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+
+
+ revision "2013-08-19" {
+ description "Initial revision of flow statistics service";
+ }
+
+ //Augment flow statistics data to the flow-capable-node->table->flow
+ augment "/inv:nodes/inv:node/flow-node:table/flow-node:flow" {
+ ext:augment-identifier "flow-statistics-data";
+ uses flow-statistics;
+ }
+
+ grouping flow-statistics {
+ container flow-statistics {
+ //config "false";
+ uses stat-types:generic-statistics;
+ }
+ }
+
+ // NOTE(review): duplicates flow-node-inventory's flow-id typedef — consider
+ // reusing that one instead.
+ typedef flow-id {
+ description "flow id";
+ type inet:uri;
+ }
+
+ grouping flow-and-statistics-map-list {
+ description "List of flow and statistics map";
+ list flow-and-statistics-map-list {
+ key "flow-id";
+ leaf flow-id {
+ type flow-id;
+ }
+ uses flow-and-statistics-map;
+ }
+ }
+
+ grouping flow-and-statistics-map{
+ description "Mapping between flow and its statistics";
+ uses flow-types:flow;
+ uses stat-types:generic-statistics;
+ }
+
+ // RPC calls to fetch flow statistics
+ rpc get-all-flows-statistics-from-all-flow-tables {
+ description "Fetch statistics of all the flow present in all the flow tables of the switch";
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses flow-and-statistics-map-list;
+ uses tr:transaction-aware;
+ }
+
+ }
+
+ rpc get-all-flow-statistics-from-flow-table {
+ description "Fetch statistics of all the flow present in the specific flow table of the switch";
+ input {
+ uses inv:node-context-ref;
+ leaf table-id {
+ type table-types:table-id;
+ }
+ }
+ output {
+ uses flow-and-statistics-map-list;
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc get-flow-statistics-from-flow-table {
+ description "Fetch statistics of the specific flow present in the specific flow table of the switch";
+ input {
+ uses inv:node-context-ref;
+ uses flow-types:flow;
+ }
+ output {
+ uses flow-and-statistics-map-list;
+ uses tr:transaction-aware;
+ }
+ }
+
+ // NOTE(review): declares its own moreReplies leaf instead of reusing
+ // tr:multipart-transaction-aware — confirm this is intentional.
+ notification flows-statistics-update {
+ description "Flows statistics sent by switch";
+ leaf moreReplies {
+ type boolean;
+ }
+ uses inv:node;
+ uses flow-and-statistics-map-list;
+ uses tr:transaction-aware;
+ }
+
+ //Models for aggregate flow statistics collection
+ augment "/inv:nodes/inv:node/flow-node:table" {
+ ext:augment-identifier "aggregate-flow-statistics-data";
+ uses aggregate-flow-statistics;
+ }
+
+ grouping aggregate-flow-statistics {
+ container aggregate-flow-statistics {
+ //config "false";
+ uses stat-types:aggregate-flow-statistics;
+ }
+ }
+
+ // RPC calls to fetch aggregate flow statistics
+ rpc get-aggregate-flow-statistics-from-flow-table-for-all-flows {
+ description "Fetch aggregate statistics for all the flows present in the specific flow table of the switch";
+ input {
+ uses inv:node-context-ref;
+ leaf table-id {
+ type table-types:table-id;
+ }
+ }
+ output {
+ uses stat-types:aggregate-flow-statistics;
+ uses tr:transaction-aware;
+ }
+ }
+ rpc get-aggregate-flow-statistics-from-flow-table-for-given-match {
+ description "Fetch aggregate statistics for all the flow matches to the given match from the given table of the switch";
+ input {
+ uses inv:node-context-ref;
+ uses flow-types:flow;
+ }
+ output {
+ uses stat-types:aggregate-flow-statistics;
+ uses tr:transaction-aware;
+ }
+ }
+
+ notification aggregate-flow-statistics-update {
+ description "Aggregate flow statistics for a table, sent by switch";
+ uses inv:node;
+ uses stat-types:aggregate-flow-statistics;
+ uses tr:multipart-transaction-aware;
+ }
+}
--- /dev/null
+// Flow-table statistics: augments per-table statistics onto the inventory
+// tree and defines the fetch RPC and update notification.
+module opendaylight-flow-table-statistics {
+ namespace "urn:opendaylight:flow:table:statistics";
+ prefix flowtablestat;
+
+ import flow-capable-transaction {prefix tr;}
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import flow-node-inventory {prefix flow-node;revision-date "2013-08-19";}
+ import opendaylight-table-types {prefix table-types;revision-date "2013-10-26";}
+ import opendaylight-statistics-types {prefix stat-types;revision-date "2013-09-25";}
+
+
+ contact
+ "Anilkumar Vishnoi
+ Email: avishnoi@in.ibm.com";
+
+ revision "2013-12-15" {
+ description "Initial revision of flow table statistics model";
+ }
+
+ //Augment flow table statistics data to the table
+ augment "/inv:nodes/inv:node/flow-node:table" {
+ ext:augment-identifier "flow-table-statistics-data";
+ uses flow-table-statistics;
+ }
+
+ grouping flow-table-statistics {
+ container flow-table-statistics {
+ //config "false";
+ uses stat-types:generic-table-statistics;
+ }
+ }
+
+ //RPC calls to fetch flow table statistics
+ grouping flow-table-and-statistics-map {
+ list flow-table-and-statistics-map {
+ key "table-id";
+ leaf table-id {
+ type table-types:table-id;
+ }
+ uses stat-types:generic-table-statistics;
+ }
+ }
+
+ rpc get-flow-tables-statistics {
+ description "Fetch statistics of all the flow tables present on the target node";
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses flow-table-and-statistics-map;
+ uses tr:transaction-aware;
+ }
+ }
+
+ //Notification to receive table statistics update
+
+ notification flow-table-statistics-update {
+ description "Receive flow table statistics update";
+
+ uses inv:node;
+ uses flow-table-and-statistics-map;
+ uses tr:multipart-transaction-aware;
+ }
+}
--- /dev/null
+// Group statistics: augments group statistics/description/features onto the
+// inventory tree and defines the fetch RPCs and update notifications.
+module opendaylight-group-statistics {
+ namespace "urn:opendaylight:group:statistics";
+ prefix groupstat;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-group-types {prefix group-types;revision-date "2013-10-18";}
+ import flow-capable-transaction {prefix tr;}
+ import flow-node-inventory {prefix fni;}
+
+ contact
+ "Anilkumar Vishnoi
+ Email: avishnoi@in.ibm.com";
+
+ revision "2013-11-11" {
+ description "Initial revision of group statistics service";
+ }
+
+ grouping group-statistics {
+ container group-statistics {
+ //config "false";
+ uses group-types:group-statistics;
+ }
+ }
+
+ augment "/inv:nodes/inv:node/fni:group" {
+ ext:augment-identifier "node-group-statistics";
+ uses group-statistics;
+ }
+
+ grouping group-desc {
+ container group-desc {
+ //config "false";
+ uses group-types:group;
+ }
+ }
+
+ augment "/inv:nodes/inv:node/fni:group" {
+ ext:augment-identifier "node-group-desc-stats";
+ uses group-desc;
+ }
+
+ grouping group-features {
+ container group-features {
+ //config "false";
+ uses group-types:group-features-reply;
+ }
+ }
+
+ augment "/inv:nodes/inv:node" {
+ ext:augment-identifier "node-group-features";
+ uses group-features;
+ }
+
+ // RPC calls
+ rpc get-all-group-statistics {
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses group-types:group-statistics-reply;
+ uses tr:transaction-aware;
+ }
+
+ }
+
+ rpc get-group-statistics {
+ input {
+ uses inv:node-context-ref;
+ leaf group-id{
+ type group-types:group-id;
+ }
+
+ }
+ output {
+ uses group-types:group-statistics-reply;
+ uses tr:transaction-aware;
+ }
+
+ }
+
+ rpc get-group-description {
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses group-types:group-desc-stats-reply;
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc get-group-features {
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses group-types:group-features-reply;
+ uses tr:transaction-aware;
+ }
+ }
+
+
+ //Notification calls
+
+ notification group-statistics-updated {
+ uses inv:node;
+ uses group-types:group-statistics-reply;
+ uses tr:multipart-transaction-aware;
+ }
+
+ notification group-desc-stats-updated {
+ uses inv:node;
+ uses group-types:group-desc-stats-reply;
+ uses tr:multipart-transaction-aware;
+ }
+
+ notification group-features-updated {
+ uses inv:node;
+ uses group-types:group-features-reply;
+ uses tr:multipart-transaction-aware;
+ }
+}
--- /dev/null
+module opendaylight-meter-statistics {
+ namespace "urn:opendaylight:meter:statistics";
+ prefix meterstat;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import flow-node-inventory {prefix flow-node;revision-date "2013-08-19";}
+ import opendaylight-meter-types {prefix meter-types;revision-date "2013-09-18";}
+ import flow-capable-transaction {prefix tr;}
+
+ contact
+ "Anilkumar Vishnoi
+ Email: avishnoi@in.ibm.com";
+
+ revision "2013-11-11" {
+ description "Initial revision of meter statistics service";
+ }
+
+ augment "/inv:nodes/inv:node/flow-node:meter" {
+ ext:augment-identifier "node-meter-statistics";
+ container meter-statistics {
+ //config "false";
+ uses meter-types:meter-statistics;
+ }
+ }
+
+ augment "/inv:nodes/inv:node/flow-node:meter" {
+ ext:augment-identifier "node-meter-config-stats";
+ container meter-config-stats {
+ //config "false";
+ uses meter-types:meter;
+ }
+ }
+
+ augment "/inv:nodes/inv:node" {
+ ext:augment-identifier "node-meter-features";
+ container meter-features {
+ //config "false";
+ uses meter-types:meter-features-reply;
+ }
+ }
+
+ // RPC calls
+ rpc get-all-meter-statistics {
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses meter-types:meter-statistics-reply;
+ uses tr:transaction-aware;
+ }
+
+ }
+
+ rpc get-meter-statistics {
+ input {
+ uses inv:node-context-ref;
+ leaf meter-id{
+ type meter-types:meter-id;
+ }
+ }
+ output {
+ uses meter-types:meter-statistics-reply;
+ uses tr:transaction-aware;
+ }
+
+ }
+
+ rpc get-all-meter-config-statistics {
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses meter-types:meter-config-stats-reply;
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc get-meter-features {
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses meter-types:meter-features-reply;
+ uses tr:transaction-aware;
+ }
+ }
+
+
+ //Notification calls
+
+ notification meter-statistics-updated {
+ uses inv:node;
+ uses meter-types:meter-statistics-reply;
+ uses tr:multipart-transaction-aware;
+ }
+
+ notification meter-config-stats-updated {
+ uses inv:node;
+ uses meter-types:meter-config-stats-reply;
+ uses tr:multipart-transaction-aware;
+ }
+
+ notification meter-features-updated {
+ uses inv:node;
+ uses meter-types:meter-features-reply;
+ uses tr:multipart-transaction-aware;
+ }
+}
--- /dev/null
+module opendaylight-port-statistics {
+ namespace "urn:opendaylight:port:statistics";
+ prefix portstat;
+
+ import flow-capable-transaction {prefix tr;}
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-statistics-types {prefix stat-types;revision-date "2013-09-25";}
+
+ contact
+ "Anilkumar Vishnoi
+ Email: avishnoi@in.ibm.com";
+
+ revision "2013-12-14" {
+ description "Initial revision of port statistics model";
+ }
+
+ //Augment port statistics data to the flow-capable-node-connector
+ augment "/inv:nodes/inv:node/inv:node-connector" {
+ ext:augment-identifier "flow-capable-node-connector-statistics-data";
+ uses flow-capable-node-connector-statistics;
+ }
+
+ grouping flow-capable-node-connector-statistics {
+ container flow-capable-node-connector-statistics {
+ //config "false";
+ uses stat-types:node-connector-statistics;
+ }
+ }
+
+ // RPC calls
+ rpc get-all-node-connectors-statistics {
+ description "Get statistics for all node connectors from the node";
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses node-connector-statistics-and-port-number-map;
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc get-node-connector-statistics {
+ description "Get statistics for given node connector from the node";
+ input {
+ uses inv:node-context-ref;
+ leaf node-connector-id {
+ type inv:node-connector-id;
+ }
+ }
+ output {
+ uses stat-types:node-connector-statistics;
+ uses tr:transaction-aware;
+ }
+ }
+
+ //Notification for node connector statistics update
+ grouping node-connector-statistics-and-port-number-map {
+ description "List of map - node connectors and their statistics";
+ list node-connector-statistics-and-port-number-map {
+ key "node-connector-id";
+ leaf node-connector-id {
+ type inv:node-connector-id;
+ }
+ uses stat-types:node-connector-statistics;
+ }
+ }
+
+ notification node-connector-statistics-update {
+ uses inv:node;
+ uses node-connector-statistics-and-port-number-map;
+ uses tr:multipart-transaction-aware;
+ }
+}
--- /dev/null
+module opendaylight-queue-statistics {
+ namespace "urn:opendaylight:queue:statistics";
+ prefix queuestat;
+
+ import flow-capable-transaction {prefix tr;}
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import flow-node-inventory {prefix flow-node;revision-date "2013-08-19";}
+ import opendaylight-queue-types {prefix queue-types;revision-date "2013-09-25";}
+ import opendaylight-statistics-types {prefix stat-types;revision-date "2013-09-25";}
+
+ contact
+ "Anilkumar Vishnoi
+ Email: avishnoi@in.ibm.com";
+
+ revision "2013-12-16" {
+ description "Initial revision of queue statistics model";
+ }
+
+ //Augment queue statistics data to the flow-capable-node-connector
+ augment "/inv:nodes/inv:node/inv:node-connector/flow-node:queue" {
+ ext:augment-identifier "flow-capable-node-connector-queue-statistics-data";
+ uses flow-capable-node-connector-queue-statistics;
+ }
+
+ grouping flow-capable-node-connector-queue-statistics {
+ container flow-capable-node-connector-queue-statistics {
+ //config "false";
+ uses stat-types:generic-queue-statistics;
+ }
+ }
+
+ //RPC calls to fetch queue statistics
+ grouping queue-id-and-statistics-map {
+ list queue-id-and-statistics-map {
+ key "queue-id node-connector-id";
+ leaf queue-id {
+ type queue-types:queue-id;
+ }
+ leaf node-connector-id {
+ type inv:node-connector-id;
+ }
+
+ uses stat-types:generic-queue-statistics;
+ }
+ }
+
+ rpc get-all-queues-statistics-from-all-ports {
+ description "Get statistics for all the queues attached to all the ports from the node";
+ input {
+ uses inv:node-context-ref;
+ }
+ output {
+ uses queue-id-and-statistics-map;
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc get-all-queues-statistics-from-given-port {
+ description "Get statistics for all queues for given port of the node";
+ input {
+ uses inv:node-context-ref;
+ leaf node-connector-id {
+ type inv:node-connector-id;
+ }
+ }
+ output {
+ uses queue-id-and-statistics-map;
+ uses tr:transaction-aware;
+ }
+ }
+
+ rpc get-queue-statistics-from-given-port {
+ description "Get statistics for the given queue from the given port of the node";
+ input {
+ uses inv:node-context-ref;
+ leaf node-connector-id {
+ type inv:node-connector-id;
+ }
+ leaf queue-id {
+ type queue-types:queue-id;
+ }
+ }
+ output {
+ uses queue-id-and-statistics-map;
+ uses tr:transaction-aware;
+ }
+ }
+
+ //Notification for port statistics update
+
+ notification queue-statistics-update {
+ uses inv:node;
+ uses queue-id-and-statistics-map;
+ uses tr:multipart-transaction-aware;
+ }
+}
--- /dev/null
+module opendaylight-statistics-types {
+ namespace "urn:opendaylight:model:statistics:types";
+ prefix stat-types;
+
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+
+ revision "2013-09-25" {
+ description "Initial revision of statistics types model";
+ }
+
+ grouping duration {
+ container duration {
+ leaf second {
+ type yang:counter32;
+ }
+ leaf nanosecond {
+ type yang:counter32;
+ }
+ }
+ }
+
+ grouping node-connector-statistics {
+ container packets {
+ leaf received {
+ type uint64;
+ }
+ leaf transmitted {
+ type uint64;
+ }
+ }
+ container bytes {
+ leaf received {
+ type uint64;
+ }
+ leaf transmitted {
+ type uint64;
+ }
+ }
+ leaf receive-drops {
+ type uint64;
+ }
+ leaf transmit-drops {
+ type uint64;
+ }
+ leaf receive-errors {
+ type uint64;
+ }
+ leaf transmit-errors {
+ type uint64;
+ }
+ leaf receive-frame-error {
+ type uint64;
+ }
+ leaf receive-over-run-error {
+ type uint64;
+ }
+ leaf receive-crc-error {
+ type uint64;
+ }
+ leaf collision-count {
+ type uint64;
+ }
+ uses duration;
+ }
+
+ grouping generic-statistics {
+ description "Generic grouping for statistics";
+ leaf packet-count {
+ type yang:counter64;
+ }
+
+ leaf byte-count {
+ type yang:counter64;
+ }
+ uses duration;
+ }
+
+ grouping generic-table-statistics {
+ description "Generic grouping holding generic statistics related to switch table";
+ leaf active-flows {
+ type yang:counter32;
+ }
+ leaf packets-looked-up {
+ type yang:counter64;
+ }
+ leaf packets-matched {
+ type yang:counter64;
+ }
+ }
+
+ grouping aggregate-flow-statistics {
+ description "Aggregate flow statistics";
+ leaf packet-count {
+ type yang:counter64;
+ }
+
+ leaf byte-count {
+ type yang:counter64;
+ }
+ leaf flow-count {
+ type yang:counter32;
+ }
+ }
+
+ grouping generic-queue-statistics {
+ description "Generic statistics of switch port attached queues.";
+ leaf transmitted-bytes {
+ type yang:counter64;
+ }
+
+ leaf transmitted-packets {
+ type yang:counter64;
+ }
+
+ leaf transmission-errors {
+ type yang:counter64;
+ }
+ uses duration;
+ }
+
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>flow-model-parent</artifactId>
+ <packaging>pom</packaging>
+
+ <modules>
+ <module>model-flow-base</module>
+ <module>model-flow-service</module>
+ <module>model-flow-statistics</module>
+ </modules>
+
+ <properties>
+ <bundle.plugin.version>2.4.0</bundle.plugin.version>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ <Import-Package>org.opendaylight.yangtools.yang.binding.annotations, *</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.wadl.generator.maven.WadlGenerator</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
+
+</project>
--- /dev/null
+*.yang
\ No newline at end of file
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>of-switch-config-pusher</artifactId>
<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.openflowplugin</groupId>
- <artifactId>openflowplugin-parent</artifactId>
- <version>0.1.0-SNAPSHOT</version>
- </parent>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>openflowplugin-parent</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ </parent>
- <groupId>org.opendaylight.openflowplugin</groupId>
- <artifactId>applications</artifactId>
- <version>0.1.0-SNAPSHOT</version>
- <name>applications</name>
- <url>http://maven.apache.org</url>
- <packaging>pom</packaging>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ <name>applications</name>
+ <url>http://maven.apache.org</url>
+ <packaging>pom</packaging>
+<!--
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>openflowplugin-api</artifactId>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+ -->
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yang.binding.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ </codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>
+ urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang
+ </namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl
+ </codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/sal</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.parent.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yang.binding.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.8</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <!--executions> <execution> <id>bundle-manifest</id> <phase>process-classes</phase>
+ <goals> <goal>manifest</goal> </goals> </execution> </executions -->
+ <configuration>
+ <instructions>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ <!--This plugin's configuration is used to store Eclipse m2e settings
+ only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>${lifecycle.mapping.version}</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>net.alchim31.maven</groupId>
+ <artifactId>scala-maven-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>compile</goal>
+ <goal>testCompile</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute></execute>
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>${jacoco.version}</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ </plugin>
+ <!--
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ -->
+ </plugins>
+ </build>
- <dependencyManagement>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.openflowplugin</groupId>
- <artifactId>openflowplugin-api</artifactId>
- <version>${project.version}</version>
- </dependency>
- </dependencies>
- </dependencyManagement>
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-maven-plugin</artifactId>
- <version>${yang.binding.version}</version>
- <executions>
- <execution>
- <goals>
- <goal>generate-sources</goal>
- </goals>
- <configuration>
- <codeGenerators>
- <generator>
- <codeGeneratorClass>
- org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
- </codeGeneratorClass>
- <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
- <additionalConfiguration>
- <namespaceToPackage1>
- urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang
- </namespaceToPackage1>
- </additionalConfiguration>
- </generator>
- <generator>
- <codeGeneratorClass>
- org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl
- </codeGeneratorClass>
- <outputBaseDir>${project.build.directory}/generated-sources/sal</outputBaseDir>
- </generator>
- </codeGenerators>
- <inspectDependencies>true</inspectDependencies>
- </configuration>
- </execution>
- </executions>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>yang-jmx-generator-plugin</artifactId>
- <version>${config.parent.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>maven-sal-api-gen-plugin</artifactId>
- <version>${yang.binding.version}</version>
- <type>jar</type>
- </dependency>
- </dependencies>
+ <reporting>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <version>${findbugs.maven.plugin.version}</version>
+ <configuration>
+ <effort>Max</effort>
+ <threshold>Low</threshold>
+ <goal>site</goal>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>jdepend-maven-plugin</artifactId>
+ <version>${jdepend.maven.plugin.version}</version>
+ </plugin>
+ </plugins>
+ </reporting>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</developerConnection>
+ <tag>HEAD</tag>
+ </scm>
- </plugin>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- <version>1.8</version>
- <executions>
- <execution>
- <id>add-source</id>
- <phase>generate-sources</phase>
- <goals>
- <goal>add-source</goal>
- </goals>
- <configuration>
- <sources>
- <source>${project.build.directory}/generated-sources/config</source>;
- </sources>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
-
- <modules>
- <module>table-miss-enforcer</module>
- <module>of-switch-config-pusher</module>
- <module>lldp-speaker</module>
- </modules>
+ <modules>
+ <module>table-miss-enforcer</module>
+ <module>of-switch-config-pusher</module>
+ <module>lldp-speaker</module>
+ <!-- Base Models -->
+ <module>model</module>
+ <module>inventory-manager</module>
+ <module>statistics-manager</module>
+ <module>statistics-manager-config</module>
+ <module>topology-manager</module>
+ <module>forwardingrules-manager</module>
+ <module>topology-lldp-discovery</module>
+ </modules>
</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>statistics-manager-config</artifactId>
+ <description>Configuration files for statistics manager</description>
+ <packaging>jar</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/30-statistics-manager.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:statsmanager="urn:opendaylight:params:xml:ns:yang:controller:md:sal:statistics-manager">
+ statsmanager:statistics-manager
+ </type>
+ <name>statistics-manager</name>
+
+ <rpc-registry>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <name>binding-rpc-broker</name>
+ </rpc-registry>
+
+ <data-broker>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
+ </data-broker>
+
+ <notification-service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
+ binding:binding-notification-service
+ </type>
+ <name>binding-notification-broker</name>
+ </notification-service>
+
+ <statistics-manager-settings>
+ <min-request-net-monitor-interval>3000</min-request-net-monitor-interval>
+ <max-nodes-for-collector>16</max-nodes-for-collector>
+ </statistics-manager-settings>
+
+ </module>
+ </modules>
+ </data>
+ </configuration>
+
+ <required-capabilities>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:statistics-manager?module=statistics-manager&amp;revision=2014-09-25</capability>
+ </required-capabilities>
+
+</snapshot>
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>statistics-manager</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-broker-impl</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-base</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-statistics</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</developerConnection>
+ <tag>HEAD</tag>
+ </scm>
+</project>
--- /dev/null
+package org.opendaylight.controller.config.yang.md.sal.statistics_manager;
+
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.impl.StatisticsManagerConfig;
+import org.opendaylight.controller.md.statistics.manager.impl.StatisticsManagerImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Config-subsystem module that builds the {@link StatisticsManagerConfig},
+ * instantiates the {@link StatisticsManagerImpl} provider and starts it.
+ * Configuration values missing from the XML ConfigSubsystem input fall back
+ * to built-in defaults.
+ */
+public class StatisticsManagerModule extends org.opendaylight.controller.config.yang.md.sal.statistics_manager.AbstractStatisticsManagerModule {
+    private static final Logger LOG = LoggerFactory.getLogger(StatisticsManagerModule.class);
+
+    /* Defaults used when the ConfigSubsystem does not supply a value. */
+    private static final int MAX_NODES_FOR_COLLECTOR_DEFAULT = 16;
+    private static final int MIN_REQUEST_NET_MONITOR_INTERVAL_DEFAULT = 3000;
+
+    public StatisticsManagerModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+        super(identifier, dependencyResolver);
+    }
+
+    public StatisticsManagerModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, final StatisticsManagerModule oldModule, final java.lang.AutoCloseable oldInstance) {
+        super(identifier, dependencyResolver, oldModule, oldInstance);
+    }
+
+    @Override
+    public void customValidation() {
+        // add custom validation for module attributes here.
+    }
+
+    @Override
+    public java.lang.AutoCloseable createInstance() {
+        LOG.info("StatisticsManager module initialization.");
+        final StatisticsManagerConfig config = createConfig();
+        // Capture the provider in a final local so the returned closeable always
+        // closes the instance created by THIS call, even if createInstance()
+        // is invoked again later and would otherwise reassign a shared field.
+        final StatisticsManager statisticsManagerProvider = new StatisticsManagerImpl(getDataBrokerDependency(), config);
+        statisticsManagerProvider.start(getNotificationServiceDependency(), getRpcRegistryDependency());
+        LOG.info("StatisticsManager started successfully.");
+        return new AutoCloseable() {
+            @Override
+            public void close() throws Exception {
+                try {
+                    statisticsManagerProvider.close();
+                }
+                catch (final Exception e) {
+                    // best-effort shutdown: log and continue so close() never propagates
+                    LOG.error("Unexpected error by stopping StatisticsManager module", e);
+                }
+                LOG.info("StatisticsManager module stopped.");
+            }
+        };
+    }
+
+    /**
+     * Builds the {@link StatisticsManagerConfig} from the ConfigSubsystem
+     * settings, substituting built-in defaults for any missing value.
+     *
+     * @return fully populated configuration object
+     */
+    public StatisticsManagerConfig createConfig() {
+        final StatisticsManagerConfig.StatisticsManagerConfigBuilder builder = StatisticsManagerConfig.builder();
+        if (getStatisticsManagerSettings() != null && getStatisticsManagerSettings().getMaxNodesForCollector() != null) {
+            builder.setMaxNodesForCollector(getStatisticsManagerSettings().getMaxNodesForCollector());
+        } else {
+            LOG.warn("Load the xml ConfigSubsystem input value fail! MaxNodesForCollector value is set to {} ",
+                    MAX_NODES_FOR_COLLECTOR_DEFAULT);
+            builder.setMaxNodesForCollector(MAX_NODES_FOR_COLLECTOR_DEFAULT);
+        }
+        if (getStatisticsManagerSettings() != null &&
+                getStatisticsManagerSettings().getMinRequestNetMonitorInterval() != null) {
+            builder.setMinRequestNetMonitorInterval(getStatisticsManagerSettings().getMinRequestNetMonitorInterval());
+        } else {
+            // NOTE: typo "CofnigSubsystem" fixed to match the message above
+            LOG.warn("Load the xml ConfigSubsystem input value fail! MinRequestNetMonitorInterval value is set to {} ",
+                    MIN_REQUEST_NET_MONITOR_INTERVAL_DEFAULT);
+            builder.setMinRequestNetMonitorInterval(MIN_REQUEST_NET_MONITOR_INTERVAL_DEFAULT);
+        }
+        return builder.build();
+    }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: statistics-manager yang module local name: statistics-manager
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Oct 07 14:09:47 CEST 2014
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.statistics_manager;
+/**
+ * Factory for {@link StatisticsManagerModule} instances; generated scaffolding
+ * with no behaviour beyond the generated abstract factory.
+ */
+public class StatisticsManagerModuleFactory extends org.opendaylight.controller.config.yang.md.sal.statistics_manager.AbstractStatisticsManagerModuleFactory {
+
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatListeningCommiter
+ * Contract for {@link DataChangeListener} implementer classes.
+ * The interface represents a contract between Config/DataStore changes and
+ * Operational/DataStore commits. Every Operational/DataStore commit has to
+ * be represented as RPC Device response Notification processing, so the
+ * Operational/DS may contain only a real mirror of the OF Device.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 27, 2014
+ */
+public interface StatListeningCommiter<T extends DataObject, N extends NotificationListener> extends DataChangeListener, StatNotifyCommiter<N> {
+
+
+    /**
+     * Every StatListeningCommiter implementer has to clean its actual state
+     * for all cached data related to the disconnected node.
+     * The method prevents unwanted dataStore changes.
+     *
+     * @param nodeIdent identifier of the disconnected {@link Node}
+     */
+    void cleanForDisconnect(InstanceIdentifier<Node> nodeIdent);
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.OpendaylightInventoryListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNodeRegistration
+ * Represents a {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener} in Operational/DataStore for ADD / REMOVE
+ * actions which represent the connect / disconnect OF actions.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Sep 5, 2014
+ */
+public interface StatNodeRegistration extends OpendaylightInventoryListener, AutoCloseable {
+
+    /**
+     * Registers a {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode} with the {@link StatisticsManager}
+     * for permanent statistics collecting by {@link StatPermCollector}, and
+     * serves as a validation check against the Operational/DS to identify
+     * a connected {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}.
+     *
+     * @param keyIdent path to the node's {@link SwitchFeatures}
+     * @param data switch features reported by the device
+     * @param nodeIdent path to the connected {@link Node}
+     */
+    void connectFlowCapableNode(InstanceIdentifier<SwitchFeatures> keyIdent,
+            SwitchFeatures data, InstanceIdentifier<Node> nodeIdent);
+
+    /**
+     * Cuts the {@link Node} registration for {@link StatPermCollector}.
+     *
+     * @param keyIdent path to the disconnected {@link Node}
+     */
+    void disconnectFlowCapableNode(InstanceIdentifier<Node> keyIdent);
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatNotifyCommiter
+ * Contract for notification implementer classes.
+ * The interface represents a contract between RPC Device Notifications
+ * and Operational/DataStore commits.
+ *
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 28, 2014
+ */
+public interface StatNotifyCommiter<N extends NotificationListener> extends AutoCloseable, NotificationListener {
+
+
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import java.util.List;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatPermCollector
+ * Implementations run ({@link Runnable}) the statistics collecting process,
+ * DataObject statistics by DataObject statistics, for every {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}.
+ * Every statistics request waits for the previous one to finish; only when all
+ * statistics finish is the next {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+ * collected. A minimal time for starting the next round across the whole network
+ * can be configured, but everything depends on network possibility.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 28, 2014
+ */
+public interface StatPermCollector extends Runnable, AutoCloseable {
+
+    /**
+     * StatCapabTypes
+     * Enum refers to the ofp_statistics capabilities fields from the OF Switch
+     * capabilities specification, which come as post-HandShake information
+     * from the OF Switch; the Inventory Manager adds all of them to the
+     * Operational/DS.
+     * If the capabilities are not added (for any reason) the NodeRegistrator
+     * adds all StatCapabTypes for the {@link Node}.
+     */
+    public enum StatCapabTypes {
+        /**
+         * OFPC_FLOW_STATS
+         */
+        FLOW_STATS,
+        /**
+         * OFPC_TABLE_STATS
+         */
+        TABLE_STATS,
+        /**
+         * OFPC_PORT_STATS
+         */
+        PORT_STATS,
+        /**
+         * OFPC_GROUP_STATS
+         */
+        GROUP_STATS,
+        /**
+         * OFPC_QUEUE_STATS
+         */
+        QUEUE_STATS,
+        /**
+         * Meter statistics has no support from OF Switch capabilities
+         * so we have to try to get statistics for it and wait for a response:
+         * an Error or a response package with results.
+         */
+        METER_STATS
+    }
+
+    /**
+     * Adds a newly connected node to the permanent statistics collecting process.
+     *
+     * @param nodeIdent path to the connected {@link Node}
+     * @param statTypes statistics capability types the node supports
+     * @param nrOfSwitchTables number of switch tables reported by the node
+     * @return true/false if the {@link Node} was added successfully
+     */
+    boolean connectedNodeRegistration(InstanceIdentifier<Node> nodeIdent,
+            List<StatCapabTypes> statTypes, Short nrOfSwitchTables);
+
+    /**
+     * All disconnected Nodes need to be removed from the stat list of Nodes.
+     *
+     * @param nodeIdent path to the disconnected {@link Node}
+     * @return true/false if the {@link Node} was removed successfully
+     */
+    boolean disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+
+    /**
+     * Adds a new feature {@link StatCapabTypes} to the Node identified by
+     * nodeIdent -> InstanceIdentifier<Node>.
+     *
+     * @param nodeIdent path to the target {@link Node}
+     * @param statCapab additional capability to register
+     * @return true/false if the {@link StatCapabTypes} was added successfully
+     */
+    boolean registerAdditionalNodeFeature(InstanceIdentifier<Node> nodeIdent, StatCapabTypes statCapab);
+
+    /**
+     * Returns true if and only if this {@link StatPermCollector} contains
+     * a valid node registration in its internal {@link Node} map.
+     * Otherwise returns false.
+     *
+     * @param nodeIdent path to the queried {@link Node}
+     * @return whether the node is actively registered with this collector
+     */
+    boolean isProvidedFlowNodeActive(InstanceIdentifier<Node> nodeIdent);
+
+    /**
+     * Object notification for continuing the statistics collecting process.
+     * It is called from the collecting allStatistics methods as a future result
+     * of the Operational/DS statistic store call (regardless of the outcome).
+     *
+     * @param xid transaction id whose statistics round completed
+     */
+    void collectNextStatistics(TransactionId xid);
+
+    /**
+     * Returns true if the collector has registered some active nodes,
+     * otherwise returns false.
+     *
+     * @return whether any active node registrations exist
+     */
+    boolean hasActiveNodes();
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.SettableFuture;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatRpcMsgManager
+ * Represents an access point for the Device statistics RPC services, which are
+ * filtered to the needed methods only and wrapped in a simple way.
+ * Many statistics responses are Multipart messages, so StatRpcMsgManager
+ * provides the functionality to collect all multipart msgs and hand the whole
+ * stack back to the listener once the listener catches the last Multipart msg.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 29, 2014
+ */
+public interface StatRpcMsgManager extends Runnable, AutoCloseable {
+
+    interface RpcJobsQueue extends Callable<Void> {}
+
+    /**
+     * Transaction container is the definition of a Multipart transaction
+     * join container for all Multipart msgs with the same TransactionId.
+     * The input {@link DataObject} is a possible light-weight DataObject
+     * which is used for identification (e.g. Flow-> Priority,Match,Cookie,FlowId).
+     *
+     * @param <T> transaction-aware notification type (extends TransactionAware)
+     */
+    interface TransactionCacheContainer<T extends TransactionAware> {
+
+        void addNotif(T notification);
+
+        TransactionId getId();
+
+        NodeId getNodeId();
+
+        Optional<? extends DataObject> getConfInput();
+
+        List<T> getNotifications();
+    }
+
+    /**
+     * Checks a transaction registration in the multipart cache holder.
+     *
+     * @param id transaction id to check
+     * @param nodeId node the transaction belongs to
+     * @return true if the transaction has been correctly registered
+     */
+    Future<Boolean> isExpectedStatistics(TransactionId id, NodeId nodeId);
+
+    /**
+     * Converts a {@link java.util.concurrent.Future} object to a listenable
+     * future which is registered for Multipart Notification Statistics
+     * Collecting processing.
+     *
+     * @param future result of every Device RPC call
+     * @param inputObj light-weight input object used for identification
+     * @param ref reference to the target node
+     * @param resultTransId future settled with the allocated transaction id
+     */
+    <T extends TransactionAware, D extends DataObject> void registrationRpcFutureCallBack(
+            Future<RpcResult<T>> future, D inputObj, NodeRef ref, SettableFuture<TransactionId> resultTransId);
+
+    /**
+     * Adds a Notification which is marked as Multipart to the transaction cache
+     * to wait for the last one.
+     *
+     * @param notification multipart notification to cache
+     * @param nodeId node the notification belongs to
+     */
+    <T extends TransactionAware> void addNotification(T notification, NodeId nodeId);
+
+    /**
+     * The last Multipart msg informs the code that all previous messages can be
+     * taken for the next processing. The method takes all msgs and the possible
+     * input object and builds them into a TransactionCacheContainer object to
+     * return. This process cleans all instances in the Cache.
+     *
+     * @param id transaction id to drain
+     * @param nodeId node the transaction belongs to
+     * @return TransactionCacheContainer with all cached notifications
+     */
+    Future<Optional<TransactionCacheContainer<?>>> getTransactionCacheContainer(TransactionId id, NodeId nodeId);
+
+    /**
+     * Wraps OpendaylightGroupStatisticsService.getAllGroupStatistics
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllGroupsStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightGroupStatisticsService.getGroupDescription
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllGroupsConfStats(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightMeterStatisticsService.getGroupFeatures
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    void getGroupFeaturesStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightMeterStatisticsService.getAllMeterStatistics
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllMetersStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightMeterStatisticsService.getAllMeterConfigStatistics
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllMeterConfigStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightMeterStatisticsService.getMeterFeatures
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    void getMeterFeaturesStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightFlowStatisticsService.getAllFlowsStatisticsFromAllFlowTables
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllFlowsStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightFlowStatisticsService.getAggregateFlowStatisticsFromFlowTableForAllFlows
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     * @param tableId table whose aggregate flow statistics are requested
+     */
+    void getAggregateFlowStat(NodeRef nodeRef, TableId tableId);
+
+    /**
+     * Wraps OpendaylightPortStatisticsService.getAllNodeConnectorsStatistics
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllPortsStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightFlowTableStatisticsService.getFlowTablesStatistics
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllTablesStat(NodeRef nodeRef);
+
+    /**
+     * Wraps OpendaylightQueueStatisticsService.getAllQueuesStatisticsFromAllPorts
+     * and registers to the Transaction Cache.
+     *
+     * @param nodeRef reference to the target node
+     */
+    Future<TransactionId> getAllQueueStat(NodeRef nodeRef);
+
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.impl.StatisticsManagerConfig;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatisticsManager
+ * Represents the central point of the whole module. An implementation of
+ * StatisticsManager registers all Operation/DS {@link StatNotifyCommiter} and
+ * Config/DS {@link StatListeningCommiter} instances, as well as {@link StatPermCollector}
+ * for statistic collecting and {@link StatRpcMsgManager} as the Device RPCs provider.
+ * In addition, StatisticsManager provides all DS contact Transaction services.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 27, 2014
+ */
+public interface StatisticsManager extends AutoCloseable, TransactionChainListener {
+
+    /**
+     * StatDataStoreOperation
+     * Interface representing the functionality to submit changes to the DataStore.
+     * The internal {@link TransactionChainListener} joins all DS commits
+     * to a Set of chained changes to prevent frequent DataStore touches.
+     */
+    public abstract class StatDataStoreOperation {
+        public enum StatsManagerOperationType {
+            /**
+             * Operation will carry out work related to new node addition /
+             * update
+             */
+            NODE_UPDATE,
+            /**
+             * Operation will carry out work related to node removal
+             */
+            NODE_REMOVAL,
+            /**
+             * Operation will commit data to the operational data store
+             */
+            DATA_COMMIT_OPER_DS
+        }
+
+        private NodeId nodeId;
+        // defaults to a plain data commit when no explicit type is supplied
+        private StatsManagerOperationType operationType = StatsManagerOperationType.DATA_COMMIT_OPER_DS;
+
+        public StatDataStoreOperation(final StatsManagerOperationType operType, final NodeId id){
+            if(operType != null){
+                operationType = operType;
+            }
+            nodeId = id;
+        }
+
+        public final StatsManagerOperationType getType() {
+            return operationType;
+        }
+
+        public final NodeId getNodeId(){
+            return nodeId;
+        }
+
+        /**
+         * Applies all read / write (put|merge) operations to the DataStore.
+         *
+         * @param tx {@link ReadWriteTransaction} the operation works on
+         */
+        public abstract void applyOperation(ReadWriteTransaction tx);
+
+    }
+
+    /**
+     * Starts the whole StatisticManager functionality.
+     *
+     * @param notifService {@link NotificationProviderService} for notification registration
+     * @param rpcRegistry {@link RpcConsumerRegistry} providing the device RPC services
+     */
+    void start(final NotificationProviderService notifService,
+            final RpcConsumerRegistry rpcRegistry);
+
+    /**
+     * Provides read/write DataStore functionality through the applyOperation
+     * defined in {@link StatDataStoreOperation}.
+     *
+     * @param inventoryOper - operation for DataStore
+     */
+    void enqueue(final StatDataStoreOperation inventoryOper);
+
+    /**
+     * Wraps the {@link StatPermCollector}.isProvidedFlowNodeActive method
+     * to provide a parallel statCollection process for a Set of Nodes, so it
+     * has to identify the correct Node Set by NodeIdentifier.
+     *
+     * @param nodeIdent path to the queried {@link Node}
+     * @return whether the node is actively registered with some collector
+     */
+    boolean isProvidedFlowNodeActive(InstanceIdentifier<Node> nodeIdent);
+
+    /**
+     * Wraps {@link StatPermCollector}.collectNextStatistics to provide a
+     * parallel statCollection process for a Set of Nodes, so it has to
+     * identify the correct Node Set by NodeIdentifier.
+     *
+     * @param nodeIdent path to the target {@link Node}
+     * @param xid transaction id whose statistics round completed
+     */
+    void collectNextStatistics(InstanceIdentifier<Node> nodeIdent, TransactionId xid);
+
+    /**
+     * Wraps {@link StatPermCollector}.connectedNodeRegistration to provide a
+     * parallel statCollection process for a Set of Nodes, so it has to
+     * connect the node to a new or not-yet-full Node statCollector Set.
+     *
+     * @param nodeIdent path to the connected {@link Node}
+     * @param statTypes statistics capability types the node supports
+     * @param nrOfSwitchTables number of switch tables reported by the node
+     */
+    void connectedNodeRegistration(InstanceIdentifier<Node> nodeIdent,
+            List<StatCapabTypes> statTypes, Short nrOfSwitchTables);
+
+    /**
+     * Wraps {@link StatPermCollector}.disconnectedNodeUnregistration to provide a
+     * parallel statCollection process for a Set of Nodes, so it has to identify
+     * the correct collector for the disconnected node.
+     *
+     * @param nodeIdent path to the disconnected {@link Node}
+     */
+    void disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+
+    /**
+     * Wraps {@link StatPermCollector}.registerAdditionalNodeFeature to provide
+     * the possibility of registering an additional Node Feature {@link StatCapabTypes}
+     * for statistics collecting.
+     *
+     * @param nodeIdent path to the target {@link Node}
+     * @param statCapab additional capability to register
+     */
+    void registerAdditionalNodeFeature(InstanceIdentifier<Node> nodeIdent, StatCapabTypes statCapab);
+
+    /**
+     * Provides access to the Device RPC methods via a wrapped internal method.
+     * In {@link StatRpcMsgManager}, every Multipart device msg response is
+     * registered and joined to be able to run all collected statistics at one
+     * time (easy identification of Data for delete).
+     *
+     * @return {@link StatRpcMsgManager}
+     */
+    StatRpcMsgManager getRpcMsgManager();
+
+    /**
+     * Defines Method : {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+     * Operational/DS data change listener -> impl. target -> register FlowCapableNode to the Statistic Collecting process
+     * @return {@link StatNodeRegistration}
+     */
+    StatNodeRegistration getNodeRegistrator();
+
+    /**
+     * Defines Method : Flow Config/DS data change listener -> impl. target ->
+     * -> make pair between Config/DS FlowId and Device Flow response Hash
+     * @return flow listener/committer
+     */
+    StatListeningCommiter<Flow, OpendaylightFlowStatisticsListener> getFlowListenComit();
+
+    /**
+     * Defines Method : Meter Config/DS data change listener and Operation/DS notify commit
+     * functionality
+     * @return meter listener/committer
+     */
+    StatListeningCommiter<Meter, OpendaylightMeterStatisticsListener> getMeterListenCommit();
+
+    /**
+     * Defines Method : Group Config/DS data change listener and Operation/DS notify commit
+     * functionality
+     * @return group listener/committer
+     */
+    StatListeningCommiter<Group, OpendaylightGroupStatisticsListener> getGroupListenCommit();
+
+    /**
+     * Defines Method : Queue Config/DS change listener and Operation/DS notify commit functionality
+     * @return queue listener/committer
+     */
+    StatListeningCommiter<Queue, OpendaylightQueueStatisticsListener> getQueueNotifyCommit();
+
+    /**
+     * Defines Method : Table Operation/DS notify commit functionality
+     * @return table notification committer
+     */
+    StatNotifyCommiter<OpendaylightFlowTableStatisticsListener> getTableNotifCommit();
+
+    /**
+     * Defines Method : Port Operation/DS notify commit functionality
+     * @return port notification committer
+     */
+    StatNotifyCommiter<OpendaylightPortStatisticsListener> getPortNotifyCommit();
+
+    StatisticsManagerConfig getConfiguration();
+
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatListeningCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatAbstractListeneningCommiter
+ * Class is abstract implementation for all Configuration/DataStore DataChange
+ * listenable DataObjects like flows, groups, meters. It is a holder for common
+ * functionality needed by construction/destruction class and for DataChange
+ * event processing.
+ *
+ */
+public abstract class StatAbstractListenCommit<T extends DataObject, N extends NotificationListener>
+ extends StatAbstractNotifyCommit<N> implements StatListeningCommiter<T,N> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatAbstractListenCommit.class);
+
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ protected final Map<InstanceIdentifier<Node>, Map<InstanceIdentifier<T>, Integer>> mapNodesForDelete = new ConcurrentHashMap<>();
+ protected final Map<InstanceIdentifier<Node>, Integer> mapNodeFeautureRepeater = new ConcurrentHashMap<>();
+
+ private final Class<T> clazz;
+
+ private final DataBroker dataBroker;
+
+ private volatile ReadOnlyTransaction currentReadTx;
+
+ /* Constructor has to make a registration */
+ public StatAbstractListenCommit(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps, final Class<T> clazz) {
+ super(manager,nps);
+ this.clazz = Preconditions.checkNotNull(clazz, "Referenced Class can not be null");
+ Preconditions.checkArgument(db != null, "DataBroker can not be null!");
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardedRegistrationPath(), this, DataChangeScope.BASE);
+ this.dataBroker = db;
+ }
+
+ /**
+ * Method returns WildCarded Path which is used for registration as a listening path changes in
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}
+ * @return
+ */
+ protected abstract InstanceIdentifier<T> getWildCardedRegistrationPath();
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+ /*
+ * If we have opened read transaction for configuraiton data store,
+ * we will close and null it.
+ *
+ * Latest read transaction will be allocated on another read using readLatestConfiguration
+ */
+ if(currentReadTx != null) {
+ final ReadOnlyTransaction previous = currentReadTx;
+ currentReadTx = null;
+ previous.close();
+ }
+ }
+
+ /**
+  * Remembers the given path for delayed deletion when its target type matches the
+  * listened-for {@code clazz}. The Integer value is a countdown of collection
+  * cycles the entry survives before being actually removed.
+  *
+  * @param key path of the candidate DataObject
+  * @param value remaining-lifetime counter
+  */
+ @SuppressWarnings("unchecked")
+ protected void removeData(final InstanceIdentifier<?> key, final Integer value) {
+     if (clazz.equals(key.getTargetType())) {
+         final InstanceIdentifier<Node> nodeIdent = key.firstIdentifierOf(Node.class);
+         /* FIX: a single get() replaces the original redundant containsKey()+get()
+          * double lookup.
+          * NOTE(review): the get/put sequence is still a check-then-act race if two
+          * threads hit the same node concurrently - confirm single-threaded use. */
+         Map<InstanceIdentifier<T>, Integer> map = mapNodesForDelete.get(nodeIdent);
+         if (map == null) {
+             map = new ConcurrentHashMap<>();
+             mapNodesForDelete.put(nodeIdent, map);
+         }
+         map.put((InstanceIdentifier<T>) key, value);
+     }
+ }
+
+ @Override
+ public void cleanForDisconnect(final InstanceIdentifier<Node> nodeIdent) {
+ /* Drop all pending delayed-delete entries for a disconnected node */
+ mapNodesForDelete.remove(nodeIdent);
+ }
+
+ @Override
+ public void close() {
+     /* Tear down the Config/DS change registration first, then let the parent
+      * release its notification listener. */
+     final ListenerRegistration<DataChangeListener> registration = listenerRegistration;
+     listenerRegistration = null;
+     if (registration != null) {
+         try {
+             registration.close();
+         } catch (final Exception e) {
+             LOG.error("Error by stop {} DataChange StatListeningCommiter.", clazz.getSimpleName(), e);
+         }
+     }
+     super.close();
+ }
+
+ /**
+ * Method return actual DataObject identified by InstanceIdentifier from Config/DS
+ * @param path
+ * @return
+ */
+ protected final <K extends DataObject> Optional<K> readLatestConfiguration(final InstanceIdentifier<K> path) {
+ if(currentReadTx == null) {
+ currentReadTx = dataBroker.newReadOnlyTransaction();
+ }
+ try {
+ return currentReadTx.read(LogicalDatastoreType.CONFIGURATION, path).checkedGet();
+ } catch (final ReadFailedException e) {
+ return Optional.absent();
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.opendaylight.controller.md.statistics.manager.StatNotifyCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatAbstractNotifyCommit
+ * Class is an abstract implementation for all non-Configuration/DataStore DataObjects
+ * and represents common functionality for all DataObject Statistics Commiters.
+ * Class defines contract between DataObject and relevant Statistics NotificationListener.
+ *
+ */
+public abstract class StatAbstractNotifyCommit<N extends NotificationListener> implements StatNotifyCommiter<N> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatAbstractNotifyCommit.class);
+
+ protected final StatisticsManager manager;
+ private ListenerRegistration<NotificationListener> notifyListenerRegistration;
+
+ public StatAbstractNotifyCommit(final StatisticsManager manager,
+ final NotificationProviderService nps) {
+ Preconditions.checkArgument(nps != null, "NotificationProviderService can not be null!");
+ this.manager = Preconditions.checkNotNull(manager, "StatisticManager can not be null!");
+ notifyListenerRegistration = nps.registerNotificationListener(getStatNotificationListener());
+ }
+
+ @Override
+ public void close() {
+ if (notifyListenerRegistration != null) {
+ try {
+ notifyListenerRegistration.close();
+ }
+ catch (final Exception e) {
+ LOG.error("Error by stop {} StatNotificationListener.", this.getClass().getSimpleName());
+ }
+ notifyListenerRegistration = null;
+ }
+ }
+
+ /**
+ * Method returns Statistics Notification Listener for relevant DataObject implementation,
+ * which is declared for {@link StatNotifyCommiter} interface.
+ *
+ * @return
+ */
+ protected abstract N getStatNotificationListener();
+
+ /**
+ * PreConfigurationCheck - Node identified by input InstanceIdentifier<Node>
+ * has to be registered in {@link org.opendaylight.controller.md.statistics.manager.StatPermCollector}
+ *
+ * @param InstanceIdentifier<Node> nodeIdent
+ */
+ protected boolean preConfigurationCheck(final InstanceIdentifier<Node> nodeIdent) {
+ Preconditions.checkNotNull(nodeIdent, "FlowCapableNode ident can not be null!");
+ return manager.isProvidedFlowNodeActive(nodeIdent);
+ }
+
+ protected void notifyToCollectNextStatistics(final InstanceIdentifier<Node> nodeIdent, final TransactionId xid) {
+ Preconditions.checkNotNull(nodeIdent, "FlowCapableNode ident can not be null!");
+ manager.collectNextStatistics(nodeIdent, xid);
+ }
+
+ /**
+ * Wrapping Future object call for {@link org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager}
+ * getTransactionCacheContainer with 10sec TimeOut.
+ * Method has returned {@link Optional} which could contains a {@link TransactionCacheContainer}
+ *
+ * @param TransactionId transId
+ * @param NodeId nodeId
+ * @return
+ */
+ protected Optional<TransactionCacheContainer<?>> getTransactionCacheContainer(final TransactionId transId, final NodeId nodeId) {
+ Optional<TransactionCacheContainer<?>> txContainer;
+ try {
+ txContainer = manager.getRpcMsgManager().getTransactionCacheContainer(transId, nodeId).get(10, TimeUnit.SECONDS);
+ }
+ catch (InterruptedException | ExecutionException | TimeoutException e) {
+ LOG.warn("Get TransactionCacheContainer fail!", e);
+ txContainer = Optional.absent();
+ }
+ return txContainer;
+ }
+
+ /**
+ * Method validate TransactionCacheContainer. It needs to call before every txCacheContainer processing.
+ *
+ * @param txCacheContainer
+ * @return
+ */
+ protected boolean isTransactionCacheContainerValid(final Optional<TransactionCacheContainer<?>> txCacheContainer) {
+ if ( ! txCacheContainer.isPresent()) {
+ LOG.debug("Transaction Cache Container is not presented!");
+ return false;
+ }
+ if (txCacheContainer.get().getNodeId() == null) {
+ LOG.debug("Transaction Cache Container {} don't have Node ID!", txCacheContainer.get().getId());
+ return false;
+ }
+ if (txCacheContainer.get().getNotifications() == null) {
+ LOG.debug("Transaction Cache Container {} for {} node don't have Notifications!",
+ txCacheContainer.get().getId(), txCacheContainer.get().getNodeId());
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Wrapping Future object call to {@link org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager}
+ * isExpectedStatistics with 10sec TimeOut.
+ * Method has checked registration for provided {@link TransactionId} and {@link NodeId}
+ *
+ * @param TransactionId transId - Transaction identification
+ * @param NodeId nodeId - Node identification
+ * @return boolean
+ */
+ protected boolean isExpectedStatistics(final TransactionId transId, final NodeId nodeId) {
+ Boolean isExpectedStat = Boolean.FALSE;
+ try {
+ isExpectedStat = manager.getRpcMsgManager().isExpectedStatistics(transId, nodeId).get(10, TimeUnit.SECONDS);
+ }
+ catch (InterruptedException | ExecutionException | TimeoutException e) {
+ LOG.warn("Check Transaction registraion {} fail!", transId, e);
+ return false;
+ }
+ return isExpectedStat.booleanValue();
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.md.statistics.manager.impl.helper.FlowComparator;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowHashIdMapping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowHashIdMappingBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowHashIdMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowHashIdMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowHashIdMapKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.aggregate.flow.statistics.AggregateFlowStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.statistics.FlowStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.BiMap;
+import com.google.common.collect.HashBiMap;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatListenCommitFlow
+ * Class is a NotifyListener for FlowStatistics and DataChangeListener for Config/DataStore for Flow node.
+ * All expected (registered) FlowStatistics will be built and committed to Operational/DataStore.
+ * A DataChangeEvent should call create/delete Flow in Operational/DS; the create process needs to pair
+ * Device Flow HashCode and FlowId from Config/DS
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+ public class StatListenCommitFlow extends StatAbstractListenCommit<Flow, OpendaylightFlowStatisticsListener>
+ implements OpendaylightFlowStatisticsListener {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(StatListenCommitFlow.class);
+
+ /* Prefix of synthetic FlowIds minted for device flows with no Config/DS counterpart */
+ private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
+
+ /* How many collection cycles an alien flow survives unreported before deletion */
+ private static final Integer REMOVE_AFTER_MISSING_COLLECTION = 1;
+
+ /* Suffix source for alien FlowIds; shared across all nodes/tables of this commiter */
+ private final AtomicInteger unaccountedFlowsCounter = new AtomicInteger(0);
+
+ public StatListenCommitFlow (final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps){
+ super(manager, db, nps, Flow.class);
+ }
+
+ @Override
+ protected OpendaylightFlowStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ /* Listen to every Flow under every Table of every FlowCapableNode in Config/DS */
+ @Override
+ protected InstanceIdentifier<Flow> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Table.class).child(Flow.class);
+ }
+
+ @Override
+ public void onAggregateFlowStatisticsUpdate(final AggregateFlowStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - AggregateFlowStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+ /* check flow Capable Node and write statistics */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+ return;
+ }
+ // The Table the aggregate request was issued for is carried as the cached conf input
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (( ! inputObj.isPresent()) || ( ! (inputObj.get() instanceof Table))) {
+ return;
+ }
+ final Table table = (Table) inputObj.get();
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof AggregateFlowStatisticsUpdate) {
+ // NOTE(review): the statistics are built from the outer 'notification', not from
+ // the cached 'notif' being iterated - confirm this is intended (outer reply wins).
+ final AggregateFlowStatisticsData stats = new AggregateFlowStatisticsDataBuilder()
+ .setAggregateFlowStatistics(new AggregateFlowStatisticsBuilder(notification).build()).build();
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId)).augmentation(FlowCapableNode.class);
+ final InstanceIdentifier<Table> tableRef = fNodeIdent.child(Table.class, table.getKey());
+ final InstanceIdentifier<AggregateFlowStatisticsData> tableStatRef = tableRef
+ .augmentation(AggregateFlowStatisticsData.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ } catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ return;
+ }
+ if (fNode.isPresent()) {
+ // Only write statistics while the node is still present in Operational/DS
+ ensureTable(tx, table.getId(), tableRef);
+ tx.put(LogicalDatastoreType.OPERATIONAL, tableStatRef, stats);
+ }
+ }
+ }
+ }
+ });
+ }
+
+ /* Merges an empty Table so statistic augmentations always have a parent element */
+ public void ensureTable(final ReadWriteTransaction tx, final Short tableId, final InstanceIdentifier<Table> tableRef) {
+ final Table tableNew = new TableBuilder().setId(tableId).build();
+ tx.merge(LogicalDatastoreType.OPERATIONAL, tableRef, tableNew);
+ }
+
+ @Override
+ public void onFlowsStatisticsUpdate(final FlowsStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - FlowsStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ LOG.trace("Next notification for join txId {}", transId);
+ return;
+ }
+ /* add flow's statistics */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+ return;
+ }
+ final List<FlowAndStatisticsMapList> flowStats = new ArrayList<FlowAndStatisticsMapList>(10);
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+ // Join all cached multipart replies of this transaction into one flat list
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof FlowsStatisticsUpdate) {
+ final List<FlowAndStatisticsMapList> notifList =
+ ((FlowsStatisticsUpdate) notif).getFlowAndStatisticsMapList();
+ if (notifList != null) {
+ flowStats.addAll(notifList);
+ }
+ }
+ }
+
+ statsFlowCommitAll(flowStats, nodeIdent, tx);
+ /* cleaning all not cached hash collisions */
+ final Map<InstanceIdentifier<Flow>, Integer> listAliens = mapNodesForDelete.get(nodeIdent);
+ if (listAliens != null) {
+ for (final Entry<InstanceIdentifier<Flow>, Integer> nodeForDelete : listAliens.entrySet()) {
+ final Integer lifeIndex = nodeForDelete.getValue();
+ if (nodeForDelete.getValue() > 0) {
+ // Still has lifetime left: decrement and keep for another cycle
+ nodeForDelete.setValue(Integer.valueOf(lifeIndex.intValue() - 1));
+ } else {
+ // Lifetime exhausted: drop the alien flow from Operational/DS
+ final InstanceIdentifier<Flow> flowNodeIdent = nodeForDelete.getKey();
+ mapNodesForDelete.get(nodeIdent).remove(flowNodeIdent);
+ tx.delete(LogicalDatastoreType.OPERATIONAL, flowNodeIdent);
+ }
+ }
+ }
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent, transId);
+ }
+
+ });
+ }
+
+ /**
+ * Pairs every reported flow statistic with a FlowId (via hash cache, Config/DS
+ * match, or a newly minted alien id) and writes it to Operational/DS; flows not
+ * reported in this cycle are removed afterwards per table.
+ */
+ private void statsFlowCommitAll(final List<FlowAndStatisticsMapList> list,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ final Optional<FlowCapableNode> fNode;
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read FlowCapableNode {} in Operational/DS fail! Statistic scan not be updated.", nodeIdent, e);
+ return;
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("FlowCapableNode {} is not presented in Operational/DS. Statisticscan not be updated.", nodeIdent);
+ return;
+ }
+
+ final NodeUpdateState nodeState = new NodeUpdateState(fNodeIdent,fNode.get());
+
+ for (final FlowAndStatisticsMapList flowStat : list) {
+ final TableKey tableKey = new TableKey(flowStat.getTableId());
+ final TableFlowUpdateState tableState = nodeState.getTable(tableKey, tx);
+ tableState.reportFlow(flowStat,tx);
+ }
+
+ for (final TableFlowUpdateState table : nodeState.getTables()) {
+ table.removeUnreportedFlows(tx);
+ }
+ }
+
+ /**
+ * Method adds statistics to Flow
+ *
+ * @param flowBuilder builder of the Operational/DS flow being written
+ * @param deviceFlow statistics entry reported by the device
+ */
+ private void addStatistics(final FlowBuilder flowBuilder, final FlowAndStatisticsMapList deviceFlow) {
+ final FlowAndStatisticsMapListBuilder stats = new FlowAndStatisticsMapListBuilder(deviceFlow);
+ final FlowStatisticsBuilder flowStatisticsBuilder = new FlowStatisticsBuilder(stats.build());
+ final FlowStatisticsDataBuilder flowStatisticsData =new FlowStatisticsDataBuilder();
+ flowStatisticsData.setFlowStatistics(flowStatisticsBuilder.build());
+ flowBuilder.addAugmentation(FlowStatisticsData.class, flowStatisticsData.build());
+ }
+
+ /**
+ * build pseudoUnique hashCode for flow in table
+ * for future easy identification
+ *
+ * NOTE(review): key stability depends on Match.toString() output; a plain
+ * StringBuilder would suffice here (no shared mutation).
+ *
+ * FIXME: we expect same version for YANG models for all clusters and that has to be fix
+ * FIXME: CREATE BETTER KEY - for flow (MATCH is the problem)
+ */
+ static String buildFlowIdOperKey(final FlowAndStatisticsMapList deviceFlow) {
+ return new StringBuffer().append(deviceFlow.getMatch())
+ .append(deviceFlow.getPriority()).append(deviceFlow.getCookie().getValue()).toString();
+ }
+
+ /* Per-node holder of per-table update state for one statistics collection cycle */
+ private class NodeUpdateState {
+ private final InstanceIdentifier<FlowCapableNode> nodeIdentifier;
+ private final Map<TableKey,TableFlowUpdateState> tables = new HashMap<>();
+
+ public NodeUpdateState(final InstanceIdentifier<FlowCapableNode> fNodeIdent, final FlowCapableNode flowCapableNode) {
+ nodeIdentifier = fNodeIdent;
+ final List<Table> tableList = flowCapableNode.getTable();
+ if(tableList != null) {
+ for (final Table table : tableList) {
+ final TableKey tableKey = table.getKey();
+ tables.put(tableKey, new TableFlowUpdateState(nodeIdentifier.child(Table.class,tableKey),table));
+ }
+ }
+ }
+
+ public Iterable<TableFlowUpdateState> getTables() {
+ return tables.values();
+ }
+
+ /* Lazily creates state for tables the device reports but Operational/DS lacks */
+ TableFlowUpdateState getTable(final TableKey key,final ReadWriteTransaction tx) {
+ TableFlowUpdateState table = tables.get(key);
+ if(table == null) {
+ table = new TableFlowUpdateState(nodeIdentifier.child(Table.class, key), null);
+ tables.put(key, table);
+ }
+ return table;
+ }
+ }
+
+ /* Tracks, for one table, the hash->FlowId cache and which flows got reported */
+ private class TableFlowUpdateState {
+
+ private boolean tableEnsured = false;
+ final KeyedInstanceIdentifier<Table, TableKey> tableRef;
+ final TableKey tableKey;
+ // Entries are removed as flows get reported; the leftover is the removal list
+ final BiMap<FlowHashIdMapKey, FlowId> flowIdByHash;
+ List<Flow> configFlows;
+
+ public TableFlowUpdateState(final KeyedInstanceIdentifier<Table, TableKey> tablePath, final Table table) {
+ tableRef = tablePath;
+ tableKey = tablePath.getKey();
+ flowIdByHash = HashBiMap.create();
+ if(table != null) {
+ final FlowHashIdMapping flowHashMapping = table.getAugmentation(FlowHashIdMapping.class);
+ if (flowHashMapping != null) {
+ final List<FlowHashIdMap> flowHashMap = flowHashMapping.getFlowHashIdMap() != null
+ ? flowHashMapping.getFlowHashIdMap() : Collections.<FlowHashIdMap> emptyList();
+ for (final FlowHashIdMap flowHashId : flowHashMap) {
+ try {
+ flowIdByHash.put(flowHashId.getKey(), flowHashId.getFlowId());
+ } catch (final Exception e) {
+ // BiMap.put throws on a duplicate value; keep the first mapping and log
+ LOG.warn("flow hashing hit a duplicate for {} -> {}", flowHashId.getKey(), flowHashId.getFlowId());
+ }
+ }
+ }
+ }
+ }
+
+ /* Ensures table + empty FlowHashIdMapping augmentation exist (once per cycle) */
+ private void ensureTableFowHashIdMapping(final ReadWriteTransaction tx) {
+ if( ! tableEnsured) {
+ ensureTable(tx, tableKey.getId(), tableRef);
+ final FlowHashIdMapping emptyMapping = new FlowHashIdMappingBuilder()
+ .setFlowHashIdMap(Collections.<FlowHashIdMap> emptyList()).build();
+ tx.merge(LogicalDatastoreType.OPERATIONAL, tableRef.augmentation(FlowHashIdMapping.class), emptyMapping);
+ tableEnsured = true;
+ }
+ }
+
+ /* Linear scan of Config/DS flows for a semantic match; consumes matched entries */
+ private FlowKey searchInConfiguration(final FlowAndStatisticsMapList flowStat, final ReadWriteTransaction trans) {
+ initConfigFlows(trans);
+ final Iterator<Flow> it = configFlows.iterator();
+ while(it.hasNext()) {
+ final Flow cfgFlow = it.next();
+ final FlowKey cfgKey = cfgFlow.getKey();
+ // NOTE(review): flowIdByHash.inverse() is keyed by FlowId but is queried with a
+ // FlowKey - this containsKey() looks like it can never be true; confirm intent.
+ if(flowIdByHash.inverse().containsKey(cfgKey)) {
+ it.remove();
+ } else if(FlowComparator.flowEquals(flowStat, cfgFlow)) {
+ it.remove();
+ return cfgKey;
+ }
+ }
+ return null;
+ }
+
+ // NOTE(review): called at the start of EVERY searchInConfiguration(), so the
+ // configFlows list (and any prior it.remove() consumption) is rebuilt each
+ // time - confirm this repeated Config/DS re-read is intended.
+ private void initConfigFlows(final ReadWriteTransaction trans) {
+ final Optional<Table> table = readLatestConfiguration(tableRef);
+ List<Flow> localList = null;
+ if(table.isPresent()) {
+ localList = table.get().getFlow();
+ }
+ if(localList == null) {
+ configFlows = Collections.emptyList();
+ } else {
+ configFlows = new LinkedList<>(localList);
+ }
+ }
+
+ /* One-shot lookup: a hit removes the cache entry (flow counted as reported) */
+ private FlowKey getFlowKeyAndRemoveHash(final FlowHashIdMapKey key) {
+ final FlowId ret = flowIdByHash.get(key);
+ if(ret != null) {
+ flowIdByHash.remove(key);
+ return new FlowKey(ret);
+ }
+ return null;
+ }
+
+ /* Returns FlowKey which doesn't exist in any DataStore for now */
+ private FlowKey makeAlienFlowKey() {
+ final StringBuilder sBuilder = new StringBuilder(ALIEN_SYSTEM_FLOW_ID)
+ .append(tableKey.getId()).append("-").append(unaccountedFlowsCounter.incrementAndGet());
+ final FlowId flowId = new FlowId(sBuilder.toString());
+ return new FlowKey(flowId);
+ }
+
+ /* Whatever is left in the hash cache was not reported during this cycle */
+ private Map<FlowHashIdMapKey, FlowId> getRemovalList() {
+ return flowIdByHash;
+ }
+
+ void reportFlow(final FlowAndStatisticsMapList flowStat, final ReadWriteTransaction trans) {
+ ensureTableFowHashIdMapping(trans);
+ final FlowHashIdMapKey hashingKey = new FlowHashIdMapKey(buildFlowIdOperKey(flowStat));
+ FlowKey flowKey = getFlowKeyAndRemoveHash(hashingKey);
+ if (flowKey == null) {
+ // Unknown hash: try to pair with Config/DS flow, else mint an alien id
+ flowKey = searchInConfiguration(flowStat, trans);
+ if ( flowKey == null) {
+ flowKey = makeAlienFlowKey();
+ }
+ updateHashCache(trans,flowKey,hashingKey);
+ }
+ final FlowBuilder flowBuilder = new FlowBuilder(flowStat);
+ flowBuilder.setKey(flowKey);
+ addStatistics(flowBuilder, flowStat);
+ final InstanceIdentifier<Flow> flowIdent = tableRef.child(Flow.class, flowKey);
+ trans.put(LogicalDatastoreType.OPERATIONAL, flowIdent, flowBuilder.build());
+ /* check life for Alien flows */
+ if (flowKey.getId().getValue().startsWith(ALIEN_SYSTEM_FLOW_ID)) {
+ removeData(flowIdent, REMOVE_AFTER_MISSING_COLLECTION);
+ }
+ }
+
+ /* Build and deploy new FlowHashId map */
+ private void updateHashCache(final ReadWriteTransaction trans, final FlowKey flowKey, final FlowHashIdMapKey hashingKey) {
+ final FlowHashIdMapBuilder flHashIdMap = new FlowHashIdMapBuilder();
+ flHashIdMap.setFlowId(flowKey.getId());
+ flHashIdMap.setKey(hashingKey);
+ final KeyedInstanceIdentifier<FlowHashIdMap, FlowHashIdMapKey> flHashIdent = tableRef
+ .augmentation(FlowHashIdMapping.class).child(FlowHashIdMap.class, hashingKey);
+ /* Add new FlowHashIdMap */
+ trans.put(LogicalDatastoreType.OPERATIONAL, flHashIdent, flHashIdMap.build());
+ }
+
+ void removeUnreportedFlows(final ReadWriteTransaction tx) {
+ final InstanceIdentifier<Node> nodeIdent = tableRef.firstIdentifierOf(Node.class);
+ final List<InstanceIdentifier<Flow>> listMissingConfigFlows = notStatReportedConfigFlows();
+ final Map<InstanceIdentifier<Flow>, Integer> nodeDeleteMap = mapNodesForDelete.get(nodeIdent);
+ final Map<FlowHashIdMapKey, FlowId> listForRemove = getRemovalList();
+ for (final Entry<FlowHashIdMapKey, FlowId> entryForRemove : listForRemove.entrySet()) {
+ final FlowKey flowKey = new FlowKey(entryForRemove.getValue());
+ final InstanceIdentifier<Flow> flowRef = tableRef.child(Flow.class, flowKey);
+ if (nodeDeleteMap != null && flowKey.getId().getValue().startsWith(ALIEN_SYSTEM_FLOW_ID)) {
+ // NOTE(review): nodeDeleteMap.get(flowRef) may return null when the alien flow
+ // was never scheduled - the unboxing in the comparison below would NPE; confirm.
+ final Integer lifeIndex = nodeDeleteMap.get(flowRef);
+ if (lifeIndex > 0) {
+ // NOTE(review): 'break' abandons cleanup of ALL remaining entries, not just
+ // this one - 'continue' may have been intended; confirm.
+ break;
+ } else {
+ nodeDeleteMap.remove(flowRef);
+ }
+ } else {
+ if (listMissingConfigFlows.remove(flowRef)) {
+ break; // we probably lost some multipart msg
+ }
+ }
+ final InstanceIdentifier<FlowHashIdMap> flHashIdent =
+ tableRef.augmentation(FlowHashIdMapping.class).child(FlowHashIdMap.class, entryForRemove.getKey());
+ tx.delete(LogicalDatastoreType.OPERATIONAL, flowRef);
+ tx.delete(LogicalDatastoreType.OPERATIONAL, flHashIdent);
+ }
+ }
+
+ /* Paths of Config/DS flows that were not matched by any reported statistic */
+ List<InstanceIdentifier<Flow>> notStatReportedConfigFlows() {
+ if (configFlows != null) {
+ final List<InstanceIdentifier<Flow>> returnList = new ArrayList<>(configFlows.size());
+ for (final Flow confFlow : configFlows) {
+ final InstanceIdentifier<Flow> confFlowIdent = tableRef.child(Flow.class, confFlow.getKey());
+ returnList.add(confFlowIdent);
+ }
+ return returnList;
+ }
+ return Collections.emptyList();
+ }
+ }
+ }
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupDescStatsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupFeaturesUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupStatisticsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.desc.GroupDescBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.statistics.GroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.statistics.GroupStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatListenCommitGroup
+ * Class is a NotifyListener for GroupStatistics and a DataChangeListener for the Config/DataStore Group node.
+ * All expected (registered) GroupStatistics will be built and committed to the Operational/DataStore.
+ * A DataChangeEvent should trigger create/delete of the Group in the Operational/DS.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatListenCommitGroup extends StatAbstractListenCommit<Group, OpendaylightGroupStatisticsListener>
+ implements OpendaylightGroupStatisticsListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatListenCommitMeter.class);
+
+ public StatListenCommitGroup(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps) {
+ super(manager, db, nps, Group.class);
+ }
+
+ @Override
+ protected OpendaylightGroupStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ protected InstanceIdentifier<Group> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Group.class);
+ }
+
+ @Override
+ public void onGroupDescStatsUpdated(final GroupDescStatsUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("Unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Validate exist FlowCapableNode */
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ return;
+ }
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare List actual Groups and not updated Groups will be removed */
+ final List<Group> existGroups = fNode.get().getGroup() != null
+ ? fNode.get().getGroup() : Collections.<Group> emptyList();
+ final List<GroupKey> existGroupKeys = new ArrayList<>();
+ for (final Group group : existGroups) {
+ existGroupKeys.add(group.getKey());
+ }
+ /* GroupDesc processing */
+ statGroupDescCommit(txContainer, tx, fNodeIdent, existGroupKeys);
+ /* Delete all not presented Group Nodes */
+ deleteAllNotPresentNode(fNodeIdent, tx, Collections.unmodifiableList(existGroupKeys));
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent, transId);
+ }
+ });
+ }
+
+ @Override
+ public void onGroupFeaturesUpdated(final GroupFeaturesUpdated notification) {
+ Preconditions.checkNotNull(notification);
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("Unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupFeaturesUpdated)) {
+ break;
+ }
+ final GroupFeatures stats = new GroupFeaturesBuilder((GroupFeaturesUpdated)notif).build();
+ final InstanceIdentifier<NodeGroupFeatures> nodeGroupFeatureIdent =
+ nodeIdent.augmentation(NodeGroupFeatures.class);
+ final InstanceIdentifier<GroupFeatures> groupFeatureIdent = nodeGroupFeatureIdent
+ .child(GroupFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeGroupFeatureIdent, new NodeGroupFeaturesBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupFeatureIdent, stats);
+ manager.registerAdditionalNodeFeature(nodeIdent, StatCapabTypes.GROUP_STATS);
+ }
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onGroupStatisticsUpdated(final GroupStatisticsUpdated notification) {
+ Preconditions.checkNotNull(notification);
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - GroupStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Node exist check */
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! node.isPresent()) {
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+
+ Optional<Group> notifGroup = Optional.absent();
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Group) {
+ notifGroup = Optional.<Group> of((Group)inputObj.get());
+ }
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupStatisticsUpdated)) {
+ break;
+ }
+ statGroupCommit(((GroupStatisticsUpdated) notif).getGroupStats(), nodeIdent, tx);
+ }
+ if ( ! notifGroup.isPresent()) {
+ notifyToCollectNextStatistics(nodeIdent, transId);
+ }
+ }
+ });
+ }
+
+ private void statGroupCommit(final List<GroupStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
+ final ReadWriteTransaction tx) {
+
+ Preconditions.checkNotNull(groupStats);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ for (final GroupStats gStat : groupStats) {
+ final GroupStatistics stats = new GroupStatisticsBuilder(gStat).build();
+
+ final InstanceIdentifier<Group> groupIdent = fNodeIdent.child(Group.class, new GroupKey(gStat.getGroupId()));
+ final InstanceIdentifier<NodeGroupStatistics> nGroupStatIdent =groupIdent
+ .augmentation(NodeGroupStatistics.class);
+ final InstanceIdentifier<GroupStatistics> gsIdent = nGroupStatIdent.child(GroupStatistics.class);
+ /* Statistics Writing */
+ Optional<Group> group = Optional.absent();
+ try {
+ group = tx.read(LogicalDatastoreType.OPERATIONAL, groupIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Group node fail! {}", groupIdent, e);
+ }
+ if (group.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nGroupStatIdent, new NodeGroupStatisticsBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, gsIdent, stats);
+ }
+ }
+ }
+
+ private void statGroupDescCommit(final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent, final List<GroupKey> existGroupKeys) {
+
+ Preconditions.checkNotNull(existGroupKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupDescStatsUpdated)) {
+ break;
+ }
+ final List<GroupDescStats> groupStats = ((GroupDescStatsUpdated) notif).getGroupDescStats();
+ if (groupStats == null) {
+ break;
+ }
+ for (final GroupDescStats group : groupStats) {
+ if (group.getGroupId() != null) {
+ final GroupBuilder groupBuilder = new GroupBuilder(group);
+ final GroupKey groupKey = new GroupKey(group.getGroupId());
+ final InstanceIdentifier<Group> groupRef = fNodeIdent.child(Group.class,groupKey);
+
+ final NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
+ groupDesc.setGroupDesc(new GroupDescBuilder(group).build());
+ //Update augmented data
+ groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
+ existGroupKeys.remove(groupKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupRef, groupBuilder.build());
+ }
+ }
+ }
+ }
+
+ private void deleteAllNotPresentNode(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
+ final ReadWriteTransaction trans, final List<GroupKey> deviceGroupKeys) {
+
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(trans);
+
+ if (deviceGroupKeys == null) {
+ return;
+ }
+
+ for (final GroupKey key : deviceGroupKeys) {
+ final InstanceIdentifier<Group> delGroupIdent = fNodeIdent.child(Group.class, key);
+ LOG.trace("Group {} has to removed.", key);
+ Optional<Group> delGroup = Optional.absent();
+ try {
+ delGroup = trans.read(LogicalDatastoreType.OPERATIONAL, delGroupIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delGroup.isPresent()) {
+ trans.delete(LogicalDatastoreType.OPERATIONAL, delGroupIdent);
+ }
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterConfigStatsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterFeaturesUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterStatisticsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterConfigStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatListenCommitMeter
+ * Class is a NotifyListener for MeterStatistics and a DataChangeListener for the Config/DataStore Meter node.
+ * All expected (registered) MeterStatistics will be built and committed to the Operational/DataStore.
+ * A DataChangeEvent should trigger create/delete of the Meter in the Operational/DS.
+ *
+ */
+public class StatListenCommitMeter extends StatAbstractListenCommit<Meter, OpendaylightMeterStatisticsListener>
+ implements OpendaylightMeterStatisticsListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatListenCommitMeter.class);
+
+ public StatListenCommitMeter(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps) {
+ super(manager, db, nps, Meter.class);
+ }
+
+ @Override
+ protected InstanceIdentifier<Meter> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Meter.class);
+ }
+
+ @Override
+ protected OpendaylightMeterStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ public void onMeterConfigStatsUpdated(final MeterConfigStatsUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterConfigStatsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Validate exist FlowCapableNode */
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ return;
+ }
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare List actual Meters and not updated Meters will be removed */
+ final List<Meter> existMeters = fNode.get().getMeter() != null
+ ? fNode.get().getMeter() : Collections.<Meter> emptyList();
+ final List<MeterKey> existMeterKeys = new ArrayList<>();
+ for (final Meter meter : existMeters) {
+ existMeterKeys.add(meter.getKey());
+ }
+ /* MeterConfig processing */
+ comitConfMeterStats(txContainer, tx, fNodeIdent, existMeterKeys);
+ /* Delete all not presented Meter Nodes */
+ deleteAllNotPresentedNodes(fNodeIdent, tx, Collections.unmodifiableList(existMeterKeys));
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent, transId);
+ }
+ });
+ }
+
+ @Override
+ public void onMeterFeaturesUpdated(final MeterFeaturesUpdated notification) {
+ Preconditions.checkNotNull(notification);
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterFeaturesUpdated)) {
+ break;
+ }
+ final MeterFeatures stats = new MeterFeaturesBuilder((MeterFeaturesUpdated)notif).build();
+ final InstanceIdentifier<NodeMeterFeatures> nodeMeterFeatureIdent =
+ nodeIdent.augmentation(NodeMeterFeatures.class);
+ final InstanceIdentifier<MeterFeatures> meterFeatureIdent = nodeMeterFeatureIdent
+ .child(MeterFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeMeterFeatureIdent, new NodeMeterFeaturesBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterFeatureIdent, stats);
+ manager.registerAdditionalNodeFeature(nodeIdent, StatCapabTypes.METER_STATS);
+ }
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onMeterStatisticsUpdated(final MeterStatisticsUpdated notification) {
+ Preconditions.checkNotNull(notification);
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Node exist check */
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! node.isPresent()) {
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+
+ Optional<Meter> notifMeter = Optional.absent();
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Meter) {
+ notifMeter = Optional.<Meter> of((Meter)inputObj.get());
+ }
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterStatisticsUpdated)) {
+ break;
+ }
+ statMeterCommit(((MeterStatisticsUpdated) notif).getMeterStats(), nodeIdent, tx);
+ }
+ if ( ! notifMeter.isPresent()) {
+ notifyToCollectNextStatistics(nodeIdent, transId);
+ }
+ }
+ });
+ }
+
+ private void statMeterCommit(final List<MeterStats> meterStats,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+ Preconditions.checkNotNull(meterStats);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ for (final MeterStats mStat : meterStats) {
+ final MeterStatistics stats = new MeterStatisticsBuilder(mStat).build();
+
+ final InstanceIdentifier<Meter> meterIdent = fNodeIdent.child(Meter.class, new MeterKey(mStat.getMeterId()));
+ final InstanceIdentifier<NodeMeterStatistics> nodeMeterStatIdent = meterIdent
+ .augmentation(NodeMeterStatistics.class);
+ final InstanceIdentifier<MeterStatistics> msIdent = nodeMeterStatIdent.child(MeterStatistics.class);
+ /* Meter Statistics commit */
+ Optional<Meter> meter = Optional.absent();
+ try {
+ meter = tx.read(LogicalDatastoreType.OPERATIONAL, meterIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if (meter.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeMeterStatIdent, new NodeMeterStatisticsBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, msIdent, stats);
+ }
+ }
+ }
+
+ private void comitConfMeterStats(final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent, final List<MeterKey> existMeterKeys) {
+
+ Preconditions.checkNotNull(existMeterKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterConfigStatsUpdated)) {
+ break;
+ }
+ final List<MeterConfigStats> meterStats = ((MeterConfigStatsUpdated) notif).getMeterConfigStats();
+ if (meterStats == null) {
+ break;
+ }
+ for (final MeterConfigStats meterStat : meterStats) {
+ if (meterStat.getMeterId() != null) {
+ final MeterBuilder meterBuilder = new MeterBuilder(meterStat);
+ final MeterKey meterKey = new MeterKey(meterStat.getMeterId());
+ final InstanceIdentifier<Meter> meterRef = fNodeIdent.child(Meter.class, meterKey);
+
+ final NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
+ meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(meterStat).build());
+ //Update augmented data
+ meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
+ existMeterKeys.remove(meterKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterRef, meterBuilder.build());
+ }
+ }
+ }
+ }
+
+ private void deleteAllNotPresentedNodes(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
+ final ReadWriteTransaction tx, final List<MeterKey> deviceMeterKeys) {
+
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ if (deviceMeterKeys == null) {
+ return;
+ }
+
+ for (final MeterKey key : deviceMeterKeys) {
+ final InstanceIdentifier<Meter> delMeterIdent = fNodeIdent.child(Meter.class, key);
+ LOG.trace("Meter {} has to removed.", key);
+ Optional<Meter> delMeter = Optional.absent();
+ try {
+ delMeter = tx.read(LogicalDatastoreType.OPERATIONAL, delMeterIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delMeter.isPresent()) {
+ tx.delete(LogicalDatastoreType.OPERATIONAL, delMeterIdent);
+ }
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.QueueStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.flow.capable.node.connector.queue.statistics.FlowCapableNodeConnectorQueueStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.flow.capable.node.connector.queue.statistics.FlowCapableNodeConnectorQueueStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMap;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNotifyCommitQueue
+ * Class is a NotifyListener for Queue Statistics.
+ * All expected (registered) queueStatistics will be built and
+ * commit to Operational/DataStore
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatListenCommitQueue extends StatAbstractListenCommit<Queue, OpendaylightQueueStatisticsListener>
+ implements OpendaylightQueueStatisticsListener {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatListenCommitQueue.class);
+
+ public StatListenCommitQueue(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps) {
+ super(manager, db, nps, Queue.class);
+ }
+
+ @Override
+ protected OpendaylightQueueStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ protected InstanceIdentifier<Queue> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).child(NodeConnector.class)
+ .augmentation(FlowCapableNodeConnector.class).child(Queue.class);
+ }
+
+ @Override
+ public void onQueueStatisticsUpdate(final QueueStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - QueueStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+
+ /* Validate exist Node */
+ Optional<Node> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare List actual Queues and not updated Queues will be removed */
+ final List<NodeConnector> existConnectors = fNode.get().getNodeConnector() != null
+ ? fNode.get().getNodeConnector() : Collections.<NodeConnector> emptyList();
+ final Map<QueueKey, NodeConnectorKey> existQueueKeys = new HashMap<>();
+ for (final NodeConnector connect : existConnectors) {
+ final List<Queue> listQueues = connect.getAugmentation(FlowCapableNodeConnector.class).getQueue();
+ if (listQueues != null) {
+ for (final Queue queue : listQueues) {
+ existQueueKeys.put(queue.getKey(), connect.getKey());
+ }
+ }
+ }
+ /* Queue processing */
+ statQueueCommit(txContainer, tx, nodeIdent, existQueueKeys);
+ /* Delete all not presented Group Nodes */
+ deleteAllNotPresentedNodes(nodeIdent, tx, Collections.unmodifiableMap(existQueueKeys));
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent, transId);
+ }
+ });
+ }
+
+ private void statQueueCommit(
+ final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<Node> nodeIdent, final Map<QueueKey, NodeConnectorKey> existQueueKeys) {
+
+ Preconditions.checkNotNull(existQueueKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof QueueStatisticsUpdate)) {
+ break;
+ }
+ final List<QueueIdAndStatisticsMap> queueStats = ((QueueStatisticsUpdate) notif).getQueueIdAndStatisticsMap();
+ if (queueStats == null) {
+ break;
+ }
+ for (final QueueIdAndStatisticsMap queueStat : queueStats) {
+ if (queueStat.getQueueId() != null) {
+ final FlowCapableNodeConnectorQueueStatistics statChild =
+ new FlowCapableNodeConnectorQueueStatisticsBuilder(queueStat).build();
+ final FlowCapableNodeConnectorQueueStatisticsDataBuilder statBuild =
+ new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
+ statBuild.setFlowCapableNodeConnectorQueueStatistics(statChild);
+ final QueueKey qKey = new QueueKey(queueStat.getQueueId());
+ final InstanceIdentifier<Queue> queueIdent = nodeIdent
+ .child(NodeConnector.class, new NodeConnectorKey(queueStat.getNodeConnectorId()))
+ .augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, qKey);
+ final InstanceIdentifier<FlowCapableNodeConnectorQueueStatisticsData> queueStatIdent = queueIdent.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
+ existQueueKeys.remove(qKey);
+ tx.merge(LogicalDatastoreType.OPERATIONAL, queueIdent, new QueueBuilder().setKey(qKey).build());
+ tx.put(LogicalDatastoreType.OPERATIONAL, queueStatIdent, statBuild.build());
+ }
+ }
+ }
+ }
+
+ private void deleteAllNotPresentedNodes(final InstanceIdentifier<Node> nodeIdent,
+ final ReadWriteTransaction tx, final Map<QueueKey, NodeConnectorKey> existQueueKeys) {
+
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ if (existQueueKeys == null) {
+ return;
+ }
+
+ for (final Entry<QueueKey, NodeConnectorKey> entry : existQueueKeys.entrySet()) {
+ final InstanceIdentifier<Queue> queueIdent = nodeIdent.child(NodeConnector.class, entry.getValue())
+ .augmentation(FlowCapableNodeConnector.class).child(Queue.class, entry.getKey());
+ LOG.trace("Queue {} has to removed.", queueIdent);
+ Optional<Queue> delQueue = Optional.absent();
+ try {
+ delQueue = tx.read(LogicalDatastoreType.OPERATIONAL, queueIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delQueue.isPresent()) {
+ tx.delete(LogicalDatastoreType.OPERATIONAL, queueIdent);
+ }
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.statistics.manager.StatNodeRegistration;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FeatureCapability;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityFlowStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityGroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityPortStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityQueueStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityTableStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNodeRegistrationImpl
+ * {@link FlowCapableNode} Registration Implementation contains two method for registration/unregistration
+ * {@link FeatureCapability} for every connect/disconnect {@link FlowCapableNode}. Process of connection/disconnection
+ * is substituted by listening Operation/DS for add/delete {@link FeatureCapability}.
+ * All statistic capabilities are reading from new Node directly without contacting device or DS.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 28, 2014
+ */
+public class StatNodeRegistrationImpl implements StatNodeRegistration, DataChangeListener {
+
+    private static final Logger LOG = LoggerFactory.getLogger(StatNodeRegistrationImpl.class);
+
+    private final StatisticsManager manager;
+    /* Registration for FlowCapableNode DataChange events in Operational/DS. */
+    private ListenerRegistration<DataChangeListener> listenerRegistration;
+    /* Registration for inventory notifications (NodeUpdated / NodeRemoved). */
+    private ListenerRegistration<?> notifListenerRegistration;
+
+    public StatNodeRegistrationImpl(final StatisticsManager manager, final DataBroker db,
+            final NotificationProviderService notificationService) {
+        this.manager = Preconditions.checkNotNull(manager, "StatisticManager can not be null!");
+        Preconditions.checkArgument(db != null, "DataBroker can not be null!");
+        Preconditions.checkArgument(notificationService != null, "NotificationProviderService can not be null!");
+        notifListenerRegistration = notificationService.registerNotificationListener(this);
+        /* Build Path */
+        final InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
+                .child(Node.class).augmentation(FlowCapableNode.class);
+        listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                flowNodeWildCardIdentifier, StatNodeRegistrationImpl.this, DataChangeScope.BASE);
+    }
+
+    @Override
+    public void close() throws Exception {
+
+        if (notifListenerRegistration != null) {
+            try {
+                notifListenerRegistration.close();
+            }
+            catch (final Exception e) {
+                /* FIX: log the exception too, consistent with the listener branch below */
+                LOG.warn("Error by stop FlowCapableNode Notification StatNodeRegistration.", e);
+            }
+            notifListenerRegistration = null;
+        }
+
+        if (listenerRegistration != null) {
+            try {
+                listenerRegistration.close();
+            } catch (final Exception e) {
+                LOG.warn("Error by stop FlowCapableNode DataChange StatListeningCommiter.", e);
+            }
+            listenerRegistration = null;
+        }
+    }
+
+    /**
+     * Registers a newly connected FlowCapableNode with the StatisticsManager.
+     * The set of statistics the node will be polled for is derived from the
+     * capabilities advertised in its SwitchFeatures.
+     *
+     * @param keyIdent path to the node's SwitchFeatures (must not be wildcarded)
+     * @param data SwitchFeatures read from the node
+     * @param nodeIdent path to the node
+     */
+    @Override
+    public void connectFlowCapableNode(final InstanceIdentifier<SwitchFeatures> keyIdent,
+            final SwitchFeatures data, final InstanceIdentifier<Node> nodeIdent) {
+        Preconditions.checkNotNull(keyIdent, "InstanceIdentifier can not be null!");
+        /* FIX: Guava Preconditions use %s placeholders, not SLF4J-style {} */
+        Preconditions.checkNotNull(data, "SwitchFeatures data for %s can not be null!", keyIdent);
+        Preconditions.checkArgument(( ! keyIdent.isWildcarded()), "InstanceIdentifier is WildCarded!");
+
+        manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.NODE_UPDATE,
+                nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId()) {
+
+            @Override
+            public void applyOperation(final ReadWriteTransaction tx) {
+
+                /* Map advertised feature capabilities to collectable statistic types. */
+                final List<StatCapabTypes> statCapabTypes = new ArrayList<>();
+                final List<Class<? extends FeatureCapability>> capabilities = data.getCapabilities() != null
+                        ? data.getCapabilities() : Collections.<Class<? extends FeatureCapability>> emptyList();
+                for (final Class<? extends FeatureCapability> capability : capabilities) {
+                    if (capability == FlowFeatureCapabilityTableStats.class) {
+                        statCapabTypes.add(StatCapabTypes.TABLE_STATS);
+                    } else if (capability == FlowFeatureCapabilityFlowStats.class) {
+                        statCapabTypes.add(StatCapabTypes.FLOW_STATS);
+                    } else if (capability == FlowFeatureCapabilityGroupStats.class) {
+                        statCapabTypes.add(StatCapabTypes.GROUP_STATS);
+                    } else if (capability == FlowFeatureCapabilityPortStats.class) {
+                        statCapabTypes.add(StatCapabTypes.PORT_STATS);
+                    } else if (capability == FlowFeatureCapabilityQueueStats.class) {
+                        statCapabTypes.add(StatCapabTypes.QUEUE_STATS);
+                    }
+                }
+                /* FIX: fall back to a single table when the device does not report
+                 * max-tables - the original Optional.of(maxCapTables) threw an NPE
+                 * despite initializing a "1" default it never used. */
+                final Short maxCapTables = data.getMaxTables() != null
+                        ? data.getMaxTables() : Short.valueOf((short) 1);
+                manager.connectedNodeRegistration(nodeIdent,
+                        Collections.unmodifiableList(statCapabTypes), maxCapTables);
+            }
+        });
+    }
+
+    /**
+     * Unregisters a disconnected FlowCapableNode from the StatisticsManager.
+     *
+     * @param nodeIdent path to the node (must not be wildcarded)
+     */
+    @Override
+    public void disconnectFlowCapableNode(final InstanceIdentifier<Node> nodeIdent) {
+        Preconditions.checkArgument(nodeIdent != null, "InstanceIdentifier can not be NULL!");
+        /* FIX: Guava Preconditions use %s placeholders, not SLF4J-style {} */
+        Preconditions.checkArgument(( ! nodeIdent.isWildcarded()),
+                "InstanceIdentifier %s is WildCarded!", nodeIdent);
+        manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.NODE_REMOVAL,
+                nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId()) {
+
+            @Override
+            public void applyOperation(final ReadWriteTransaction tx) {
+                manager.disconnectedNodeUnregistration(nodeIdent);
+            }
+        });
+    }
+
+
+    @Override
+    public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
+        // NOOP
+    }
+
+    @Override
+    public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
+        // NOOP
+    }
+
+    @Override
+    public void onNodeRemoved(final NodeRemoved notification) {
+        Preconditions.checkNotNull(notification);
+        final NodeRef nodeRef = notification.getNodeRef();
+        final InstanceIdentifier<?> nodeRefIdent = nodeRef.getValue();
+        final InstanceIdentifier<Node> nodeIdent =
+                nodeRefIdent.firstIdentifierOf(Node.class);
+        if (nodeIdent != null) {
+            disconnectFlowCapableNode(nodeIdent);
+        }
+    }
+
+    @Override
+    public void onNodeUpdated(final NodeUpdated notification) {
+        Preconditions.checkNotNull(notification);
+        final FlowCapableNodeUpdated newFlowNode =
+                notification.getAugmentation(FlowCapableNodeUpdated.class);
+        if (newFlowNode != null && newFlowNode.getSwitchFeatures() != null) {
+            final NodeRef nodeRef = notification.getNodeRef();
+            final InstanceIdentifier<?> nodeRefIdent = nodeRef.getValue();
+            final InstanceIdentifier<Node> nodeIdent =
+                    nodeRefIdent.firstIdentifierOf(Node.class);
+            /* FIX: guard against a NodeRef without a Node path element -
+             * onNodeRemoved performs the same null check. */
+            if (nodeIdent == null) {
+                return;
+            }
+            final InstanceIdentifier<SwitchFeatures> swichFeaturesIdent =
+                    nodeIdent.augmentation(FlowCapableNode.class).child(SwitchFeatures.class);
+            final SwitchFeatures switchFeatures = newFlowNode.getSwitchFeatures();
+            connectFlowCapableNode(swichFeaturesIdent, switchFeatures, nodeIdent);
+        }
+    }
+
+    /**
+     * For every newly created FlowCapableNode, queries group and meter features
+     * via RPC so their statistics support can be discovered.
+     */
+    @Override
+    public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+        Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+        /* All DataObjects for create */
+        final Set<InstanceIdentifier<?>> createdData = changeEvent.getCreatedData() != null
+                ? changeEvent.getCreatedData().keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+
+        for (final InstanceIdentifier<?> entryKey : createdData) {
+            final InstanceIdentifier<Node> nodeIdent = entryKey
+                    .firstIdentifierOf(Node.class);
+            if ( ! nodeIdent.isWildcarded()) {
+                final NodeRef nodeRef = new NodeRef(nodeIdent);
+                // FIXME: these calls is a job for handshake or for inventory manager
+                /* check Group and Meter future */
+                manager.getRpcMsgManager().getGroupFeaturesStat(nodeRef);
+                manager.getRpcMsgManager().getMeterFeaturesStat(nodeRef);
+            }
+        }
+    }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.NodeConnectorStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.flow.capable.node.connector.statistics.FlowCapableNodeConnectorStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.flow.capable.node.connector.statistics.FlowCapableNodeConnectorStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNotifyCommitPort
+ * Class is a NotifyListener for PortStatistics
+ * All expected (registered) portStatistics will be built and
+ * commit to Operational/DataStore
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatNotifyCommitPort extends StatAbstractNotifyCommit<OpendaylightPortStatisticsListener>
+        implements OpendaylightPortStatisticsListener {
+
+    private static final Logger LOG = LoggerFactory.getLogger(StatNotifyCommitPort.class);
+
+    public StatNotifyCommitPort(final StatisticsManager manager,
+            final NotificationProviderService nps) {
+        super(manager, nps);
+    }
+
+    @Override
+    protected OpendaylightPortStatisticsListener getStatNotificationListener() {
+        return this;
+    }
+
+    /**
+     * Caches every part of a multipart port-statistics reply and, once the last
+     * part arrives, flattens the cached parts and commits them to Operational/DS.
+     *
+     * @param notification port statistics notification from the device
+     */
+    @Override
+    public void onNodeConnectorStatisticsUpdate(final NodeConnectorStatisticsUpdate notification) {
+        final TransactionId transId = notification.getTransactionId();
+        final NodeId nodeId = notification.getId();
+        if ( ! isExpectedStatistics(transId, nodeId)) {
+            LOG.debug("STAT-MANAGER - NodeConnectorStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+            return;
+        }
+        manager.getRpcMsgManager().addNotification(notification, nodeId);
+        if (notification.isMoreReplies()) {
+            return;
+        }
+        final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, new NodeKey(nodeId));
+        /* Defer the DataStore write so the RPC notification thread is never blocked. */
+        manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS, nodeId) {
+            @Override
+            public void applyOperation(final ReadWriteTransaction trans) {
+                final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+                if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+                    return;
+                }
+                /* Flatten all cached port-statistics notifications into one list. */
+                final List<NodeConnectorStatisticsAndPortNumberMap> allPortStats = new ArrayList<>(10);
+                for (final TransactionAware cached : txContainer.get().getNotifications()) {
+                    if ( ! (cached instanceof NodeConnectorStatisticsUpdate)) {
+                        continue;
+                    }
+                    final List<NodeConnectorStatisticsAndPortNumberMap> statMap =
+                            ((NodeConnectorStatisticsUpdate) cached).getNodeConnectorStatisticsAndPortNumberMap();
+                    if (statMap != null) {
+                        allPortStats.addAll(statMap);
+                    }
+                }
+                /* write stat to trans */
+                statPortCommit(allPortStats, nodeIdent, trans);
+                /* Notification for continue collecting statistics - Port statistics are still same size
+                 * and they are small - don't need to wait for whole apply operation*/
+                notifyToCollectNextStatistics(nodeIdent, transId);
+            }
+        });
+    }
+
+    /**
+     * Writes the collected port statistics under their NodeConnectors, skipping
+     * connectors that are no longer present in Operational/DS.
+     *
+     * @param portStats collected statistics for all reported ports
+     * @param nodeIdent identifier of the parent Node
+     * @param tx opened ReadWriteTransaction
+     */
+    private void statPortCommit(final List<NodeConnectorStatisticsAndPortNumberMap> portStats,
+            final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+        /* The parent Node must exist, otherwise there is nothing to attach stats to. */
+        Optional<Node> nodeOpt = Optional.absent();
+        try {
+            nodeOpt = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+        }
+        catch (final ReadFailedException e) {
+            LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+            return;
+        }
+        if ( ! nodeOpt.isPresent()) {
+            LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+            return;
+        }
+        for (final NodeConnectorStatisticsAndPortNumberMap portStat : portStats) {
+            final NodeConnectorKey connectorKey = new NodeConnectorKey(portStat.getNodeConnectorId());
+            final InstanceIdentifier<NodeConnector> connectorIdent = nodeIdent.child(NodeConnector.class, connectorKey);
+            Optional<NodeConnector> connectorOpt;
+            try {
+                connectorOpt = tx.read(LogicalDatastoreType.OPERATIONAL, connectorIdent).checkedGet();
+            }
+            catch (final ReadFailedException e) {
+                LOG.debug("Read NodeConnector {} in Operational/DS fail!", connectorIdent, e);
+                connectorOpt = Optional.absent();
+            }
+            if ( ! connectorOpt.isPresent()) {
+                /* Connector has vanished - do not resurrect it just for statistics. */
+                continue;
+            }
+            final FlowCapableNodeConnectorStatistics stats =
+                    new FlowCapableNodeConnectorStatisticsBuilder(portStat).build();
+            final InstanceIdentifier<FlowCapableNodeConnectorStatisticsData> statDataIdent =
+                    connectorIdent.augmentation(FlowCapableNodeConnectorStatisticsData.class);
+            tx.merge(LogicalDatastoreType.OPERATIONAL, connectorIdent,
+                    new NodeConnectorBuilder().setId(connectorKey.getId()).build());
+            tx.merge(LogicalDatastoreType.OPERATIONAL, statDataIdent,
+                    new FlowCapableNodeConnectorStatisticsDataBuilder().build());
+            tx.put(LogicalDatastoreType.OPERATIONAL,
+                    statDataIdent.child(FlowCapableNodeConnectorStatistics.class), stats);
+        }
+    }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNotifyCommitTable
+ * Class is a NotifyListener for TableStatistics
+ * All expected (registered) tableStatistics will be built and
+ * commit to Operational/DataStore
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatNotifyCommitTable extends StatAbstractNotifyCommit<OpendaylightFlowTableStatisticsListener>
+        implements OpendaylightFlowTableStatisticsListener {
+
+    private static final Logger LOG = LoggerFactory.getLogger(StatNotifyCommitTable.class);
+
+    public StatNotifyCommitTable(final StatisticsManager manager,
+            final NotificationProviderService nps) {
+        super(manager, nps);
+    }
+
+    @Override
+    protected OpendaylightFlowTableStatisticsListener getStatNotificationListener() {
+        return this;
+    }
+
+    /**
+     * Collects all cached table statistics notifications of the transaction and
+     * commits them to the Operational DataStore.
+     *
+     * @param notification table statistics notification from the device
+     */
+    @Override
+    public void onFlowTableStatisticsUpdate(final FlowTableStatisticsUpdate notification) {
+        final TransactionId transId = notification.getTransactionId();
+        final NodeId nodeId = notification.getId();
+        if ( ! isExpectedStatistics(transId, nodeId)) {
+            LOG.debug("STAT-MANAGER - FlowTableStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+            return;
+        }
+        manager.getRpcMsgManager().addNotification(notification, nodeId);
+        if (notification.isMoreReplies()) {
+            /* Wait for the last part of the multipart reply before committing. */
+            return;
+        }
+        /* Don't block RPC Notification thread */
+        manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS, nodeId) {
+            @Override
+            public void applyOperation(final ReadWriteTransaction trans) {
+                final List<FlowTableAndStatisticsMap> tableStats = new ArrayList<FlowTableAndStatisticsMap>(10);
+                final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+                final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+                        .child(Node.class, new NodeKey(nodeId));
+                /* FIX: validate the notification list that is iterated below - the
+                 * original checked getNodeId() and would NPE on a null notification
+                 * list; this matches the sibling port/queue commit classes. */
+                if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+                    return;
+                }
+                final List<? extends TransactionAware> cachedNotifs = txContainer.get().getNotifications();
+                for (final TransactionAware notif : cachedNotifs) {
+                    if (notif instanceof FlowTableStatisticsUpdate) {
+                        final List<FlowTableAndStatisticsMap> statNotif =
+                                ((FlowTableStatisticsUpdate) notif).getFlowTableAndStatisticsMap();
+                        if (statNotif != null) {
+                            tableStats.addAll(statNotif);
+                        }
+                    }
+                }
+                /* write stat to trans */
+                statTableCommit(tableStats, nodeIdent, trans);
+                /* Notification for continue collecting statistics - Tables statistics are still same size
+                 * and they are small - don't need to wait to whole apply operation */
+                notifyToCollectNextStatistics(nodeIdent, transId);
+            }
+        });
+    }
+
+    /**
+     * Writes the collected table statistics under the node's FlowCapableNode
+     * augmentation, creating the parent Table entries as needed.
+     *
+     * @param tableStats collected statistics for all tables
+     * @param nodeIdent identifier of the parent Node
+     * @param trans opened ReadWriteTransaction
+     */
+    private void statTableCommit(final List<FlowTableAndStatisticsMap> tableStats, final InstanceIdentifier<Node> nodeIdent,
+            final ReadWriteTransaction trans) {
+        final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+        /* check flow Capable Node and write statistics */
+        Optional<FlowCapableNode> fNode = Optional.absent();
+        try {
+            fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+        }
+        catch (final ReadFailedException e) {
+            LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+            return;
+        }
+        if ( ! fNode.isPresent()) {
+            LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+            return;
+        }
+        for (final FlowTableAndStatisticsMap tableStat : tableStats) {
+            final InstanceIdentifier<Table> tableIdent = fNodeIdent
+                    .child(Table.class, new TableKey(tableStat.getTableId().getValue()));
+            /* Ensure the parent Table entry exists before writing its statistics. */
+            final Table table = new TableBuilder().setId(tableStat.getTableId().getValue()).build();
+            trans.merge(LogicalDatastoreType.OPERATIONAL, tableIdent, table);
+            final InstanceIdentifier<FlowTableStatisticsData> tableStatIdent = tableIdent
+                    .augmentation(FlowTableStatisticsData.class);
+            trans.merge(LogicalDatastoreType.OPERATIONAL, tableStatIdent, new FlowTableStatisticsDataBuilder().build());
+
+            final FlowTableStatistics stats = new FlowTableStatisticsBuilder(tableStat).build();
+            final InstanceIdentifier<FlowTableStatistics> tStatIdent = tableStatIdent.child(FlowTableStatistics.class);
+            trans.put(LogicalDatastoreType.OPERATIONAL, tStatIdent, stats);
+        }
+    }
+}
+
--- /dev/null
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatPermCollectorImpl
+ * Thread base statistic collector. Class holds internal map for all registered
+ * (means connected) nodes with List of Switch capabilities;
+ * The statistics collecting process walks the whole network, device by device
+ * and statistic by statistic (following each switch's capabilities to avoid
+ * unnecessary requests). The next statistic starts collecting on notification
+ * or on timeout.
+ *
+ * @author avishnoi@in.ibm.com <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatPermCollectorImpl implements StatPermCollector {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatPermCollectorImpl.class);
+
+ private final static long STAT_COLLECT_TIME_OUT = 3000L;
+
+ private final ExecutorService statNetCollectorServ;
+ private final StatisticsManager manager;
+
+ private final int maxNodeForCollector;
+ private final long minReqNetInterval;
+ private final String name;
+
+ private final Object statCollectorLock = new Object();
+ private final Object statNodeHolderLock = new Object();
+ private final Object transNotifyLock = new Object();
+
+ private Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNodeHolder =
+ Collections.<InstanceIdentifier<Node>, StatNodeInfoHolder> emptyMap();
+
+ private volatile boolean wakeMe = false;
+ private volatile boolean finishing = false;
+ private TransactionId actualTransactionId;
+
+ public StatPermCollectorImpl(final StatisticsManager manager, final long minReqNetInterv, final int nr,
+ final int maxNodeForCollectors) {
+ this.manager = Preconditions.checkNotNull(manager, "StatisticsManager can not be null!");
+ name = "odl-stat-collector-" + nr;
+ minReqNetInterval = minReqNetInterv;
+ final ThreadFactory threadFact = new ThreadFactoryBuilder()
+ .setNameFormat(name + "-thread-%d").build();
+ statNetCollectorServ = Executors.newSingleThreadExecutor(threadFact);
+ maxNodeForCollector = maxNodeForCollectors;
+ LOG.trace("StatCollector {} start successfull!", name);
+ }
+
+ /**
+ * finish collecting statistics
+ */
+ @Override
+ public void close() {
+ statNodeHolder = Collections.<InstanceIdentifier<Node>, StatNodeInfoHolder> emptyMap();
+ finishing = true;
+ collectNextStatistics(actualTransactionId);
+ statNetCollectorServ.shutdown();
+ }
+
+ @Override
+ public boolean hasActiveNodes() {
+ return ( ! statNodeHolder.isEmpty());
+ }
+
+ @Override
+ public boolean isProvidedFlowNodeActive(
+ final InstanceIdentifier<Node> flowNode) {
+ return statNodeHolder.containsKey(flowNode);
+ }
+
+ @Override
+ public boolean connectedNodeRegistration(final InstanceIdentifier<Node> ident,
+ final List<StatCapabTypes> statTypes, final Short nrOfSwitchTables) {
+ if (isNodeIdentValidForUse(ident)) {
+ if ( ! statNodeHolder.containsKey(ident)) {
+ synchronized (statNodeHolderLock) {
+ final boolean startStatCollecting = statNodeHolder.size() == 0;
+ if ( ! statNodeHolder.containsKey(ident)) {
+ if (statNodeHolder.size() >= maxNodeForCollector) {
+ return false;
+ }
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNode =
+ new HashMap<>(statNodeHolder);
+ final NodeRef nodeRef = new NodeRef(ident);
+ final StatNodeInfoHolder nodeInfoHolder = new StatNodeInfoHolder(nodeRef,
+ statTypes, nrOfSwitchTables);
+ statNode.put(ident, nodeInfoHolder);
+ statNodeHolder = Collections.unmodifiableMap(statNode);
+ }
+ if (startStatCollecting) {
+ finishing = false;
+ statNetCollectorServ.execute(this);
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean disconnectedNodeUnregistration(final InstanceIdentifier<Node> ident) {
+ if (isNodeIdentValidForUse(ident)) {
+ if (statNodeHolder.containsKey(ident)) {
+ synchronized (statNodeHolderLock) {
+ if (statNodeHolder.containsKey(ident)) {
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNode =
+ new HashMap<>(statNodeHolder);
+ statNode.remove(ident);
+ statNodeHolder = Collections.unmodifiableMap(statNode);
+ }
+ if (statNodeHolder.isEmpty()) {
+ finishing = true;
+ collectNextStatistics(actualTransactionId);
+ statNetCollectorServ.shutdown();
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public boolean registerAdditionalNodeFeature(final InstanceIdentifier<Node> ident,
+ final StatCapabTypes statCapab) {
+ if (isNodeIdentValidForUse(ident)) {
+ if ( ! statNodeHolder.containsKey(ident)) {
+ return false;
+ }
+ final StatNodeInfoHolder statNode = statNodeHolder.get(ident);
+ if ( ! statNode.getStatMarkers().contains(statCapab)) {
+ synchronized (statNodeHolderLock) {
+ if ( ! statNode.getStatMarkers().contains(statCapab)) {
+ final List<StatCapabTypes> statCapabForEdit = new ArrayList<>(statNode.getStatMarkers());
+ statCapabForEdit.add(statCapab);
+ final StatNodeInfoHolder nodeInfoHolder = new StatNodeInfoHolder(statNode.getNodeRef(),
+ Collections.unmodifiableList(statCapabForEdit), statNode.getMaxTables());
+
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNodes =
+ new HashMap<>(statNodeHolder);
+ statNodes.put(ident, nodeInfoHolder);
+ statNodeHolder = Collections.unmodifiableMap(statNodes);
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public void collectNextStatistics(final TransactionId xid) {
+ if (checkTransactionId(xid)) {
+ if (wakeMe) {
+ synchronized (statCollectorLock) {
+ if (wakeMe) {
+ LOG.trace("STAT-COLLECTOR is notified to conntinue");
+ statCollectorLock.notify();
+ }
+ }
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ try {
+ // sleep 5 second before collecting all statistics cycles is important
+ // for loading all Nodes to Operational/DS
+ Thread.sleep(5000);
+ }
+ catch (final InterruptedException e1) {
+ // NOOP
+ }
+ LOG.debug("StatCollector {} Start collecting!", name);
+ /* Neverending cyle - wait for finishing */
+ while ( ! finishing) {
+ boolean collecting = false;
+ final long startTime = System.currentTimeMillis();
+
+ if ( ! statNodeHolder.isEmpty()) {
+ collecting = true;
+ collectStatCrossNetwork();
+ collecting = false;
+ }
+
+ if ( ! collecting) {
+ final long statFinalTime = System.currentTimeMillis() - startTime;
+ LOG.debug("STAT-MANAGER {}: last all NET statistics collection cost {} ms", name, statFinalTime);
+ if (statFinalTime < minReqNetInterval) {
+ LOG.trace("statCollector is about to make a collecting sleep");
+ synchronized (statCollectorLock) {
+ wakeMe = true;
+ try {
+ final long waitTime = minReqNetInterval - statFinalTime;
+ statCollectorLock.wait(waitTime);
+ LOG.trace("STAT-MANAGER : statCollector {} is waking up from a collecting sleep for {} ms", name, waitTime);
+ } catch (final InterruptedException e) {
+ LOG.warn("statCollector has been interrupted during collecting sleep", e);
+ } finally {
+ wakeMe = false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ private void waitingForNotification() {
+ synchronized (statCollectorLock) {
+ wakeMe = true;
+ try {
+ statCollectorLock.wait(STAT_COLLECT_TIME_OUT);
+ LOG.trace("statCollector is waking up from a wait stat Response sleep");
+ } catch (final InterruptedException e) {
+ LOG.warn("statCollector has been interrupted waiting stat Response sleep", e);
+ } finally {
+ setActualTransactionId(null);
+ wakeMe = false;
+ }
+ }
+ }
+
+
+ private void collectStatCrossNetwork() {
+ for (final Entry<InstanceIdentifier<Node>, StatNodeInfoHolder> nodeEntity : statNodeHolder.entrySet()) {
+ final List<StatCapabTypes> listNeededStat = nodeEntity.getValue().getStatMarkers();
+ final NodeRef actualNodeRef = nodeEntity.getValue().getNodeRef();
+ final Short maxTables = nodeEntity.getValue().getMaxTables();
+ for (final StatCapabTypes statMarker : listNeededStat) {
+ if ( ! isProvidedFlowNodeActive(nodeEntity.getKey())) {
+ break;
+ }
+ try {
+ switch (statMarker) {
+ case PORT_STATS:
+ LOG.trace("STAT-MANAGER-collecting PORT-STATS for NodeRef {}", actualNodeRef);
+ setActualTransactionId(manager.getRpcMsgManager().getAllPortsStat(actualNodeRef).get());
+ waitingForNotification();
+ break;
+ case QUEUE_STATS:
+ LOG.trace("STAT-MANAGER-collecting QUEUE-STATS for NodeRef {}", actualNodeRef);
+ setActualTransactionId(manager.getRpcMsgManager().getAllQueueStat(actualNodeRef).get());
+ waitingForNotification();
+ break;
+ case TABLE_STATS:
+ LOG.trace("STAT-MANAGER-collecting TABLE-STATS for NodeRef {}", actualNodeRef);
+ setActualTransactionId(manager.getRpcMsgManager().getAllTablesStat(actualNodeRef).get());
+ waitingForNotification();
+ break;
+ case GROUP_STATS:
+ LOG.trace("STAT-MANAGER-collecting GROUP-STATS for NodeRef {}", actualNodeRef);
+ setActualTransactionId(manager.getRpcMsgManager().getAllGroupsConfStats(actualNodeRef).get());
+ waitingForNotification();
+ setActualTransactionId(manager.getRpcMsgManager().getAllGroupsStat(actualNodeRef).get());
+ waitingForNotification();
+ break;
+ case METER_STATS:
+ LOG.trace("STAT-MANAGER-collecting METER-STATS for NodeRef {}", actualNodeRef);
+ setActualTransactionId(manager.getRpcMsgManager().getAllMeterConfigStat(actualNodeRef).get());
+ waitingForNotification();
+ setActualTransactionId(manager.getRpcMsgManager().getAllMetersStat(actualNodeRef).get());
+ waitingForNotification();
+ break;
+ case FLOW_STATS:
+ LOG.trace("STAT-MANAGER-collecting FLOW-STATS-ALL_FLOWS for NodeRef {}", actualNodeRef);
+ setActualTransactionId(manager.getRpcMsgManager().getAllFlowsStat(actualNodeRef).get());
+ waitingForNotification();
+ LOG.trace("STAT-MANAGER-collecting FLOW-AGGREGATE-STATS for NodeRef {}", actualNodeRef);
+ for (short i = 0; i < maxTables; i++) {
+ final TableId tableId = new TableId(i);
+ manager.getRpcMsgManager().getAggregateFlowStat(actualNodeRef, tableId);
+ }
+ break;
+ default:
+ /* Exception for programmers in implementation cycle */
+ throw new IllegalStateException("Not implemented ASK for " + statMarker);
+ }
+ } catch (InterruptedException | ExecutionException ex) {
+ LOG.warn("Unexpected RPC exception by call RPC Future!", ex);
+ continue;
+ }
+ }
+ }
+ }
+
+ private class StatNodeInfoHolder {
+ private final NodeRef nodeRef;
+ private final List<StatCapabTypes> statMarkers;
+ private final Short maxTables;
+
+ public StatNodeInfoHolder(final NodeRef nodeRef,
+ final List<StatCapabTypes> statMarkers, final Short maxTables) {
+ this.nodeRef = nodeRef;
+ this.maxTables = maxTables;
+ this.statMarkers = statMarkers;
+ }
+
+ public final NodeRef getNodeRef() {
+ return nodeRef;
+ }
+
+ public final List<StatCapabTypes> getStatMarkers() {
+ return statMarkers;
+ }
+
+ public final Short getMaxTables() {
+ return maxTables;
+ }
+ }
+
+ private boolean isNodeIdentValidForUse(final InstanceIdentifier<Node> ident) {
+ if (ident == null) {
+ LOG.warn("FlowCapableNode InstanceIdentifier {} can not be null!");
+ return false;
+ }
+ if (ident.isWildcarded()) {
+ LOG.warn("FlowCapableNode InstanceIdentifier {} can not be wildcarded!", ident);
+ return false;
+ }
+ return true;
+ }
+
+ private boolean checkTransactionId(final TransactionId xid) {
+ synchronized (transNotifyLock) {
+ return actualTransactionId != null && actualTransactionId.equals(xid);
+ }
+ }
+
+ private void setActualTransactionId(final TransactionId transactionId) {
+ synchronized (transNotifyLock) {
+ actualTransactionId = transactionId;
+ }
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import org.opendaylight.yangtools.yang.common.RpcError;
+
+import java.util.Collection;
+
+public final class StatRPCFailedException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+ private final Collection<RpcError> errors;
+
+ public StatRPCFailedException(final String message, final Collection<RpcError> errors) {
+ super(message);
+ this.errors = errors;
+ }
+
+ @Override
+ public String toString() {
+ return "RPCFailedException [errors=" + errors + ", message=" + getMessage() + ']';
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.SettableFuture;
+
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatRpcMsgManagerImpl
+ * This class registers and provides all RPC statistics device services and
+ * implements pre-defined wrapper methods for easy access to them, such as
+ * getAllStatisticsFor...
+ *
+ * It also implements the process for joining multipart messages.
+ * Class internally use two WeakHashMap and GuavaCache for holding values for joining multipart msg.
+ * One Weak map is used for holding all Multipart Messages and second is used for possible input
+ * Config/DS light-weight DataObject (DataObject contains only necessary identification fields as
+ * TableId, GroupId, MeterId or for flow Match, Priority, FlowCookie, TableId and FlowId ...
+ *
+ * @author avishnoi@in.ibm.com <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatRpcMsgManagerImpl implements StatRpcMsgManager {
+
    private final static Logger LOG = LoggerFactory.getLogger(StatRpcMsgManagerImpl.class);

    /* Short-lived cache of in-flight statistics transactions, keyed by
     * "<txId>-<nodeId>" (see buildCacheKey). Entries expire after
     * maxNodeForCollector * 7 seconds - see the constructor comment. */
    private final Cache<String, TransactionCacheContainer<? super TransactionAware>> txCache;

    /* Upper bound of queued RPC jobs; offer() drops requests beyond this. */
    private final int queueCapacity = 5000;

    /* RPC services resolved once at construction; all six are mandatory. */
    private final OpendaylightGroupStatisticsService groupStatsService;
    private final OpendaylightMeterStatisticsService meterStatsService;
    private final OpendaylightFlowStatisticsService flowStatsService;
    private final OpendaylightPortStatisticsService portStatsService;
    private final OpendaylightFlowTableStatisticsService flowTableStatsService;
    private final OpendaylightQueueStatisticsService queueStatsService;

    /* Single-consumer job queue processed by run(); released in close(). */
    private BlockingQueue<RpcJobsQueue> statsRpcJobQueue;

    /* Set by close() to stop the run() loop. */
    private volatile boolean finishing = false;
+
+ public StatRpcMsgManagerImpl (final StatisticsManager manager,
+ final RpcConsumerRegistry rpcRegistry, final long maxNodeForCollector) {
+ Preconditions.checkArgument(manager != null, "StatisticManager can not be null!");
+ Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
+ groupStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightGroupStatisticsService.class),
+ "OpendaylightGroupStatisticsService can not be null!");
+ meterStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightMeterStatisticsService.class),
+ "OpendaylightMeterStatisticsService can not be null!");
+ flowStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightFlowStatisticsService.class),
+ "OpendaylightFlowStatisticsService can not be null!");
+ portStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightPortStatisticsService.class),
+ "OpendaylightPortStatisticsService can not be null!");
+ flowTableStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightFlowTableStatisticsService.class),
+ "OpendaylightFlowTableStatisticsService can not be null!");
+ queueStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightQueueStatisticsService.class),
+ "OpendaylightQueueStatisticsService can not be null!");
+
+ statsRpcJobQueue = new LinkedBlockingQueue<>(queueCapacity);
+ /* nr. 7 is here nr. of possible statistic which are waiting for notification
+ * - check it in StatPermCollectorImpl method collectStatCrossNetwork */
+ txCache = CacheBuilder.newBuilder().expireAfterWrite((maxNodeForCollector * 7), TimeUnit.SECONDS)
+ .maximumSize(10000).build();
+ }
+
+ @Override
+ public void close() {
+ finishing = true;
+ statsRpcJobQueue = null;
+ }
+
+ @Override
+ public void run() {
+ /* Neverending cyle - wait for finishing */
+ while ( ! finishing) {
+ try {
+ statsRpcJobQueue.take().call();
+ }
+ catch (final Exception e) {
+ LOG.warn("Stat Element RPC executor fail!", e);
+ }
+ }
+ // Drain all rpcCall, making sure any blocked threads are unblocked
+ while ( ! statsRpcJobQueue.isEmpty()) {
+ statsRpcJobQueue.poll();
+ }
+ }
+
+ private void addGetAllStatJob(final RpcJobsQueue getAllStatJob) {
+ final boolean success = statsRpcJobQueue.offer(getAllStatJob);
+ if ( ! success) {
+ LOG.warn("Put RPC request getAllStat fail! Queue is full.");
+ }
+ }
+
+ private void addStatJob(final RpcJobsQueue getStatJob) {
+ final boolean success = statsRpcJobQueue.offer(getStatJob);
+ if ( ! success) {
+ LOG.debug("Put RPC request for getStat fail! Queue is full.");
+ }
+ }
+
    /**
     * Registers a callback on a statistics RPC future. On success the returned
     * transaction id is published to {@code resultTransId} (when given) and a
     * fresh TransactionCacheContainer is stored in {@code txCache} so the
     * subsequent multipart notifications can be joined to this transaction.
     * A null transaction id is treated as "request type unsupported by node".
     *
     * NOTE(review): onSuccess assumes {@code result.getResult()} is non-null;
     * an unsuccessful RpcResult would NPE here - confirm the RPC services
     * always populate the result. The copyOfRange(.., 2, length-2) presumably
     * strips leading/trailing words of the generated output-class name for the
     * log message; for very short class names the range could be invalid -
     * TODO confirm.
     */
    @Override
    public <T extends TransactionAware, D extends DataObject> void registrationRpcFutureCallBack(
            final Future<RpcResult<T>> future, final D inputObj, final NodeRef nodeRef,
            final SettableFuture<TransactionId> resultTransId) {

        Futures.addCallback(JdkFutureAdapters.listenInPoolThread(future),
                new FutureCallback<RpcResult<? extends TransactionAware>>() {

                    @Override
                    public void onSuccess(final RpcResult<? extends TransactionAware> result) {
                        final TransactionId id = result.getResult().getTransactionId();
                        final NodeKey nodeKey = nodeRef.getValue().firstKeyOf(Node.class, NodeKey.class);
                        if (id == null) {
                            String[] multipartRequestName = result.getResult().getClass().getSimpleName().split("(?=\\p{Upper})");
                            LOG.warn("Node [{}] does not support statistics request type : {}",
                                    nodeKey.getId(),Joiner.on(" ").join(Arrays.copyOfRange(multipartRequestName, 2, multipartRequestName.length-2)));
                        } else {
                            if (resultTransId != null) {
                                resultTransId.set(id);
                            }
                            final String cacheKey = buildCacheKey(id, nodeKey.getId());
                            final TransactionCacheContainer<? super TransactionAware> container =
                                    new TransactionCacheContainerImpl<>(id, inputObj, nodeKey.getId());
                            txCache.put(cacheKey, container);
                        }
                    }

                    @Override
                    public void onFailure(final Throwable t) {
                        LOG.warn("Response Registration for Statistics RPC call fail!", t);
                    }

                });
    }
+
+ private String buildCacheKey(final TransactionId id, final NodeId nodeId) {
+ return String.valueOf(id.getValue()) + "-" + nodeId.getValue();
+ }
+
+ @Override
+ public Future<Optional<TransactionCacheContainer<?>>> getTransactionCacheContainer(
+ final TransactionId id, final NodeId nodeId) {
+ Preconditions.checkArgument(id != null, "TransactionId can not be null!");
+ Preconditions.checkArgument(nodeId != null, "NodeId can not be null!");
+
+ final String key = buildCacheKey(id, nodeId);
+ final SettableFuture<Optional<TransactionCacheContainer<?>>> result = SettableFuture.create();
+
+ final RpcJobsQueue getTransactionCacheContainer = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final Optional<TransactionCacheContainer<?>> resultContainer =
+ Optional.<TransactionCacheContainer<?>> fromNullable(txCache.getIfPresent(key));
+ if (resultContainer.isPresent()) {
+ txCache.invalidate(key);
+ }
+ result.set(resultContainer);
+ return null;
+ }
+ };
+ addStatJob(getTransactionCacheContainer);
+ return result;
+ }
+
+ @Override
+ public Future<Boolean> isExpectedStatistics(final TransactionId id, final NodeId nodeId) {
+ Preconditions.checkArgument(id != null, "TransactionId can not be null!");
+ Preconditions.checkArgument(nodeId != null, "NodeId can not be null!");
+
+ final String key = buildCacheKey(id, nodeId);
+ final SettableFuture<Boolean> checkStatId = SettableFuture.create();
+
+ final RpcJobsQueue isExpecedStatistics = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final Optional<TransactionCacheContainer<?>> result =
+ Optional.<TransactionCacheContainer<?>> fromNullable(txCache.getIfPresent(key));
+ checkStatId.set(Boolean.valueOf(result.isPresent()));
+ return null;
+ }
+ };
+ addStatJob(isExpecedStatistics);
+ return checkStatId;
+ }
+
+ @Override
+ public void addNotification(final TransactionAware notification, final NodeId nodeId) {
+ Preconditions.checkArgument(notification != null, "TransactionAware can not be null!");
+ Preconditions.checkArgument(nodeId != null, "NodeId can not be null!");
+
+ final RpcJobsQueue addNotification = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final TransactionId txId = notification.getTransactionId();
+ final String key = buildCacheKey(txId, nodeId);
+ final TransactionCacheContainer<? super TransactionAware> container = (txCache.getIfPresent(key));
+ if (container != null) {
+ container.addNotif(notification);
+ }
+ return null;
+ }
+ };
+ addStatJob(addNotification);
+ }
+
+ @Override
+ public Future<TransactionId> getAllGroupsStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue getAllGroupStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllGroupStatisticsInputBuilder builder =
+ new GetAllGroupStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(groupStatsService
+ .getAllGroupStatistics(builder.build()), null, nodeRef, result);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllGroupStat);
+ return result;
+ }
+
+ @Override
+ public Future<TransactionId> getAllMetersStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue getAllMeterStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllMeterStatisticsInputBuilder builder =
+ new GetAllMeterStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(meterStatsService
+ .getAllMeterStatistics(builder.build()), null, nodeRef, result);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllMeterStat);
+ return result;
+ }
+
+ @Override
+ public Future<TransactionId> getAllFlowsStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue getAllFlowStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllFlowsStatisticsFromAllFlowTablesInputBuilder builder =
+ new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(flowStatsService
+ .getAllFlowsStatisticsFromAllFlowTables(builder.build()), null, nodeRef, result);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllFlowStat);
+ return result;
+ }
+
+ @Override
+ public void getAggregateFlowStat(final NodeRef nodeRef, final TableId tableId) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ Preconditions.checkArgument(tableId != null, "TableId can not be null!");
+ final RpcJobsQueue getAggregateFlowStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder builder =
+ new GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder();
+ builder.setNode(nodeRef);
+ builder.setTableId(tableId);
+
+ final TableBuilder tbuilder = new TableBuilder();
+ tbuilder.setId(tableId.getValue());
+ tbuilder.setKey(new TableKey(tableId.getValue()));
+ registrationRpcFutureCallBack(flowStatsService
+ .getAggregateFlowStatisticsFromFlowTableForAllFlows(builder.build()), tbuilder.build(), nodeRef, null);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAggregateFlowStat);
+ }
+
+ @Override
+ public Future<TransactionId> getAllPortsStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue getAllPortsStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllNodeConnectorsStatisticsInputBuilder builder =
+ new GetAllNodeConnectorsStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ final Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> rpc =
+ portStatsService.getAllNodeConnectorsStatistics(builder.build());
+ registrationRpcFutureCallBack(rpc, null, nodeRef, result);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllPortsStat);
+ return result;
+ }
+
+ @Override
+ public Future<TransactionId> getAllTablesStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue getAllTableStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetFlowTablesStatisticsInputBuilder builder =
+ new GetFlowTablesStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(flowTableStatsService
+ .getFlowTablesStatistics(builder.build()), null, nodeRef, result);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllTableStat);
+ return result;
+ }
+
    /**
     * Queues an RPC job that asks the device for the statistics of all queues on
     * all of its ports. The returned future is handed to
     * registrationRpcFutureCallBack, which is expected to complete it with the
     * TransactionId from the device's reply.
     */
    @Override
    public Future<TransactionId> getAllQueueStat(final NodeRef nodeRef) {
        Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
        final SettableFuture<TransactionId> result = SettableFuture.create();
        final RpcJobsQueue getAllQueueStat = new RpcJobsQueue() {

            @Override
            public Void call() throws Exception {
                final GetAllQueuesStatisticsFromAllPortsInputBuilder builder =
                        new GetAllQueuesStatisticsFromAllPortsInputBuilder();
                builder.setNode(nodeRef);
                registrationRpcFutureCallBack(queueStatsService
                        .getAllQueuesStatisticsFromAllPorts(builder.build()), null, nodeRef, result);
                return null;
            }
        };
        addGetAllStatJob(getAllQueueStat);
        return result;
    }
+
+ @Override
+ public Future<TransactionId> getAllMeterConfigStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue qetAllMeterConfStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllMeterConfigStatisticsInputBuilder builder =
+ new GetAllMeterConfigStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(meterStatsService
+ .getAllMeterConfigStatistics(builder.build()), null, nodeRef, result);
+ return null;
+ }
+ };
+ addGetAllStatJob(qetAllMeterConfStat);
+ return result;
+ }
+
    /**
     * Queues an RPC job asking the device for its group features. Returns void
     * and registers a null future with the callback, so no TransactionId is
     * exposed to the caller (fire-and-forget from the caller's point of view).
     */
    @Override
    public void getGroupFeaturesStat(final NodeRef nodeRef) {
        Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
        final RpcJobsQueue getGroupFeaturesStat = new RpcJobsQueue() {

            @Override
            public Void call() throws Exception {
                /* RPC input */
                final GetGroupFeaturesInputBuilder input = new GetGroupFeaturesInputBuilder();
                input.setNode(nodeRef);
                registrationRpcFutureCallBack(groupStatsService.getGroupFeatures(input.build()), null, nodeRef, null);
                return null;
            }
        };
        // Plain stat job queue (addStatJob), unlike the "get all" collection requests.
        addStatJob(getGroupFeaturesStat);
    }
+
    /**
     * Queues an RPC job asking the device for its meter features. Returns void
     * and registers a null future with the callback, so no TransactionId is
     * exposed to the caller (fire-and-forget from the caller's point of view).
     */
    @Override
    public void getMeterFeaturesStat(final NodeRef nodeRef) {
        Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
        final RpcJobsQueue getMeterFeaturesStat = new RpcJobsQueue() {

            @Override
            public Void call() throws Exception {
                /* RPC input */
                final GetMeterFeaturesInputBuilder input = new GetMeterFeaturesInputBuilder();
                input.setNode(nodeRef);
                registrationRpcFutureCallBack(meterStatsService.getMeterFeatures(input.build()), null, nodeRef, null);
                return null;
            }
        };
        addStatJob(getMeterFeaturesStat);
    }
+
+ @Override
+ public Future<TransactionId> getAllGroupsConfStats(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final SettableFuture<TransactionId> result = SettableFuture.create();
+ final RpcJobsQueue getAllGropConfStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetGroupDescriptionInputBuilder builder =
+ new GetGroupDescriptionInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(groupStatsService
+ .getGroupDescription(builder.build()), null, nodeRef, result);
+
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllGropConfStat);
+ return result;
+ }
+
    /**
     * Value holder pairing one TransactionId with the node it belongs to, the
     * optional RPC input that triggered it, and the notifications collected for
     * that transaction. Notifications are stored in a CopyOnWriteArrayList so
     * addNotif() can run while readers iterate getNotifications().
     */
    public class TransactionCacheContainerImpl<T extends TransactionAware> implements TransactionCacheContainer<T> {

        private final TransactionId id;
        private final NodeId nId;
        // Thread-safe accumulator for per-transaction notifications.
        private final List<T> notifications;
        // Original RPC input; absent when the transaction had no input.
        private final Optional<? extends DataObject> confInput;

        /**
         * @param id transaction id, must not be null
         * @param input original RPC input, may be null (stored as absent)
         * @param nodeId node the transaction belongs to
         */
        public <D extends DataObject> TransactionCacheContainerImpl (final TransactionId id, final D input, final NodeId nodeId) {
            this.id = Preconditions.checkNotNull(id, "TransactionId can not be null!");
            notifications = new CopyOnWriteArrayList<T>();
            confInput = Optional.fromNullable(input);
            nId = nodeId;
        }

        /** Appends a notification belonging to this transaction. */
        @Override
        public void addNotif(final T notif) {
            notifications.add(notif);
        }

        @Override
        public TransactionId getId() {
            return id;
        }

        @Override
        public NodeId getNodeId() {
            return nId;
        }

        /** Returns the live (mutable) notification list, not a defensive copy. */
        @Override
        public List<T> getNotifications() {
            return notifications;
        }

        @Override
        public Optional<? extends DataObject> getConfInput() {
            return confInput;
        }
    }
+}
+
--- /dev/null
+package org.opendaylight.controller.md.statistics.manager.impl;
+
/**
 * Immutable runtime settings for the statistics manager: the number of nodes a
 * single collector may own and the minimal network-monitoring request interval.
 * Instances are obtained through the nested builder, see {@link #builder()}.
 */
public class StatisticsManagerConfig {
    private final int maxNodesForCollector;
    private final int minRequestNetMonitorInterval;

    private StatisticsManagerConfig(StatisticsManagerConfigBuilder builder) {
        maxNodesForCollector = builder.getMaxNodesForCollector();
        minRequestNetMonitorInterval = builder.getMinRequestNetMonitorInterval();
    }

    /** @return a fresh, mutable builder for this configuration */
    public static StatisticsManagerConfigBuilder builder() {
        return new StatisticsManagerConfigBuilder();
    }

    public int getMaxNodesForCollector() {
        return maxNodesForCollector;
    }

    public int getMinRequestNetMonitorInterval() {
        return minRequestNetMonitorInterval;
    }

    /** Mutable companion object used to assemble a {@link StatisticsManagerConfig}. */
    public static class StatisticsManagerConfigBuilder {
        private int maxNodesForCollector;
        private int minRequestNetMonitorInterval;

        public int getMaxNodesForCollector() {
            return maxNodesForCollector;
        }

        public void setMaxNodesForCollector(int maxNodesForCollector) {
            this.maxNodesForCollector = maxNodesForCollector;
        }

        public int getMinRequestNetMonitorInterval() {
            return minRequestNetMonitorInterval;
        }

        public void setMinRequestNetMonitorInterval(int minRequestNetMonitorInterval) {
            this.minRequestNetMonitorInterval = minRequestNetMonitorInterval;
        }

        /** @return an immutable snapshot of the values currently set on this builder */
        public StatisticsManagerConfig build() {
            return new StatisticsManagerConfig(this);
        }
    }
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ThreadFactory;
+
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
+import org.opendaylight.controller.md.statistics.manager.StatListeningCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatNodeRegistration;
+import org.opendaylight.controller.md.statistics.manager.StatNotifyCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
/**
* statistics-manager
* org.opendaylight.controller.md.statistics.manager.impl
*
* StatisticsManagerImpl
* Represents the central point of the whole module. This implementation of
* {@link StatisticsManager} registers all Operational/DS {@link StatNotifyCommiter} and
* Config/DS {@link StatListeningCommiter} instances, as well as {@link StatPermCollector}
* for statistics collection and {@link StatRpcMsgManager} as the device RPC provider.
* In addition, StatisticsManager provides all data-store transaction services.
*
* @author avishnoi@in.ibm.com <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
*
*/
public class StatisticsManagerImpl implements StatisticsManager, Runnable {

    private final static Logger LOG = LoggerFactory.getLogger(StatisticsManagerImpl.class);

    // Capacity of the pending-operation queue / max operations folded into one transaction.
    private static final int QUEUE_DEPTH = 5000;
    private static final int MAX_BATCH = 100;

    // Producers call enqueue(); the run() loop drains this queue into DS transactions.
    private final BlockingQueue<StatDataStoreOperation> dataStoreOperQueue = new LinkedBlockingDeque<>(QUEUE_DEPTH);

    private final DataBroker dataBroker;
    private final ExecutorService statRpcMsgManagerExecutor;
    private final ExecutorService statDataStoreOperationServ;
    private StatRpcMsgManager rpcMsgManager;
    // Copy-on-write: readers iterate the current (unmodifiable) list; writers
    // replace it wholesale while holding statCollectorLock.
    private List<StatPermCollector> statCollectors;
    private final Object statCollectorLock = new Object();
    private BindingTransactionChain txChain;
    // Stops the run() loop; volatile because it is written by close() and the loop thread.
    private volatile boolean finishing = false;

    private StatNodeRegistration nodeRegistrator;
    private StatListeningCommiter<Flow, OpendaylightFlowStatisticsListener> flowListeningCommiter;
    private StatListeningCommiter<Meter, OpendaylightMeterStatisticsListener> meterListeningCommiter;
    private StatListeningCommiter<Group, OpendaylightGroupStatisticsListener> groupListeningCommiter;
    private StatListeningCommiter<Queue, OpendaylightQueueStatisticsListener> queueNotifyCommiter;
    private StatNotifyCommiter<OpendaylightFlowTableStatisticsListener> tableNotifCommiter;
    private StatNotifyCommiter<OpendaylightPortStatisticsListener> portNotifyCommiter;

    private final StatisticsManagerConfig statManagerConfig;

    /**
     * Creates the manager, its two single-thread executors (one for the RPC
     * manager, one for the DS-operation loop) and the initial transaction chain.
     * Listeners and collectors are wired later in start().
     */
    public StatisticsManagerImpl (final DataBroker dataBroker, final StatisticsManagerConfig statManagerconfig) {
        statManagerConfig = Preconditions.checkNotNull(statManagerconfig);
        this.dataBroker = Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
        ThreadFactory threadFact;
        threadFact = new ThreadFactoryBuilder().setNameFormat("odl-stat-rpc-oper-thread-%d").build();
        statRpcMsgManagerExecutor = Executors.newSingleThreadExecutor(threadFact);
        threadFact = new ThreadFactoryBuilder().setNameFormat("odl-stat-ds-oper-thread-%d").build();
        statDataStoreOperationServ = Executors.newSingleThreadExecutor(threadFact);
        txChain = dataBroker.createTransactionChain(this);
    }

    /**
     * Instantiates the RPC message manager and all commiters/listeners, then
     * starts both worker threads (the RPC manager and this class' run() loop).
     */
    @Override
    public void start(final NotificationProviderService notifService,
            final RpcConsumerRegistry rpcRegistry) {
        Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
        rpcMsgManager = new StatRpcMsgManagerImpl(this, rpcRegistry, statManagerConfig.getMaxNodesForCollector());
        statCollectors = Collections.emptyList();
        nodeRegistrator = new StatNodeRegistrationImpl(this, dataBroker, notifService);
        flowListeningCommiter = new StatListenCommitFlow(this, dataBroker, notifService);
        meterListeningCommiter = new StatListenCommitMeter(this, dataBroker, notifService);
        groupListeningCommiter = new StatListenCommitGroup(this, dataBroker, notifService);
        tableNotifCommiter = new StatNotifyCommitTable(this, notifService);
        portNotifyCommiter = new StatNotifyCommitPort(this, notifService);
        queueNotifyCommiter = new StatListenCommitQueue(this, dataBroker, notifService);

        statRpcMsgManagerExecutor.execute(rpcMsgManager);
        statDataStoreOperationServ.execute(this);
        LOG.info("Statistics Manager started successfully!");
    }

    /**
     * Shuts everything down: signals the run() loop via "finishing", closes all
     * listeners/commiters, collectors, the RPC manager, both executors and the
     * transaction chain, nulling each reference after closing it.
     */
    @Override
    public void close() throws Exception {
        LOG.info("StatisticsManager close called");
        finishing = true;
        if (nodeRegistrator != null) {
            nodeRegistrator.close();
            nodeRegistrator = null;
        }
        if (flowListeningCommiter != null) {
            flowListeningCommiter.close();
            flowListeningCommiter = null;
        }
        if (meterListeningCommiter != null) {
            meterListeningCommiter.close();
            meterListeningCommiter = null;
        }
        if (groupListeningCommiter != null) {
            groupListeningCommiter.close();
            groupListeningCommiter = null;
        }
        if (tableNotifCommiter != null) {
            tableNotifCommiter.close();
            tableNotifCommiter = null;
        }
        if (portNotifyCommiter != null) {
            portNotifyCommiter.close();
            portNotifyCommiter = null;
        }
        if (queueNotifyCommiter != null) {
            queueNotifyCommiter.close();
            queueNotifyCommiter = null;
        }
        if (statCollectors != null) {
            for (StatPermCollector collector : statCollectors) {
                collector.close();
                // NOTE(review): this only clears the loop variable; it does not
                // modify the list and so has no effect.
                collector = null;
            }
            // NOTE(review): nulling statCollectors can NPE concurrent readers such
            // as isProvidedFlowNodeActive(), which iterate without a null check —
            // confirm shutdown ordering makes this safe.
            statCollectors = null;
        }
        if (rpcMsgManager != null) {
            rpcMsgManager.close();
            rpcMsgManager = null;
        }
        statRpcMsgManagerExecutor.shutdown();
        statDataStoreOperationServ.shutdown();
        if (txChain != null) {
            txChain.close();
            txChain = null;
        }
    }

    /**
     * Offers an operation to the bounded queue; on overflow the operation is
     * dropped (logged at debug) rather than blocking the producer.
     */
    @Override
    public void enqueue(final StatDataStoreOperation op) {
        // we don't need to block anything - next statistics come soon
        final boolean success = dataStoreOperQueue.offer(op);
        if ( ! success) {
            LOG.debug("Stat DS/Operational submiter Queue is full!");
        }
    }

    /**
     * DS-operation worker loop: blocks on the queue, then batches up to
     * MAX_BATCH operations into a single read-write transaction. On any failure
     * the transaction chain is closed and recreated and pending operations are
     * drained via cleanDataStoreOperQueue().
     */
    @Override
    public void run() {
        /* Neverending cyle - wait for finishing */
        while ( ! finishing) {
            try {
                StatDataStoreOperation op = dataStoreOperQueue.take();
                final ReadWriteTransaction tx = txChain.newReadWriteTransaction();
                LOG.trace("New operations available, starting transaction {}", tx.getIdentifier());

                int ops = 0;
                do {
                    op.applyOperation(tx);

                    ops++;
                    if (ops < MAX_BATCH) {
                        // Non-blocking poll: batch whatever is immediately available.
                        op = dataStoreOperQueue.poll();
                    } else {
                        op = null;
                    }
                } while (op != null);

                LOG.trace("Processed {} operations, submitting transaction {}", ops, tx.getIdentifier());

                // Block until the commit completes before taking the next batch.
                tx.submit().checkedGet();
            } catch (final InterruptedException e) {
                LOG.warn("Stat Manager DS Operation thread interupted!", e);
                finishing = true;
            } catch (final Exception e) {
                LOG.warn("Unhandled exception during processing statistics. Restarting transaction chain.", e);
                txChain.close();
                txChain = dataBroker.createTransactionChain(StatisticsManagerImpl.this);
                cleanDataStoreOperQueue();
            }
        }
        // Drain all events, making sure any blocked threads are unblocked
        cleanDataStoreOperQueue();
    }

    /**
     * Drains the operation queue. NODE_REMOVAL operations still get their
     * clean-up callback executed (with a null transaction); everything else is
     * discarded.
     */
    private synchronized void cleanDataStoreOperQueue() {
        // Drain all events, making sure any blocked threads are unblocked
        while (! dataStoreOperQueue.isEmpty()) {
            StatDataStoreOperation op = dataStoreOperQueue.poll();

            // Execute the node removal clean up operation if queued in the
            // operational queue.
            if (op.getType() == StatsManagerOperationType.NODE_REMOVAL) {
                try {
                    LOG.debug("Node {} disconnected. Cleaning internal data.",op.getNodeId());
                    op.applyOperation(null);
                } catch (final Exception ex) {
                    LOG.warn("Unhandled exception while cleaning up internal data of node [{}]",op.getNodeId());
                }
            }
        }
    }

    /** Transaction-chain failure callback: only logged; run() recreates the chain. */
    @Override
    public void onTransactionChainFailed(final TransactionChain<?, ?> chain, final AsyncTransaction<?, ?> transaction,
            final Throwable cause) {
        LOG.warn("Failed to export Flow Capable Statistics, Transaction {} failed.",transaction.getIdentifier(),cause);
    }

    @Override
    public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
        // NOOP
    }

    /** @return true when any collector currently owns the given node. */
    @Override
    public boolean isProvidedFlowNodeActive(final InstanceIdentifier<Node> nodeIdent) {
        for (final StatPermCollector collector : statCollectors) {
            if (collector.isProvidedFlowNodeActive(nodeIdent)) {
                return true;
            }
        }
        return false;
    }

    /** Forwards the "collect next" trigger to the collector owning the node. */
    @Override
    public void collectNextStatistics(final InstanceIdentifier<Node> nodeIdent, final TransactionId xid) {
        for (final StatPermCollector collector : statCollectors) {
            if (collector.isProvidedFlowNodeActive(nodeIdent)) {
                collector.collectNextStatistics(xid);
            }
        }
    }

    /**
     * Registers a node with an existing collector; if all are full, creates a new
     * collector under statCollectorLock. The lock-free first pass is re-checked
     * inside the lock (double-check pattern) before the list is replaced with a
     * new unmodifiable copy.
     */
    @Override
    public void connectedNodeRegistration(final InstanceIdentifier<Node> nodeIdent,
            final List<StatCapabTypes> statTypes, final Short nrOfSwitchTables) {
        for (final StatPermCollector collector : statCollectors) {
            if (collector.connectedNodeRegistration(nodeIdent, statTypes, nrOfSwitchTables)) {
                return;
            }
        }
        synchronized (statCollectorLock) {
            for (final StatPermCollector collector : statCollectors) {
                if (collector.connectedNodeRegistration(nodeIdent, statTypes, nrOfSwitchTables)) {
                    return;
                }
            }
            final StatPermCollectorImpl newCollector = new StatPermCollectorImpl(this,
                    statManagerConfig.getMinRequestNetMonitorInterval(), statCollectors.size() + 1,
                    statManagerConfig.getMaxNodesForCollector());
            final List<StatPermCollector> statCollectorsNew = new ArrayList<>(statCollectors);
            newCollector.connectedNodeRegistration(nodeIdent, statTypes, nrOfSwitchTables);
            statCollectorsNew.add(newCollector);
            statCollectors = Collections.unmodifiableList(statCollectorsNew);
        }
    }

    /**
     * Unregisters a disconnected node and removes its collector when it no longer
     * has active nodes; activity is re-checked under statCollectorLock before the
     * collector list is replaced.
     */
    @Override
    public void disconnectedNodeUnregistration(final InstanceIdentifier<Node> nodeIdent) {
        flowListeningCommiter.cleanForDisconnect(nodeIdent);

        for (final StatPermCollector collector : statCollectors) {
            if (collector.disconnectedNodeUnregistration(nodeIdent)) {
                if ( ! collector.hasActiveNodes()) {
                    synchronized (statCollectorLock) {
                        if (collector.hasActiveNodes()) {
                            return;
                        }
                        final List<StatPermCollector> newStatColl =
                                new ArrayList<>(statCollectors);
                        newStatColl.remove(collector);
                        statCollectors = Collections.unmodifiableList(newStatColl);
                    }
                }
                return;
            }
        }
        LOG.debug("Node {} has not been removed.", nodeIdent);
    }

    /** Adds a statistics capability to whichever collector owns the node. */
    @Override
    public void registerAdditionalNodeFeature(final InstanceIdentifier<Node> nodeIdent,
            final StatCapabTypes statCapab) {
        for (final StatPermCollector collector : statCollectors) {
            if (collector.registerAdditionalNodeFeature(nodeIdent, statCapab)) {
                return;
            }
        }
        LOG.debug("Node {} has not been extended for feature {}!", nodeIdent, statCapab);
    }

    /* Getter internal Statistic Manager Job Classes */
    @Override
    public StatRpcMsgManager getRpcMsgManager() {
        return rpcMsgManager;
    }

    @Override
    public StatNodeRegistration getNodeRegistrator() {
        return nodeRegistrator;
    }

    @Override
    public StatListeningCommiter<Flow, OpendaylightFlowStatisticsListener> getFlowListenComit() {
        return flowListeningCommiter;
    }

    @Override
    public StatListeningCommiter<Meter, OpendaylightMeterStatisticsListener> getMeterListenCommit() {
        return meterListeningCommiter;
    }

    @Override
    public StatListeningCommiter<Group, OpendaylightGroupStatisticsListener> getGroupListenCommit() {
        return groupListeningCommiter;
    }

    @Override
    public StatListeningCommiter<Queue, OpendaylightQueueStatisticsListener> getQueueNotifyCommit() {
        return queueNotifyCommiter;
    }


    @Override
    public StatNotifyCommiter<OpendaylightFlowTableStatisticsListener> getTableNotifCommit() {
        return tableNotifCommiter;
    }

    @Override
    public StatNotifyCommiter<OpendaylightPortStatisticsListener> getPortNotifyCommit() {
        return portNotifyCommiter;
    }

    @Override
    public StatisticsManagerConfig getConfiguration() {
        return statManagerConfig;
    }
}
+
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.statistics.manager.impl.helper;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.InetAddresses;
+import java.net.Inet4Address;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.MacAddressFilter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.Layer3Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for comparing flows.
+ */
+public final class FlowComparator {
+ private final static Logger LOG = LoggerFactory.getLogger(FlowComparator.class);
+
+ private FlowComparator() {
+ throw new UnsupportedOperationException("Utilities class should not be instantiated");
+ }
+
+ public static boolean flowEquals(final Flow statsFlow, final Flow storedFlow) {
+ if (statsFlow == null || storedFlow == null) {
+ return false;
+ }
+ if (statsFlow.getContainerName()== null) {
+ if (storedFlow.getContainerName()!= null) {
+ return false;
+ }
+ } else if(!statsFlow.getContainerName().equals(storedFlow.getContainerName())) {
+ return false;
+ }
+ if (storedFlow.getPriority() == null) {
+ if (statsFlow.getPriority() != null && statsFlow.getPriority()!= 0x8000) {
+ return false;
+ }
+ } else if(!statsFlow.getPriority().equals(storedFlow.getPriority())) {
+ return false;
+ }
+ if (statsFlow.getMatch()== null) {
+ if (storedFlow.getMatch() != null) {
+ return false;
+ }
+ } else if(!matchEquals(statsFlow.getMatch(), storedFlow.getMatch())) {
+ return false;
+ }
+ if (statsFlow.getTableId() == null) {
+ if (storedFlow.getTableId() != null) {
+ return false;
+ }
+ } else if(!statsFlow.getTableId().equals(storedFlow.getTableId())) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Explicit equals method to compare the 'match' for flows stored in the data-stores and flow fetched from the switch.
+ * Flow installation process has three steps
+ * 1) Store flow in config data store
+ * 2) and send it to plugin for installation
+ * 3) Flow gets installed in switch
+ *
+ * The flow user wants to install and what finally gets installed in switch can be slightly different.
+ * E.g, If user installs flow with src/dst ip=10.0.0.1/24, when it get installed in the switch
+ * src/dst ip will be changes to 10.0.0.0/24 because of netmask of 24. When statistics manager fetch
+ * stats it gets 10.0.0.0/24 rather then 10.0.0.1/24. Custom match takes care of by using masked ip
+ * while comparing two ip addresses.
+ *
+ * Sometimes when user don't provide few values that is required by flow installation request, like
+ * priority,hard timeout, idle timeout, cookies etc, plugin usages default values before sending
+ * request to the switch. So when statistics manager gets flow statistics, it gets the default value.
+ * But the flow stored in config data store don't have those defaults value. I included those checks
+ * in the customer flow/match equal function.
+ *
+ *
+ * @param statsFlow
+ * @param storedFlow
+ * @return
+ */
+ public static boolean matchEquals(final Match statsFlow, final Match storedFlow) {
+ if (statsFlow == storedFlow) {
+ return true;
+ }
+ if (storedFlow == null && statsFlow != null) {
+ return false;
+ }
+ if (statsFlow == null && storedFlow != null) {
+ return false;
+ }
+ if (storedFlow.getEthernetMatch() == null) {
+ if (statsFlow.getEthernetMatch() != null) {
+ return false;
+ }
+ } else if(!ethernetMatchEquals(statsFlow.getEthernetMatch(),storedFlow.getEthernetMatch())) {
+ return false;
+ }
+ if (storedFlow.getIcmpv4Match()== null) {
+ if (statsFlow.getIcmpv4Match() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getIcmpv4Match().equals(statsFlow.getIcmpv4Match())) {
+ return false;
+ }
+ if (storedFlow.getIcmpv6Match() == null) {
+ if (statsFlow.getIcmpv6Match() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getIcmpv6Match().equals(statsFlow.getIcmpv6Match())) {
+ return false;
+ }
+ if (storedFlow.getInPhyPort() == null) {
+ if (statsFlow.getInPhyPort() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getInPhyPort().equals(statsFlow.getInPhyPort())) {
+ return false;
+ }
+ if (storedFlow.getInPort()== null) {
+ if (statsFlow.getInPort() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getInPort().equals(statsFlow.getInPort())) {
+ return false;
+ }
+ if (storedFlow.getIpMatch()== null) {
+ if (statsFlow.getIpMatch() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getIpMatch().equals(statsFlow.getIpMatch())) {
+ return false;
+ }
+ if (storedFlow.getLayer3Match()== null) {
+ if (statsFlow.getLayer3Match() != null) {
+ return false;
+ }
+ } else if(!layer3MatchEquals(statsFlow.getLayer3Match(),storedFlow.getLayer3Match())) {
+ return false;
+ }
+ if (storedFlow.getLayer4Match()== null) {
+ if (statsFlow.getLayer4Match() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getLayer4Match().equals(statsFlow.getLayer4Match())) {
+ return false;
+ }
+ if (storedFlow.getMetadata() == null) {
+ if (statsFlow.getMetadata() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getMetadata().equals(statsFlow.getMetadata())) {
+ return false;
+ }
+ if (storedFlow.getProtocolMatchFields() == null) {
+ if (statsFlow.getProtocolMatchFields() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getProtocolMatchFields().equals(statsFlow.getProtocolMatchFields())) {
+ return false;
+ }
+ if (storedFlow.getTunnel()== null) {
+ if (statsFlow.getTunnel() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getTunnel().equals(statsFlow.getTunnel())) {
+ return false;
+ }
+ if (storedFlow.getVlanMatch()== null) {
+ if (statsFlow.getVlanMatch() != null) {
+ return false;
+ }
+ } else if(!storedFlow.getVlanMatch().equals(statsFlow.getVlanMatch())) {
+ return false;
+ }
+ return true;
+ }
+
+ /*
+ * Custom EthernetMatch is required because mac address string provided by user in EthernetMatch can be in
+ * any case (upper or lower or mix). Ethernet Match which controller receives from switch is always
+ * an upper case string. Default EthernetMatch equals doesn't use equalsIgnoreCase() and hence it fails.
+ * E.g User provided mac address string in flow match is aa:bb:cc:dd:ee:ff and when controller fetch
+ * statistic data, openflow driver library returns AA:BB:CC:DD:EE:FF and default eqauls fails here.
+ */
+ @VisibleForTesting
+ static boolean ethernetMatchEquals(final EthernetMatch statsEthernetMatch, final EthernetMatch storedEthernetMatch){
+ boolean verdict = true;
+ final Boolean checkNullValues = checkNullValues(statsEthernetMatch, storedEthernetMatch);
+ if (checkNullValues != null) {
+ verdict = checkNullValues;
+ } else {
+ if(verdict){
+ verdict = ethernetMatchFieldsEquals(statsEthernetMatch.getEthernetSource(),storedEthernetMatch.getEthernetSource());
+ }
+ if(verdict){
+ verdict = ethernetMatchFieldsEquals(statsEthernetMatch.getEthernetDestination(),storedEthernetMatch.getEthernetDestination());
+ }
+ if(verdict){
+ if(statsEthernetMatch.getEthernetType() == null){
+ if(storedEthernetMatch.getEthernetType() != null){
+ verdict = false;
+ }
+ }else{
+ verdict = statsEthernetMatch.getEthernetType().equals(storedEthernetMatch.getEthernetType());
+ }
+ }
+ }
+ return verdict;
+ }
+
+ private static boolean ethernetMatchFieldsEquals(final MacAddressFilter statsEthernetMatchFields,
+ final MacAddressFilter storedEthernetMatchFields){
+ boolean verdict = true;
+ final Boolean checkNullValues = checkNullValues(statsEthernetMatchFields, storedEthernetMatchFields);
+ if (checkNullValues != null) {
+ verdict = checkNullValues;
+ } else {
+ if(verdict){
+ verdict = macAddressEquals(statsEthernetMatchFields.getAddress(),storedEthernetMatchFields.getAddress());
+ }
+ if(verdict){
+ verdict = macAddressEquals(statsEthernetMatchFields.getMask(),storedEthernetMatchFields.getMask());
+ }
+ }
+ return verdict;
+ }
+
+ private static boolean macAddressEquals(final MacAddress statsMacAddress, final MacAddress storedMacAddress){
+ boolean verdict = true;
+ final Boolean checkNullValues = checkNullValues(statsMacAddress, storedMacAddress);
+ if (checkNullValues != null) {
+ verdict = checkNullValues;
+ } else {
+ verdict = statsMacAddress.getValue().equalsIgnoreCase(storedMacAddress.getValue());
+ }
+ return verdict;
+ }
+
+ @VisibleForTesting
+ static boolean layer3MatchEquals(final Layer3Match statsLayer3Match, final Layer3Match storedLayer3Match){
+ boolean verdict = true;
+ if(statsLayer3Match instanceof Ipv4Match && storedLayer3Match instanceof Ipv4Match){
+ final Ipv4Match statsIpv4Match = (Ipv4Match)statsLayer3Match;
+ final Ipv4Match storedIpv4Match = (Ipv4Match)storedLayer3Match;
+
+ if (verdict) {
+ verdict = compareNullSafe(
+ storedIpv4Match.getIpv4Destination(), statsIpv4Match.getIpv4Destination());
+ }
+ if (verdict) {
+ verdict = compareNullSafe(
+ statsIpv4Match.getIpv4Source(), storedIpv4Match.getIpv4Source());
+ }
+ } else {
+ final Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
+ if (nullCheckOut != null) {
+ verdict = nullCheckOut;
+ } else {
+ verdict = storedLayer3Match.equals(statsLayer3Match);
+ }
+ }
+
+ return verdict;
+ }
+
+ private static boolean compareNullSafe(final Ipv4Prefix statsIpv4, final Ipv4Prefix storedIpv4) {
+ boolean verdict = true;
+ final Boolean checkDestNullValuesOut = checkNullValues(storedIpv4, statsIpv4);
+ if (checkDestNullValuesOut != null) {
+ verdict = checkDestNullValuesOut;
+ } else if(!IpAddressEquals(statsIpv4, storedIpv4)){
+ verdict = false;
+ }
+
+ return verdict;
+ }
+
+ private static Boolean checkNullValues(final Object v1, final Object v2) {
+ Boolean verdict = null;
+ if (v1 == null && v2 != null) {
+ verdict = Boolean.FALSE;
+ } else if (v1 != null && v2 == null) {
+ verdict = Boolean.FALSE;
+ } else if (v1 == null && v2 == null) {
+ verdict = Boolean.TRUE;
+ }
+
+ return verdict;
+ }
+
+ /**
+ * TODO: why don't we use the default Ipv4Prefix.equals()?
+ *
+ * @param statsIpAddress
+ * @param storedIpAddress
+ * @return true if IPv4prefixes equals
+ */
+ private static boolean IpAddressEquals(final Ipv4Prefix statsIpAddress, final Ipv4Prefix storedIpAddress) {
+ final IntegerIpAddress statsIpAddressInt = StrIpToIntIp(statsIpAddress.getValue());
+ final IntegerIpAddress storedIpAddressInt = StrIpToIntIp(storedIpAddress.getValue());
+
+ if(IpAndMaskBasedMatch(statsIpAddressInt,storedIpAddressInt)){
+ return true;
+ }
+ if(IpBasedMatch(statsIpAddressInt,storedIpAddressInt)){
+ return true;
+ }
+ return false;
+ }
+
+ private static boolean IpAndMaskBasedMatch(final IntegerIpAddress statsIpAddressInt,final IntegerIpAddress storedIpAddressInt){
+ return ((statsIpAddressInt.getIp() & statsIpAddressInt.getMask()) == (storedIpAddressInt.getIp() & storedIpAddressInt.getMask()));
+ }
+
+ private static boolean IpBasedMatch(final IntegerIpAddress statsIpAddressInt,final IntegerIpAddress storedIpAddressInt){
+ return (statsIpAddressInt.getIp() == storedIpAddressInt.getIp());
+ }
+
+    /**
+     * Converts an IPv4 address in dotted-quad notation (optionally "a.b.c.d/prefix")
+     * into its integer representation together with the integer netmask derived from
+     * the prefix length. A missing prefix is treated as /32 (full host match).
+     *
+     * @param ipAddresss address string, e.g. "10.1.2.0/24" or "10.1.2.3"
+     * @return the address and mask packed as ints
+     */
+    private static IntegerIpAddress StrIpToIntIp(final String ipAddresss) {
+        final String[] parts = ipAddresss.split("/");
+        final String ip = parts[0];
+        final int prefix = parts.length < 2 ? 32 : Integer.parseInt(parts[1]);
+
+        final Inet4Address addr = (Inet4Address) InetAddresses.forString(ip);
+        final byte[] addrBytes = addr.getAddress();
+        final int ipInt = ((addrBytes[0] & 0xFF) << 24) |
+                ((addrBytes[1] & 0xFF) << 16) |
+                ((addrBytes[2] & 0xFF) << 8) |
+                (addrBytes[3] & 0xFF);
+
+        // Java masks an int shift distance to 5 bits, so "0xffffffff << 32" is a
+        // no-op shift and would wrongly produce a full mask for a /0 prefix.
+        // A /0 prefix must yield an all-zero mask (matches everything).
+        final int mask = prefix == 0 ? 0 : 0xffffffff << (32 - prefix);
+
+        return new IntegerIpAddress(ipInt, mask);
+    }
+
+    /** Immutable value holder pairing an IPv4 address and its netmask, both as ints. */
+    private static class IntegerIpAddress {
+        private final int ip;
+        private final int mask;
+
+        public IntegerIpAddress(final int ip, final int mask) {
+            this.ip = ip;
+            this.mask = mask;
+        }
+
+        public int getIp() {
+            return ip;
+        }
+
+        public int getMask() {
+            return mask;
+        }
+    }
+}
--- /dev/null
+module statistics-manager {
+
+    yang-version 1;
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:statistics-manager";
+    prefix "statistics-manager";
+
+    import config { prefix config; revision-date 2013-04-05; }
+    import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; }
+
+    description
+        "This module contains the base YANG definitions for
+        statistics-manager implementation.";
+
+    revision "2014-09-25" {
+        description
+            "Initial revision.";
+    }
+
+    identity statistics-manager {
+        base config:module-type;
+        config:java-name-prefix StatisticsManager;
+    }
+
+    augment "/config:modules/config:module/config:configuration" {
+        case statistics-manager {
+            when "/config:modules/config:module/config:type = 'statistics-manager'";
+
+            // Required MD-SAL service dependencies.
+            container rpc-registry {
+                uses config:service-ref {
+                    refine type {
+                        mandatory true;
+                        config:required-identity mdsal:binding-rpc-registry;
+                    }
+                }
+            }
+
+            container notification-service {
+                uses config:service-ref {
+                    refine type {
+                        mandatory true;
+                        config:required-identity mdsal:binding-notification-service;
+                    }
+                }
+            }
+
+            container data-broker {
+                uses config:service-ref {
+                    refine type {
+                        mandatory false;
+                        config:required-identity mdsal:binding-async-data-broker;
+                    }
+                }
+            }
+
+            // Tuning knobs for the statistics collection cycle.
+            container statistics-manager-settings {
+                leaf min-request-net-monitor-interval {
+                    type int32;
+                }
+                leaf max-nodes-for-collector {
+                    type int32;
+                }
+            }
+        }
+    }
+
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl.helper;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.EtherType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.ethernet.match.fields.EthernetSourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.ethernet.match.fields.EthernetTypeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for the {@code FlowComparator} match-equality helpers: IPv4
+ * layer-3 matches and ethernet matches.
+ */
+public class StatisticsUpdateCommiterTest {
+
+    private static final Logger LOG = LoggerFactory
+            .getLogger(StatisticsUpdateCommiterTest.class);
+
+    /**
+     * Test method for {@link org.opendaylight.controller.md.statistics.manager.StatisticsListener#layer3MatchEquals(org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.Layer3Match, org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.Layer3Match)}.
+     */
+    @Test
+    public void testLayer3MatchEquals() {
+        // Each row: {match1 {src, dst}}, {match2 {src, dst}}; null means "field unset".
+        final String[][][] matchSeeds = new String[][][] {
+                {{"10.1.2.0/24", "10.1.2.0/24"}, {"10.1.2.0/24", "10.1.2.0/24"}},
+                {{"10.1.2.0/24", "10.1.2.0/24"}, {"10.1.2.0/24", "10.1.1.0/24"}},
+                {{"10.1.1.0/24", "10.1.2.0/24"}, {"10.1.2.0/24", "10.1.2.0/24"}},
+                {{"10.1.1.0/24", "10.1.1.0/24"}, {"10.1.2.0/24", "10.1.2.0/24"}},
+
+                {{"10.1.1.0/24", null}, {"10.1.1.0/24", "10.1.2.0/24"}},
+                {{"10.1.1.0/24", null}, {"10.1.2.0/24", "10.1.2.0/24"}},
+                {{"10.1.1.0/24", null}, {"10.1.2.0/24", null}},
+                {{"10.1.1.0/24", null}, {"10.1.1.0/24", null}},
+
+                {{null, "10.1.1.0/24"}, {"10.1.2.0/24", "10.1.1.0/24"}},
+                {{null, "10.1.1.0/24"}, {"10.1.2.0/24", "10.1.2.0/24"}},
+                {{null, "10.1.1.0/24"}, {null, "10.1.2.0/24"}},
+                {{null, "10.1.1.0/24"}, {null, "10.1.1.0/24"}},
+
+                {{null, null}, {null, "10.1.1.0/24"}},
+                {{null, null}, {null, null}},
+        };
+
+        // Expected layer3MatchEquals outcome for the corresponding seed row.
+        final boolean[] matches = new boolean[] {
+                true,
+                false,
+                false,
+                false,
+
+                false,
+                false,
+                false,
+                true,
+
+                false,
+                false,
+                false,
+                true,
+
+                false,
+                true
+        };
+
+        for (int i = 0; i < matches.length; i++) {
+            checkComparisonOfL3Match(
+                    matchSeeds[i][0][0], matchSeeds[i][0][1],
+                    matchSeeds[i][1][0], matchSeeds[i][1][1],
+                    matches[i]);
+        }
+    }
+
+    /**
+     * Builds two IPv4 matches and asserts that comparing them yields the
+     * expected verdict.
+     *
+     * @param m1Source match1 - src
+     * @param m1Destination match1 - dest
+     * @param m2Source match2 - src
+     * @param m2Destination match2 - dest
+     * @param matches expected match output
+     */
+    private static void checkComparisonOfL3Match(final String m1Source, final String m1Destination,
+            final String m2Source, final String m2Destination, final boolean matches) {
+        final Ipv4Match m1Layer3 = prepareIPv4Match(m1Source, m1Destination);
+        final Ipv4Match m2Layer3 = prepareIPv4Match(m2Source, m2Destination);
+        boolean comparisonResult;
+        try {
+            comparisonResult = FlowComparator.layer3MatchEquals(m1Layer3, m2Layer3);
+            Assert.assertEquals("failed to compare: "+m1Layer3+" vs. "+m2Layer3,
+                    matches, comparisonResult);
+        } catch (final Exception e) {
+            LOG.error("failed to compare: {} vs. {}", m1Layer3, m2Layer3, e);
+            Assert.fail(e.getMessage());
+        }
+    }
+
+    /** Builds an {@link Ipv4Match}, setting only the non-null source/destination prefixes. */
+    private static Ipv4Match prepareIPv4Match(final String source, final String destination) {
+        final Ipv4MatchBuilder ipv4MatchBuilder = new Ipv4MatchBuilder();
+        if (source != null) {
+            ipv4MatchBuilder.setIpv4Source(new Ipv4Prefix(source));
+        }
+        if (destination != null) {
+            ipv4MatchBuilder.setIpv4Destination(new Ipv4Prefix(destination));
+        }
+
+        return ipv4MatchBuilder.build();
+    }
+
+    /**
+     * Test method for {@link org.opendaylight.controller.md.statistics.manager.impl.helper.FlowComparator#ethernetMatchEquals(EthernetMatch, EthernetMatch)}.
+     */
+    @Test
+    public void testEthernetMatchEquals() {
+        // Each row: {match1 {src MAC, MAC mask, ethertype}}, {match2 {...}}; null = unset.
+        final String[][][] ethernetMatchSeeds = new String[][][] {
+                {{"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}},
+                {{"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"aa:bb:bc:cd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}},
+                {{"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"AA:BB:CC:DD:EE:FF", "ff:ff:ff:ff:ff:ff","0800"}},
+                {{"AA:BB:CC:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}},
+                {{"AA:BB:CC:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"aa:bb:cc:dd:ee:ff", "FF:FF:FF:FF:FF:FF","0800"}},
+                {{"AA:BB:CC:dd:ee:ff", "ff:ff:ff:ee:ee:ee","0800"}, {"aa:bb:cc:dd:ee:ff", "FF:FF:FF:FF:FF:FF","0800"}},
+
+                {{"AA:BB:CC:dd:ee:ff", null,"0800"}, {"aa:bb:cc:dd:ee:ff", null,"0800"}},
+                {{"AA:BB:CC:dd:ee:ff", null,"0800"}, {"aa:bb:cc:dd:ee:ff", null,"0806"}},
+                {{"AA:BB:CC:dd:ee:ff", null,"0800"}, {"aa:bb:cc:dd:ee:ff", "FF:FF:FF:FF:FF:FF","0800"}},
+                {{"AA:BB:CC:dd:ee:ff", null,"0800"}, {null, "FF:FF:FF:FF:FF:FF","0800"}},
+
+                {{"AA:BB:CC:dd:ee:ff", "ff:ff:ff:ff:ff:ff",null}, {null, "FF:FF:FF:FF:FF:FF","0800"}},
+                {{"AA:BB:CC:dd:ee:ff", "ff:ff:ff:ff:ff:ff",null}, {"aa:bb:cc:dd:ee:ff", "FF:FF:FF:FF:FF:FF",null}},
+                {{"AA:BB:CC:dd:ee:ff", "ff:ff:ff:ff:ff:ff",null}, {null, "FF:FF:FF:FF:FF:FF",null}},
+
+                {{null, null,null}, {null, null,"0800"}},
+                {{null, null,null}, {null, null,null}},
+        };
+
+        // Expected ethernetMatchEquals outcome for the corresponding seed row.
+        final boolean[] matches = new boolean[] {
+                true,
+                false,
+                true,
+                true,
+                true,
+                false,
+
+                true,
+                false,
+                false,
+                false,
+
+                false,
+                true,
+                false,
+
+                false,
+                true
+        };
+
+        for (int i = 0; i < matches.length; i++) {
+            checkComparisonOfEthernetMatch(
+                    ethernetMatchSeeds[i][0][0], ethernetMatchSeeds[i][0][1], ethernetMatchSeeds[i][0][2],
+                    ethernetMatchSeeds[i][1][0], ethernetMatchSeeds[i][1][1], ethernetMatchSeeds[i][1][2],
+                    matches[i]);
+        }
+    }
+
+    /**
+     * Builds two ethernet matches and asserts that comparing them yields the
+     * expected verdict.
+     *
+     * @param macAddress1 match1 source MAC
+     * @param macAddressMask1 match1 source MAC mask
+     * @param etherType1 match1 ethertype as a hex string
+     * @param macAddress2 match2 source MAC
+     * @param macAddressMask2 match2 source MAC mask
+     * @param etherType2 match2 ethertype as a hex string
+     * @param expectedResult expected match output
+     */
+    private static void checkComparisonOfEthernetMatch(final String macAddress1, final String macAddressMask1, final String etherType1,
+            final String macAddress2, final String macAddressMask2, final String etherType2, final boolean expectedResult) {
+        final EthernetMatch ethernetMatch1 = prepareEthernetMatch(macAddress1, macAddressMask1, etherType1);
+        final EthernetMatch ethernetMatch2 = prepareEthernetMatch(macAddress2, macAddressMask2, etherType2);
+        boolean comparisonResult;
+        try {
+            comparisonResult = FlowComparator.ethernetMatchEquals(ethernetMatch1, ethernetMatch2);
+            Assert.assertEquals("failed to compare: "+ethernetMatch1+" vs. "+ethernetMatch2,
+                    expectedResult, comparisonResult);
+        } catch (final Exception e) {
+            LOG.error("failed to compare: {} vs. {}", ethernetMatch1, ethernetMatch2, e);
+            Assert.fail(e.getMessage());
+        }
+    }
+
+    /** Builds an {@link EthernetMatch}, setting only the non-null source/mask/type fields. */
+    private static EthernetMatch prepareEthernetMatch(final String macAddress, final String macAddressMask, final String etherType) {
+        final EthernetMatchBuilder ethernetMatchBuilder = new EthernetMatchBuilder();
+        final EthernetSourceBuilder ethernetSourceBuilder = new EthernetSourceBuilder();
+        if (macAddress != null) {
+            ethernetSourceBuilder.setAddress(new MacAddress(macAddress));
+        }
+        if (macAddressMask != null) {
+            ethernetSourceBuilder.setMask(new MacAddress(macAddressMask));
+        }
+        if (etherType != null) {
+            // Ethertype strings are hex, e.g. "0800" = IPv4, "0806" = ARP.
+            final EthernetTypeBuilder ethernetType = new EthernetTypeBuilder();
+            ethernetType.setType(new EtherType(Long.parseLong(etherType, 16)));
+            ethernetMatchBuilder.setEthernetType(ethernetType.build());
+        }
+        ethernetMatchBuilder.setEthernetSource(ethernetSourceBuilder.build());
+
+        return ethernetMatchBuilder.build();
+    }
+}
--- /dev/null
+package test.mock;
+
+import org.junit.Test;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.StatisticsManagerTest;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Verifies that the {@link StatisticsManager} starts and stops tracking a
+ * flow-capable node as it is added to / removed from the inventory.
+ */
+public class NodeRegistrationTest extends StatisticsManagerTest {
+
+    /** Time given to the statistics manager to process an inventory change. */
+    private static final long NODE_SYNC_MILLIS = 2000;
+
+    @Test
+    public void nodeRegistrationTest() throws ExecutionException, InterruptedException {
+        final StatisticsManager statisticsManager = setupStatisticsManager();
+
+        addFlowCapableNode(s1Key);
+        // NOTE(review): a fixed sleep is inherently racy on slow machines; an
+        // explicit synchronization hook would be more reliable.
+        Thread.sleep(NODE_SYNC_MILLIS);
+        final InstanceIdentifier<Node> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key);
+
+        assertTrue(statisticsManager.isProvidedFlowNodeActive(nodeII));
+    }
+
+    @Test
+    public void nodeUnregistrationTest() throws ExecutionException, InterruptedException {
+        final StatisticsManager statisticsManager = setupStatisticsManager();
+
+        addFlowCapableNode(s1Key);
+        Thread.sleep(NODE_SYNC_MILLIS);
+        final InstanceIdentifier<Node> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key);
+
+        assertTrue(statisticsManager.isProvidedFlowNodeActive(nodeII));
+
+        // After removal the node must no longer be reported as active.
+        removeNode(s1Key);
+        Thread.sleep(NODE_SYNC_MILLIS);
+        assertFalse(statisticsManager.isProvidedFlowNodeActive(nodeII));
+    }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.base.Optional;
+import java.util.concurrent.ExecutionException;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityFlowStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityGroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityPortStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityQueueStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityTableStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.StatisticsManagerTest;
+
+/**
+ * Integration-style tests verifying that the statistics manager collects
+ * flow/group/meter/queue/port/table statistics and stores them as
+ * augmentations in the OPERATIONAL datastore.
+ */
+public class StatCollectorTest extends StatisticsManagerTest {
+    // Monitor used to park each test until the data-change listener observes
+    // the expected statistics being written.
+    // NOTE(review): a plain wait()/notify() pair can miss a notification that
+    // fires before wait() is entered; the @Test timeout is the only safety net.
+    private final Object waitObject = new Object();
+
+    /** Flow statistics are collected and stored under the flow's table. */
+    @Test(timeout = 200000)
+    public void getAllFlowStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityFlowStats.class);
+
+        final Flow flow = getFlow();
+
+        final InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+                .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(flow.getTableId()));
+
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                tableII.child(Flow.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        // Block until the listener reports the statistics write.
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<Table> tableOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+                .child(Table.class, new TableKey(flow.getTableId()))).checkedGet();
+        assertTrue(tableOptional.isPresent());
+        final FlowStatisticsData flowStats = tableOptional.get().getFlow().get(0).getAugmentation(FlowStatisticsData.class);
+        assertTrue(flowStats != null);
+        assertEquals(COUNTER_64_TEST_VALUE, flowStats.getFlowStatistics().getByteCount());
+    }
+
+    /** Group statistics and features are collected when the node advertises all features. */
+    @Test(timeout = 200000)
+    public void getAllGroupStatsFeatureNotAdvertisedTest() throws ExecutionException, InterruptedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, true);
+
+        final InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+                .augmentation(FlowCapableNode.class).child(Group.class, getGroup().getKey());
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                groupII.augmentation(NodeGroupStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<Group> optionalGroup = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+                .child(Group.class, getGroup().getKey())).get();
+
+        assertTrue(optionalGroup.isPresent());
+        assertTrue(optionalGroup.get().getAugmentation(NodeGroupDescStats.class) != null);
+        final NodeGroupStatistics groupStats = optionalGroup.get().getAugmentation(NodeGroupStatistics.class);
+        assertTrue(groupStats != null);
+        assertEquals(COUNTER_64_TEST_VALUE, groupStats.getGroupStatistics().getByteCount());
+
+        // Group features are stored as a node-level augmentation.
+        readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<GroupFeatures> optionalGroupFeatures = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(NodeGroupFeatures.class).child(GroupFeatures.class)).get();
+        assertTrue(optionalGroupFeatures.isPresent());
+        assertEquals(1, optionalGroupFeatures.get().getMaxGroups().size());
+        assertEquals(MAX_GROUPS_TEST_VALUE, optionalGroupFeatures.get().getMaxGroups().get(0));
+    }
+
+    /** Same as above, but the node advertises only the group-statistics capability. */
+    @Test(timeout = 200000)
+    public void getAllGroupStatsFeatureAdvertisedTest() throws ExecutionException, InterruptedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityGroupStats.class);
+
+        final InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+                .augmentation(FlowCapableNode.class).child(Group.class, getGroup().getKey());
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                groupII.augmentation(NodeGroupStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<Group> optionalGroup = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+                .child(Group.class, getGroup().getKey())).get();
+
+        assertTrue(optionalGroup.isPresent());
+        assertTrue(optionalGroup.get().getAugmentation(NodeGroupDescStats.class) != null);
+        final NodeGroupStatistics groupStats = optionalGroup.get().getAugmentation(NodeGroupStatistics.class);
+        assertTrue(groupStats != null);
+        assertEquals(COUNTER_64_TEST_VALUE, groupStats.getGroupStatistics().getByteCount());
+
+        readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<GroupFeatures> optionalGroupFeatures = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(NodeGroupFeatures.class).child(GroupFeatures.class)).get();
+        assertTrue(optionalGroupFeatures.isPresent());
+        assertEquals(1, optionalGroupFeatures.get().getMaxGroups().size());
+        assertEquals(MAX_GROUPS_TEST_VALUE, optionalGroupFeatures.get().getMaxGroups().get(0));
+    }
+
+    /** Meter statistics, config stats and meter features are collected and stored. */
+    @Test(timeout = 200000)
+    public void getAllMeterStatsTest() throws ExecutionException, InterruptedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, true);
+
+        final InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+                .augmentation(FlowCapableNode.class).child(Meter.class, getMeter().getKey());
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                meterII.augmentation(NodeMeterStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<Meter> optionalMeter = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+                .child(Meter.class, getMeter().getKey())).get();
+
+        assertTrue(optionalMeter.isPresent());
+        assertTrue(optionalMeter.get().getAugmentation(NodeMeterConfigStats.class) != null);
+        final NodeMeterStatistics meterStats = optionalMeter.get().getAugmentation(NodeMeterStatistics.class);
+        assertTrue(meterStats != null);
+        assertEquals(COUNTER_64_TEST_VALUE, meterStats.getMeterStatistics().getByteInCount());
+        assertEquals(COUNTER_64_TEST_VALUE, meterStats.getMeterStatistics().getPacketInCount());
+
+        readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<MeterFeatures> optionalMeterFeautures = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).augmentation(NodeMeterFeatures.class).child(MeterFeatures.class)).get();
+        assertTrue(optionalMeterFeautures.isPresent());
+        assertEquals(COUNTER_32_TEST_VALUE, optionalMeterFeautures.get().getMaxMeter());
+    }
+
+    /** Queue statistics are collected for a queue seeded under a node connector. */
+    @Test(timeout = 200000)
+    public void getAllQueueStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityQueueStats.class);
+
+        // Seed a node connector carrying a queue into the operational store.
+        final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
+        final FlowCapableNodeConnectorBuilder fcncBuilder = new FlowCapableNodeConnectorBuilder();
+        ncBuilder.setKey(new NodeConnectorKey(getNodeConnectorId()));
+        ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcncBuilder.build());
+
+        final InstanceIdentifier<NodeConnector> nodeConnectorII = InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key)
+                .child(NodeConnector.class, ncBuilder.getKey());
+
+        final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+        writeTx.put(LogicalDatastoreType.OPERATIONAL, nodeConnectorII, ncBuilder.build());
+        final InstanceIdentifier<Queue> queueII = nodeConnectorII.augmentation(FlowCapableNodeConnector.class)
+                .child(Queue.class, getQueue().getKey());
+        final QueueBuilder qBuilder = new QueueBuilder(getQueue());
+        writeTx.put(LogicalDatastoreType.OPERATIONAL, queueII, qBuilder.build());
+        assertCommit(writeTx.submit());
+
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<Queue> queueOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, queueII).checkedGet();
+        assertTrue(queueOptional.isPresent());
+        final FlowCapableNodeConnectorQueueStatisticsData queueStats =
+                queueOptional.get().getAugmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
+        assertTrue(queueStats != null);
+        assertEquals(COUNTER_64_TEST_VALUE,
+                queueStats.getFlowCapableNodeConnectorQueueStatistics().getTransmittedBytes());
+    }
+
+    /** Port statistics are collected for a seeded node connector. */
+    @Test(timeout = 200000)
+    public void getAllPortStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityPortStats.class);
+
+        final InstanceIdentifier<NodeConnector> nodeConnectorII = InstanceIdentifier.create(Nodes.class)
+                .child(Node.class, s1Key).child(NodeConnector.class, new NodeConnectorKey(getNodeConnectorId()));
+
+        NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
+        ncBuilder.setKey(new NodeConnectorKey(getNodeConnectorId()));
+        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+        writeTx.put(LogicalDatastoreType.OPERATIONAL, nodeConnectorII, ncBuilder.build());
+        assertCommit(writeTx.submit());
+
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                nodeConnectorII.augmentation(FlowCapableNodeConnectorStatisticsData.class),
+                new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<FlowCapableNodeConnectorStatisticsData> flowCapableNodeConnectorStatisticsDataOptional =
+                readTx.read(LogicalDatastoreType.OPERATIONAL,
+                        nodeConnectorII.augmentation(FlowCapableNodeConnectorStatisticsData.class)).checkedGet();
+        assertTrue(flowCapableNodeConnectorStatisticsDataOptional.isPresent());
+        assertEquals(BIG_INTEGER_TEST_VALUE,
+                flowCapableNodeConnectorStatisticsDataOptional.get().getFlowCapableNodeConnectorStatistics()
+                        .getReceiveDrops());
+        assertEquals(BIG_INTEGER_TEST_VALUE,
+                flowCapableNodeConnectorStatisticsDataOptional.get().getFlowCapableNodeConnectorStatistics()
+                        .getCollisionCount());
+    }
+
+    /** Table statistics are collected and stored as a table augmentation. */
+    @Test(timeout = 200000)
+    public void getAllTableStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+        setupStatisticsManager();
+
+        addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityTableStats.class);
+
+        final TableId tableId = getTableId();
+        final InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+                .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId.getValue()));
+
+        getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                tableII.augmentation(FlowTableStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+        synchronized (waitObject) {
+            waitObject.wait();
+        }
+
+        final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        final Optional<FlowTableStatisticsData> flowTableStatisticsDataOptional = readTx.read(
+                LogicalDatastoreType.OPERATIONAL, tableII.augmentation(FlowTableStatisticsData.class)).checkedGet();
+        assertTrue(flowTableStatisticsDataOptional.isPresent());
+        assertEquals(COUNTER_32_TEST_VALUE,
+                flowTableStatisticsDataOptional.get().getFlowTableStatistics().getActiveFlows());
+        assertEquals(COUNTER_64_TEST_VALUE,
+                flowTableStatisticsDataOptional.get().getFlowTableStatistics().getPacketsLookedUp());
+    }
+
+    /** Wakes the waiting test thread when any watched data change arrives. */
+    public class ChangeListener implements DataChangeListener {
+
+        @Override
+        public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+            synchronized (waitObject) {
+                waitObject.notify();
+            }
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Base class for tests needing an in-memory binding-aware {@link DataBroker}
+ * and a {@link DOMDataBroker}, wired together by a {@link DataBrokerTestCustomizer}.
+ */
+public class AbstractDataBrokerTest extends AbstractSchemaAwareTest {
+
+    private DataBrokerTestCustomizer testCustomizer;
+    private DataBroker dataBroker;
+    private DOMDataBroker domBroker;
+
+
+    /**
+     * Creates both brokers via the customizer, pushes the schema context into
+     * the customizer, then lets subclasses seed data via
+     * {@link #setupWithDataBroker(DataBroker)}.
+     */
+    @Override
+    protected void setupWithSchema(final SchemaContext context) {
+        testCustomizer = createDataBrokerTestCustomizer();
+        dataBroker = testCustomizer.createDataBroker();
+        domBroker = testCustomizer.createDOMDataBroker();
+        testCustomizer.updateSchema(context);
+        setupWithDataBroker(dataBroker);
+    }
+
+    /** Hook invoked after broker setup; subclasses may seed initial data here. */
+    protected void setupWithDataBroker(final DataBroker dataBroker) {
+        // Intentionally left No-op, subclasses may customize it
+    }
+
+    /** Override to supply a customized broker wiring. */
+    protected DataBrokerTestCustomizer createDataBrokerTestCustomizer() {
+        return new DataBrokerTestCustomizer();
+    }
+
+    public DataBroker getDataBroker() {
+        return dataBroker;
+    }
+
+    public DOMDataBroker getDomBroker() {
+        return domBroker;
+    }
+
+    /**
+     * Waits up to 500 ms for the commit future to complete, failing fast with
+     * an IllegalStateException on error, interruption or timeout.
+     */
+    protected static final void assertCommit(final ListenableFuture<Void> commit) {
+        try {
+            commit.get(500, TimeUnit.MILLISECONDS);
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.junit.Before;
+import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Base class that assembles a {@link SchemaContext} from the YANG module infos
+ * found on the classpath before each test and hands it to subclasses.
+ */
+public abstract class AbstractSchemaAwareTest {
+
+    private Iterable<YangModuleInfo> moduleInfos;
+    private SchemaContext schemaContext;
+
+
+    /** Returns the module infos used to build the schema; subclasses may override. */
+    protected Iterable<YangModuleInfo> getModuleInfos() {
+        return BindingReflections.loadModuleInfos();
+    }
+
+
+    /**
+     * Builds the schema context from the discovered module infos and passes it
+     * to {@link #setupWithSchema(SchemaContext)} before each test method.
+     */
+    @Before
+    public final void setup() {
+        moduleInfos = getModuleInfos();
+        ModuleInfoBackedContext moduleContext = ModuleInfoBackedContext.create();
+        moduleContext.addModuleInfos(moduleInfos);
+        schemaContext = moduleContext.tryToCreateSchemaContext().get();
+        setupWithSchema(schemaContext);
+    }
+
+    /**
+     * Setups test with Schema context.
+     * Called once from {@link #setup()} before each test method runs.
+     *
+     * @param context the effective schema context assembled from the module infos
+     */
+    protected abstract void setupWithSchema(SchemaContext context);
+
+}
--- /dev/null
+package test.mock.util;
+
+import org.osgi.framework.Bundle;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.BundleException;
+import org.osgi.framework.BundleListener;
+import org.osgi.framework.Filter;
+import org.osgi.framework.FrameworkListener;
+import org.osgi.framework.InvalidSyntaxException;
+import org.osgi.framework.ServiceListener;
+import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.Collection;
+import java.util.Dictionary;
+
+/**
+ * No-op {@link BundleContext} stub for unit tests: every lookup returns
+ * {@code null} (or an empty array where the OSGi API returns arrays), every
+ * registration/listener method does nothing. Only suitable where the code
+ * under test tolerates absent OSGi services.
+ */
+public class BundleContextMock implements BundleContext {
+ @Override
+ public String getProperty(String s) {
+ return null;
+ }
+
+ @Override
+ public Bundle getBundle() {
+ return null;
+ }
+
+ @Override
+ public Bundle installBundle(String s, InputStream inputStream) throws BundleException {
+ return null;
+ }
+
+ @Override
+ public Bundle installBundle(String s) throws BundleException {
+ return null;
+ }
+
+ @Override
+ public Bundle getBundle(long l) {
+ return null;
+ }
+
+ // Empty array rather than null, matching what callers of the OSGi API expect to iterate.
+ @Override
+ public Bundle[] getBundles() {
+ return new Bundle[0];
+ }
+
+ @Override
+ public void addServiceListener(ServiceListener serviceListener, String s) throws InvalidSyntaxException {
+
+ }
+
+ @Override
+ public void addServiceListener(ServiceListener serviceListener) {
+
+ }
+
+ @Override
+ public void removeServiceListener(ServiceListener serviceListener) {
+
+ }
+
+ @Override
+ public void addBundleListener(BundleListener bundleListener) {
+
+ }
+
+ @Override
+ public void removeBundleListener(BundleListener bundleListener) {
+
+ }
+
+ @Override
+ public void addFrameworkListener(FrameworkListener frameworkListener) {
+
+ }
+
+ @Override
+ public void removeFrameworkListener(FrameworkListener frameworkListener) {
+
+ }
+
+ @Override
+ public ServiceRegistration<?> registerService(String[] strings, Object o, Dictionary<String, ?> stringDictionary) {
+ return null;
+ }
+
+ @Override
+ public ServiceRegistration<?> registerService(String s, Object o, Dictionary<String, ?> stringDictionary) {
+ return null;
+ }
+
+ @Override
+ public <S> ServiceRegistration<S> registerService(Class<S> sClass, S s, Dictionary<String, ?> stringDictionary) {
+ return null;
+ }
+
+ @Override
+ public ServiceReference<?>[] getServiceReferences(String s, String s2) throws InvalidSyntaxException {
+ return new ServiceReference<?>[0];
+ }
+
+ @Override
+ public ServiceReference<?>[] getAllServiceReferences(String s, String s2) throws InvalidSyntaxException {
+ return new ServiceReference<?>[0];
+ }
+
+ @Override
+ public ServiceReference<?> getServiceReference(String s) {
+ return null;
+ }
+
+ @Override
+ public <S> ServiceReference<S> getServiceReference(Class<S> sClass) {
+ return null;
+ }
+
+ @Override
+ public <S> Collection<ServiceReference<S>> getServiceReferences(Class<S> sClass, String s) throws InvalidSyntaxException {
+ return null;
+ }
+
+ @Override
+ public <S> S getService(ServiceReference<S> sServiceReference) {
+ return null;
+ }
+
+ @Override
+ public boolean ungetService(ServiceReference<?> serviceReference) {
+ return false;
+ }
+
+ @Override
+ public File getDataFile(String s) {
+ return null;
+ }
+
+ @Override
+ public Filter createFilter(String s) throws InvalidSyntaxException {
+ return null;
+ }
+
+ @Override
+ public Bundle getBundle(String s) {
+ return null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import javassist.ClassPool;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBindingDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.DataObjectSerializerGenerator;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
+import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
+import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
+import org.opendaylight.yangtools.sal.binding.generator.impl.RuntimeGeneratedMappingServiceImpl;
+import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class DataBrokerTestCustomizer {
+
+ private DOMDataBroker domDataBroker;
+ private final RuntimeGeneratedMappingServiceImpl mappingService;
+ private final MockSchemaService schemaService;
+ private ImmutableMap<LogicalDatastoreType, DOMStore> datastores;
+ private final BindingToNormalizedNodeCodec bindingToNormalized ;
+
+ public ImmutableMap<LogicalDatastoreType, DOMStore> createDatastores() {
+ return ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
+ .put(LogicalDatastoreType.OPERATIONAL, createOperationalDatastore())
+ .put(LogicalDatastoreType.CONFIGURATION,createConfigurationDatastore())
+ .build();
+ }
+
+ public DataBrokerTestCustomizer() {
+ schemaService = new MockSchemaService();
+ ClassPool pool = ClassPool.getDefault();
+ mappingService = new RuntimeGeneratedMappingServiceImpl(pool);
+ DataObjectSerializerGenerator generator = StreamWriterGenerator.create(JavassistUtils.forClassPool(pool));
+ BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
+ GeneratedClassLoadingStrategy loading = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
+ bindingToNormalized = new BindingToNormalizedNodeCodec(loading, mappingService, codecRegistry);
+ schemaService.registerSchemaContextListener(bindingToNormalized);
+ }
+
+ public DOMStore createConfigurationDatastore() {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ schemaService.registerSchemaContextListener(store);
+ return store;
+ }
+
+ public DOMStore createOperationalDatastore() {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+ schemaService.registerSchemaContextListener(store);
+ return store;
+ }
+
+ public DOMDataBroker createDOMDataBroker() {
+ return new SerializedDOMDataBroker(getDatastores(), getCommitCoordinatorExecutor());
+ }
+
+ public ListeningExecutorService getCommitCoordinatorExecutor() {
+ return MoreExecutors.sameThreadExecutor();
+ }
+
+ public DataBroker createDataBroker() {
+ return new ForwardedBindingDataBroker(getDOMDataBroker(), bindingToNormalized, schemaService );
+ }
+
+ public ForwardedBackwardsCompatibleDataBroker createBackwardsCompatibleDataBroker() {
+ return new ForwardedBackwardsCompatibleDataBroker(getDOMDataBroker(), bindingToNormalized, getSchemaService(), MoreExecutors.sameThreadExecutor());
+ }
+
+ private SchemaService getSchemaService() {
+ return schemaService;
+ }
+
+ private DOMDataBroker getDOMDataBroker() {
+ if(domDataBroker == null) {
+ domDataBroker = createDOMDataBroker();
+ }
+ return domDataBroker;
+ }
+
+ private synchronized ImmutableMap<LogicalDatastoreType, DOMStore> getDatastores() {
+ if (datastores == null) {
+ datastores = createDatastores();
+ }
+ return datastores;
+ }
+
+ public void updateSchema(final SchemaContext ctx) {
+ schemaService.changeSchema(ctx);
+ mappingService.onGlobalContextUpdated(ctx);
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
+
+import java.math.BigInteger;
+import java.util.Random;
+
+public class FlowMockGenerator {
+ private static final Random rnd = new Random();
+ private static final FlowBuilder flowBuilder = new FlowBuilder();
+
+ public static Flow getRandomFlow() {
+ flowBuilder.setKey(new FlowKey(new FlowId("flow." + rnd.nextInt(1000))));
+ flowBuilder.setOutGroup(TestUtils.nextLong(0, 4294967296L));
+ flowBuilder.setTableId((short) rnd.nextInt(256));
+ flowBuilder.setOutPort(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE)));
+ flowBuilder.setStrict(rnd.nextBoolean());
+ flowBuilder.setContainerName("container." + rnd.nextInt(1000));
+ flowBuilder.setBarrier(rnd.nextBoolean());
+ flowBuilder.setMatch(MatchMockGenerator.getRandomMatch());
+ flowBuilder.setPriority(rnd.nextInt(65535));
+ flowBuilder.setCookie(new FlowCookie(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE))));
+ flowBuilder.setCookieMask(flowBuilder.getCookie());
+ return flowBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupTypes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+
+import java.util.Random;
+
+public class GroupMockGenerator {
+ private static final Random rnd = new Random();
+ private static final GroupBuilder groupBuilder = new GroupBuilder();
+
+ public static Group getRandomGroup() {
+ groupBuilder.setKey(new GroupKey(new GroupId(TestUtils.nextLong(0, 4294967295L))));
+ groupBuilder.setContainerName("container." + rnd.nextInt(1000));
+ groupBuilder.setBarrier(rnd.nextBoolean());
+ groupBuilder.setGroupName("group." + rnd.nextInt(1000));
+ groupBuilder.setGroupType(GroupTypes.forValue(rnd.nextInt(4)));
+ return groupBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Dscp;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.MetadataBuilder;
+
+import java.math.BigInteger;
+import java.util.Random;
+
+public class MatchMockGenerator {
+ private static final Random rnd = new Random();
+ private static final MatchBuilder matchBuilder = new MatchBuilder();
+ private static final IpMatchBuilder ipMatchBuilder = new IpMatchBuilder();
+ private static final MetadataBuilder metadataBuilder = new MetadataBuilder();
+
+ public static Match getRandomMatch() {
+ matchBuilder.setInPort(new NodeConnectorId("port." + rnd.nextInt(500)));
+ ipMatchBuilder.setIpDscp(new Dscp((short) rnd.nextInt(64))).build();
+ ipMatchBuilder.setIpEcn((short) rnd.nextInt(256));
+ ipMatchBuilder.setIpProtocol((short) rnd.nextInt(256));
+ matchBuilder.setIpMatch(ipMatchBuilder.build());
+ metadataBuilder.setMetadata(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE)));
+ metadataBuilder.setMetadataMask(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE)));
+ matchBuilder.setMetadata(metadataBuilder.build());
+ return matchBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.BandId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.MeterBandHeadersBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.meter.band.headers.MeterBandHeader;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.meter.band.headers.MeterBandHeaderBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.meter.band.headers.MeterBandHeaderKey;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+public class MeterMockGenerator {
+ private static final Random rnd = new Random();
+ private static final MeterBuilder meterBuilder = new MeterBuilder();
+ private static final MeterBandHeaderBuilder meterBandHeaderBuilder = new MeterBandHeaderBuilder();
+ private static final MeterBandHeadersBuilder meterBandHeadersBuilder = new MeterBandHeadersBuilder();
+
+ public static Meter getRandomMeter() {
+ meterBandHeaderBuilder.setKey(new MeterBandHeaderKey(new BandId(TestUtils.nextLong(0, 4294967295L))));
+ meterBandHeaderBuilder.setBandBurstSize(TestUtils.nextLong(0, 4294967295L));
+ meterBandHeaderBuilder.setBandRate(TestUtils.nextLong(0, 4294967295L));
+ List<MeterBandHeader> meterBandHeaders = new ArrayList<>();
+ meterBuilder.setKey(new MeterKey(new MeterId(TestUtils.nextLong(0, 4294967295L))));
+ meterBuilder.setBarrier(rnd.nextBoolean());
+ meterBuilder.setContainerName("container." + rnd.nextInt(1000));
+ meterBuilder.setMeterName("meter." + rnd.nextInt(1000));
+ meterBuilder.setMeterBandHeaders(meterBandHeadersBuilder.setMeterBandHeader(meterBandHeaders).build());
+ return meterBuilder.build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.ListenerRegistry;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+
+/**
+ * Minimal {@link SchemaService} for tests: holds a single mutable
+ * {@link SchemaContext} and fans updates out to registered listeners via
+ * {@link #changeSchema(SchemaContext)}. Module add/remove is unsupported.
+ */
+public final class MockSchemaService implements SchemaService, SchemaContextProvider {
+
+ private SchemaContext schemaContext;
+
+ ListenerRegistry<SchemaContextListener> listeners = ListenerRegistry.create();
+
+ @Override
+ public void addModule(final Module module) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized SchemaContext getGlobalContext() {
+ return schemaContext;
+ }
+
+ // Session context is the same as the global context in this mock.
+ @Override
+ public synchronized SchemaContext getSessionContext() {
+ return schemaContext;
+ }
+
+ @Override
+ public ListenerRegistration<SchemaContextListener> registerSchemaContextListener(
+ final SchemaContextListener listener) {
+ return listeners.register(listener);
+ }
+
+ @Override
+ public void removeModule(final Module module) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized SchemaContext getSchemaContext() {
+ return schemaContext;
+ }
+
+ /**
+ * Replaces the current schema context and synchronously notifies every
+ * registered listener with the new context.
+ */
+ public synchronized void changeSchema(final SchemaContext newContext) {
+ schemaContext = newContext;
+ for (ListenerRegistration<SchemaContextListener> listener : listeners) {
+ listener.getInstance().onGlobalContextUpdated(schemaContext);
+ }
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
+import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
+import org.opendaylight.yangtools.yang.binding.Notification;
+
+import java.util.Timer;
+import java.util.TimerTask;
+
+/**
+ * Small helper around {@link NotificationBrokerImpl} that can publish
+ * notifications immediately or after a delay (used by the statistics mocks
+ * to simulate asynchronous device replies).
+ */
+public class NotificationProviderServiceHelper {
+ private NotificationBrokerImpl notifBroker = new NotificationBrokerImpl(SingletonHolder.getDefaultNotificationExecutor());
+ // One shared daemon timer. The previous code created a fresh non-daemon
+ // Timer per call and never cancelled it, leaking one thread per delayed
+ // notification and potentially keeping the JVM alive after the tests end.
+ private final Timer timer = new Timer(true);
+
+ /** @return the underlying notification broker */
+ public NotificationBrokerImpl getNotifBroker() {
+ return notifBroker;
+ }
+
+ /**
+ * Publishes the notification asynchronously after the given delay.
+ *
+ * @param notification notification to publish
+ * @param delay delay in milliseconds
+ */
+ public void pushDelayedNotification(final Notification notification, int delay) {
+ timer.schedule(new TimerTask() {
+ @Override
+ public void run() {
+ notifBroker.publish(notification);
+ }
+ }, delay);
+ }
+
+ /** Publishes the notification immediately. */
+ public void pushNotification(final Notification notification) {
+ notifBroker.publish(notification);
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Mock flow-statistics RPC service: every call returns an immediate success
+ * RPC result carrying a fresh transaction id, and (for the match/table/flow
+ * variants) additionally publishes a delayed statistics-update notification
+ * through the supplied {@link NotificationProviderServiceHelper}, imitating
+ * the asynchronous reply pattern of a real switch.
+ */
+public class OpendaylightFlowStatisticsServiceMock implements OpendaylightFlowStatisticsService {
+ NotificationProviderServiceHelper notifService;
+
+ public OpendaylightFlowStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ // Returns only the transaction id; no follow-up notification is published.
+ @Override
+ public Future<RpcResult<GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput>> getAggregateFlowStatisticsFromFlowTableForAllFlows(GetAggregateFlowStatisticsFromFlowTableForAllFlowsInput input) {
+ GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutputBuilder builder = new GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+ builder.setTransactionId(transId);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutput>> getAggregateFlowStatisticsFromFlowTableForGivenMatch(GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput input) {
+ GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutputBuilder builder = new GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+ builder.setTransactionId(transId);
+ AggregateFlowStatisticsUpdateBuilder afsuBuilder = new AggregateFlowStatisticsUpdateBuilder();
+ afsuBuilder.setMoreReplies(false);
+ afsuBuilder.setTransactionId(transId);
+ afsuBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ // Echo back the node id taken from the request's instance identifier.
+ afsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ // 100 ms delay simulates the switch answering after the RPC returns.
+ notifService.pushDelayedNotification(afsuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ // Returns only the transaction id; no follow-up notification is published.
+ @Override
+ public Future<RpcResult<GetAllFlowStatisticsFromFlowTableOutput>> getAllFlowStatisticsFromFlowTable(GetAllFlowStatisticsFromFlowTableInput input) {
+ GetAllFlowStatisticsFromFlowTableOutputBuilder builder = new GetAllFlowStatisticsFromFlowTableOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+ builder.setTransactionId(transId);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAllFlowsStatisticsFromAllFlowTablesOutput>> getAllFlowsStatisticsFromAllFlowTables(GetAllFlowsStatisticsFromAllFlowTablesInput input) {
+ GetAllFlowsStatisticsFromAllFlowTablesOutputBuilder builder = new GetAllFlowsStatisticsFromAllFlowTablesOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+ builder.setTransactionId(transId);
+ List<FlowAndStatisticsMapList> flowAndStatisticsMapLists = new ArrayList<>();
+ FlowsStatisticsUpdateBuilder flowsStatisticsUpdateBuilder = new FlowsStatisticsUpdateBuilder();
+ flowsStatisticsUpdateBuilder.setTransactionId(transId);
+ flowsStatisticsUpdateBuilder.setMoreReplies(false);
+ flowsStatisticsUpdateBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ // Statistics entry is derived from the shared test fixture flow.
+ FlowAndStatisticsMapListBuilder flowAndStatisticsMapListBuilder = new FlowAndStatisticsMapListBuilder(StatisticsManagerTest.getFlow());
+ flowAndStatisticsMapListBuilder.setTableId(StatisticsManagerTest.getFlow().getTableId());
+ flowAndStatisticsMapListBuilder.setContainerName(StatisticsManagerTest.getFlow().getContainerName());
+ flowAndStatisticsMapListBuilder.setBarrier(StatisticsManagerTest.getFlow().isBarrier());
+ flowAndStatisticsMapListBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ flowAndStatisticsMapLists.add(flowAndStatisticsMapListBuilder.build());
+ flowsStatisticsUpdateBuilder.setFlowAndStatisticsMapList(flowAndStatisticsMapLists);
+ notifService.pushDelayedNotification(flowsStatisticsUpdateBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetFlowStatisticsFromFlowTableOutput>> getFlowStatisticsFromFlowTable(GetFlowStatisticsFromFlowTableInput input) {
+ GetFlowStatisticsFromFlowTableOutputBuilder builder = new GetFlowStatisticsFromFlowTableOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+ builder.setTransactionId(transId);
+ List<FlowAndStatisticsMapList> flowAndStatisticsMapLists = new ArrayList<>();
+ FlowsStatisticsUpdateBuilder flowsStatisticsUpdateBuilder = new FlowsStatisticsUpdateBuilder();
+ flowsStatisticsUpdateBuilder.setTransactionId(transId);
+ flowsStatisticsUpdateBuilder.setMoreReplies(false);
+ flowsStatisticsUpdateBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ // Here the statistics entry is built from the request itself, so the
+ // notification mirrors exactly the flow that was queried.
+ FlowAndStatisticsMapListBuilder flowAndStatisticsMapListBuilder = new FlowAndStatisticsMapListBuilder(input);
+ flowAndStatisticsMapListBuilder.setTableId(input.getTableId());
+ flowAndStatisticsMapListBuilder.setContainerName(input.getContainerName());
+ flowAndStatisticsMapListBuilder.setBarrier(input.isBarrier());
+ flowAndStatisticsMapListBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ flowAndStatisticsMapLists.add(flowAndStatisticsMapListBuilder.build());
+ flowsStatisticsUpdateBuilder.setFlowAndStatisticsMapList(flowAndStatisticsMapLists);
+ notifService.pushDelayedNotification(flowsStatisticsUpdateBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMapKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Mock flow-table statistics RPC service. A single call publishes TWO
+ * notifications: the first with {@code moreReplies=true}, the second with
+ * {@code moreReplies=false} and an additional packets-looked-up counter —
+ * exercising the multi-part reply handling of the statistics manager.
+ */
+public class OpendaylightFlowTableStatisticsServiceMock implements OpendaylightFlowTableStatisticsService {
+ NotificationProviderServiceHelper notifService;
+
+ public OpendaylightFlowTableStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetFlowTablesStatisticsOutput>> getFlowTablesStatistics(GetFlowTablesStatisticsInput input) {
+ GetFlowTablesStatisticsOutputBuilder builder = new GetFlowTablesStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+ builder.setTransactionId(transId);
+ FlowTableStatisticsUpdateBuilder ftsBuilder = new FlowTableStatisticsUpdateBuilder();
+ FlowTableAndStatisticsMapBuilder ftasmBuilder = new FlowTableAndStatisticsMapBuilder();
+ List<FlowTableAndStatisticsMap> tableAndStatisticsMaps = new ArrayList<>();
+ ftasmBuilder.setKey(new FlowTableAndStatisticsMapKey(StatisticsManagerTest.getTableId()));
+ ftasmBuilder.setActiveFlows(StatisticsManagerTest.COUNTER_32_TEST_VALUE);
+ tableAndStatisticsMaps.add(ftasmBuilder.build());
+ ftsBuilder.setTransactionId(transId);
+ ftsBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ ftsBuilder.setFlowTableAndStatisticsMap(tableAndStatisticsMaps);
+ ftsBuilder.setMoreReplies(true);
+ notifService.pushDelayedNotification(ftsBuilder.build(), 0); // 1st notification
+ // NOTE(review): tableAndStatisticsMaps is cleared and re-filled after being
+ // handed to ftsBuilder above — this relies on the builder (or build()) having
+ // copied the list; confirm, otherwise the 1st notification could see the
+ // mutated contents.
+ ftsBuilder.setMoreReplies(false);
+ ftasmBuilder.setPacketsLookedUp(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ tableAndStatisticsMaps.clear();
+ tableAndStatisticsMaps.add(ftasmBuilder.build());
+ ftsBuilder.setFlowTableAndStatisticsMap(tableAndStatisticsMaps);
+ notifService.pushDelayedNotification(ftsBuilder.build(), 0); // 2nd notification
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupDescStatsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupFeaturesUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupStatisticsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStatsKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStatsKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Mock {@link OpendaylightGroupStatisticsService} for statistics-manager tests.
+ * Every RPC immediately returns a successful {@link RpcResult} carrying a freshly
+ * generated transaction id, and additionally pushes the matching *Updated
+ * notification through {@link NotificationProviderServiceHelper} after a short
+ * delay, so tests can exercise the asynchronous notification path.
+ */
+public class OpendaylightGroupStatisticsServiceMock implements OpendaylightGroupStatisticsService {
+    // Delivers the delayed statistics notifications produced by the RPCs below.
+    NotificationProviderServiceHelper notifService;
+
+    public OpendaylightGroupStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+        this.notifService = notifService;
+    }
+
+    /**
+     * Returns one canned group-stats entry (keyed by the shared test group) and
+     * pushes a GroupStatisticsUpdated notification with the same payload.
+     */
+    @Override
+    public Future<RpcResult<GetAllGroupStatisticsOutput>> getAllGroupStatistics(GetAllGroupStatisticsInput input) {
+        GetAllGroupStatisticsOutputBuilder builder = new GetAllGroupStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        List<GroupStats> groupStats = new ArrayList<>();
+        GroupStatsBuilder gsBuilder = new GroupStatsBuilder();
+        GroupStatisticsUpdatedBuilder gsuBuilder = new GroupStatisticsUpdatedBuilder();
+        gsBuilder.setKey(new GroupStatsKey(StatisticsManagerTest.getGroup().getGroupId()));
+        gsBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        groupStats.add(gsBuilder.build());
+        builder.setGroupStats(groupStats);
+        // Mirror the RPC payload in the notification, tagged with the same transaction id.
+        gsuBuilder.setTransactionId(transId);
+        gsuBuilder.setMoreReplies(false);
+        gsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        gsuBuilder.setGroupStats(groupStats);
+        // 500 ms delay — deliberately longer than the other RPCs in this mock.
+        notifService.pushDelayedNotification(gsuBuilder.build(), 500);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Returns one canned group-description entry copied from the shared test group
+     * and pushes a GroupDescStatsUpdated notification with the same payload.
+     */
+    @Override
+    public Future<RpcResult<GetGroupDescriptionOutput>> getGroupDescription(GetGroupDescriptionInput input) {
+        GetGroupDescriptionOutputBuilder builder = new GetGroupDescriptionOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        List<GroupDescStats> groupDescStats = new ArrayList<>();
+        GroupDescStatsUpdatedBuilder gdsuBuilder = new GroupDescStatsUpdatedBuilder();
+        GroupDescStatsBuilder gdsBuilder = new GroupDescStatsBuilder();
+        gdsBuilder.setKey(new GroupDescStatsKey(StatisticsManagerTest.getGroup().getGroupId()));
+        gdsBuilder.setBuckets(StatisticsManagerTest.getGroup().getBuckets());
+        gdsBuilder.setContainerName(StatisticsManagerTest.getGroup().getContainerName());
+        gdsBuilder.setGroupName(StatisticsManagerTest.getGroup().getGroupName());
+        gdsBuilder.setGroupType(StatisticsManagerTest.getGroup().getGroupType());
+        groupDescStats.add(gdsBuilder.build());
+        builder.setGroupDescStats(groupDescStats);
+        gdsuBuilder.setTransactionId(transId);
+        gdsuBuilder.setMoreReplies(false);
+        gdsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        gdsuBuilder.setGroupDescStats(groupDescStats);
+        notifService.pushDelayedNotification(gdsuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Pushes a GroupFeaturesUpdated notification carrying the test max-groups value;
+     * the RPC result itself only carries the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetGroupFeaturesOutput>> getGroupFeatures(GetGroupFeaturesInput input) {
+        GetGroupFeaturesOutputBuilder builder = new GetGroupFeaturesOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        GroupFeaturesUpdatedBuilder gfuBuilder = new GroupFeaturesUpdatedBuilder();
+        gfuBuilder.setTransactionId(transId);
+        gfuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        gfuBuilder.setMoreReplies(false);
+        List<Long> maxGroups = new ArrayList<>();
+        maxGroups.add(StatisticsManagerTest.MAX_GROUPS_TEST_VALUE);
+        gfuBuilder.setMaxGroups(maxGroups);
+        notifService.pushDelayedNotification(gfuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Returns stats for the requested group id via a GroupStatisticsUpdated
+     * notification; the RPC result carries only the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetGroupStatisticsOutput>> getGroupStatistics(GetGroupStatisticsInput input) {
+        GetGroupStatisticsOutputBuilder builder = new GetGroupStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        GroupStatsBuilder gsBuilder = new GroupStatsBuilder();
+        List<GroupStats> groupStats = new ArrayList<>();
+        // Key the entry by the group id from the request, unlike getAllGroupStatistics.
+        gsBuilder.setKey(new GroupStatsKey(input.getGroupId()));
+        gsBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        groupStats.add(gsBuilder.build());
+        GroupStatisticsUpdatedBuilder gsuBuilder = new GroupStatisticsUpdatedBuilder();
+        gsuBuilder.setTransactionId(transId);
+        gsuBuilder.setMoreReplies(false);
+        gsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        gsuBuilder.setGroupStats(groupStats);
+        notifService.pushDelayedNotification(gsuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterConfigStatsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterFeaturesUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterStatisticsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStatsKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Mock {@link OpendaylightMeterStatisticsService} for statistics-manager tests.
+ * Every RPC immediately returns a successful {@link RpcResult} with a fresh
+ * transaction id and pushes the matching *Updated notification after a 100 ms
+ * delay via {@link NotificationProviderServiceHelper}.
+ */
+public class OpendaylightMeterStatisticsServiceMock implements OpendaylightMeterStatisticsService {
+    // Delivers the delayed statistics notifications produced by the RPCs below.
+    NotificationProviderServiceHelper notifService;
+
+    public OpendaylightMeterStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+        this.notifService = notifService;
+    }
+
+    /**
+     * Returns one canned meter-config entry copied from the shared test meter and
+     * pushes a MeterConfigStatsUpdated notification with the same payload.
+     */
+    @Override
+    public Future<RpcResult<GetAllMeterConfigStatisticsOutput>> getAllMeterConfigStatistics(GetAllMeterConfigStatisticsInput input) {
+        GetAllMeterConfigStatisticsOutputBuilder builder = new GetAllMeterConfigStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        List<MeterConfigStats> meterConfigStats = new ArrayList<>();
+        MeterConfigStatsBuilder mcsBuilder = new MeterConfigStatsBuilder();
+        mcsBuilder.setMeterId(StatisticsManagerTest.getMeter().getMeterId());
+        mcsBuilder.setMeterName(StatisticsManagerTest.getMeter().getMeterName());
+        mcsBuilder.setContainerName(StatisticsManagerTest.getMeter().getContainerName());
+        meterConfigStats.add(mcsBuilder.build());
+        builder.setMeterConfigStats(meterConfigStats);
+        MeterConfigStatsUpdatedBuilder mscuBuilder = new MeterConfigStatsUpdatedBuilder();
+        mscuBuilder.setTransactionId(transId);
+        mscuBuilder.setMoreReplies(false);
+        mscuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        mscuBuilder.setMeterConfigStats(meterConfigStats);
+        notifService.pushDelayedNotification(mscuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Pushes a MeterStatisticsUpdated notification with one entry (test byte/packet
+     * counters, keyed by the shared test meter); the RPC result carries only the
+     * transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetAllMeterStatisticsOutput>> getAllMeterStatistics(GetAllMeterStatisticsInput input) {
+        GetAllMeterStatisticsOutputBuilder builder = new GetAllMeterStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        MeterStatsBuilder msBuilder = new MeterStatsBuilder();
+        msBuilder.setByteInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        msBuilder.setPacketInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        msBuilder.setKey(new MeterStatsKey(StatisticsManagerTest.getMeter().getMeterId()));
+        List<MeterStats> meterStats = new ArrayList<>();
+        meterStats.add(msBuilder.build());
+        MeterStatisticsUpdatedBuilder msuBuilder = new MeterStatisticsUpdatedBuilder();
+        msuBuilder.setTransactionId(transId);
+        msuBuilder.setMoreReplies(false);
+        msuBuilder.setMeterStats(meterStats);
+        msuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        notifService.pushDelayedNotification(msuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Pushes a MeterFeaturesUpdated notification carrying the test max-meter value;
+     * the RPC result carries only the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetMeterFeaturesOutput>> getMeterFeatures(GetMeterFeaturesInput input) {
+        GetMeterFeaturesOutputBuilder builder = new GetMeterFeaturesOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        MeterFeaturesUpdatedBuilder mfuBuilder = new MeterFeaturesUpdatedBuilder();
+        mfuBuilder.setTransactionId(transId);
+        mfuBuilder.setMoreReplies(false);
+        mfuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        mfuBuilder.setMaxMeter(StatisticsManagerTest.COUNTER_32_TEST_VALUE);
+        notifService.pushDelayedNotification(mfuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Returns stats for the requested meter id via a MeterStatisticsUpdated
+     * notification; the RPC result carries only the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetMeterStatisticsOutput>> getMeterStatistics(GetMeterStatisticsInput input) {
+        GetMeterStatisticsOutputBuilder builder = new GetMeterStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        MeterStatsBuilder msBuilder = new MeterStatsBuilder();
+        // Key the entry by the meter id from the request, unlike getAllMeterStatistics.
+        msBuilder.setKey(new MeterStatsKey(input.getMeterId()));
+        msBuilder.setByteInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        msBuilder.setPacketInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        List<MeterStats> meterStats = new ArrayList<>();
+        meterStats.add(msBuilder.build());
+        MeterStatisticsUpdatedBuilder msuBuilder = new MeterStatisticsUpdatedBuilder();
+        msuBuilder.setTransactionId(transId);
+        msuBuilder.setMoreReplies(false);
+        msuBuilder.setMeterStats(meterStats);
+        msuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        notifService.pushDelayedNotification(msuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetNodeConnectorStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetNodeConnectorStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetNodeConnectorStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.NodeConnectorStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMapKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Mock {@link OpendaylightPortStatisticsService} for statistics-manager tests.
+ * RPCs return an immediate successful result with a fresh transaction id;
+ * getAllNodeConnectorsStatistics additionally pushes a two-part notification
+ * sequence through {@link NotificationProviderServiceHelper}.
+ */
+public class OpendaylightPortStatisticsServiceMock implements OpendaylightPortStatisticsService {
+    // Delivers the delayed statistics notifications produced by the RPCs below.
+    NotificationProviderServiceHelper notifService;
+
+    public OpendaylightPortStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+        this.notifService = notifService;
+    }
+
+    /**
+     * Pushes two NodeConnectorStatisticsUpdate notifications: a first batch flagged
+     * moreReplies=true (receive-drops only), then a final batch (moreReplies=false)
+     * whose single entry additionally carries a collision count. The RPC result
+     * carries only the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> getAllNodeConnectorsStatistics(GetAllNodeConnectorsStatisticsInput input) {
+        GetAllNodeConnectorsStatisticsOutputBuilder builder = new GetAllNodeConnectorsStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        NodeConnectorStatisticsUpdateBuilder ncsuBuilder = new NodeConnectorStatisticsUpdateBuilder();
+        NodeConnectorStatisticsAndPortNumberMapBuilder ncsapnmBuilder = new NodeConnectorStatisticsAndPortNumberMapBuilder();
+        ncsapnmBuilder.setKey(new NodeConnectorStatisticsAndPortNumberMapKey(StatisticsManagerTest.getNodeConnectorId()));
+        ncsapnmBuilder.setReceiveDrops(StatisticsManagerTest.BIG_INTEGER_TEST_VALUE);
+        List<NodeConnectorStatisticsAndPortNumberMap> firstBatch = new ArrayList<>();
+        firstBatch.add(ncsapnmBuilder.build());
+        ncsuBuilder.setTransactionId(transId);
+        ncsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        ncsuBuilder.setNodeConnectorStatisticsAndPortNumberMap(firstBatch);
+        ncsuBuilder.setMoreReplies(true);
+        notifService.pushDelayedNotification(ncsuBuilder.build(), 0); // 1st notification
+        // Build the second batch in a NEW list. The notification built above may still
+        // hold a reference to the list given to its builder, so clearing and reusing
+        // that list (as the original code did) could mutate the already-published
+        // first notification's payload.
+        ncsuBuilder.setMoreReplies(false);
+        ncsapnmBuilder.setCollisionCount(StatisticsManagerTest.BIG_INTEGER_TEST_VALUE);
+        List<NodeConnectorStatisticsAndPortNumberMap> secondBatch = new ArrayList<>();
+        secondBatch.add(ncsapnmBuilder.build());
+        ncsuBuilder.setNodeConnectorStatisticsAndPortNumberMap(secondBatch);
+        notifService.pushDelayedNotification(ncsuBuilder.build(), 10); // 2nd notification
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /** Minimal stub: returns only a transaction id and pushes no notification. */
+    @Override
+    public Future<RpcResult<GetNodeConnectorStatisticsOutput>> getNodeConnectorStatistics(GetNodeConnectorStatisticsInput input) {
+        GetNodeConnectorStatisticsOutputBuilder builder = new GetNodeConnectorStatisticsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromGivenPortInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromGivenPortOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromGivenPortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.QueueStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMapKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Mock {@link OpendaylightQueueStatisticsService} for statistics-manager tests.
+ * RPCs return an immediate successful result with a fresh transaction id; the
+ * all-ports and per-port queries additionally push a QueueStatisticsUpdate
+ * notification after a 100 ms delay.
+ */
+public class OpendaylightQueueStatisticsServiceMock implements OpendaylightQueueStatisticsService {
+    // Delivers the delayed statistics notifications produced by the RPCs below.
+    NotificationProviderServiceHelper notifService;
+    // NOTE(review): transNum appears unused within this class; transaction ids come
+    // from TestUtils.getNewTransactionId() instead — confirm before removing.
+    AtomicLong transNum = new AtomicLong();
+
+    public OpendaylightQueueStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+        this.notifService = notifService;
+    }
+
+    /**
+     * Pushes a QueueStatisticsUpdate notification with one entry keyed by the shared
+     * test node connector and queue; the RPC result carries only the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetAllQueuesStatisticsFromAllPortsOutput>> getAllQueuesStatisticsFromAllPorts(GetAllQueuesStatisticsFromAllPortsInput input) {
+        GetAllQueuesStatisticsFromAllPortsOutputBuilder builder = new GetAllQueuesStatisticsFromAllPortsOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        QueueStatisticsUpdateBuilder qsuBuilder = new QueueStatisticsUpdateBuilder();
+        QueueIdAndStatisticsMapBuilder qiasmBuilder = new QueueIdAndStatisticsMapBuilder();
+        List<QueueIdAndStatisticsMap> queueIdAndStatisticsMaps = new ArrayList<>();
+        qsuBuilder.setMoreReplies(false);
+        qsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        qsuBuilder.setTransactionId(transId);
+        qiasmBuilder.setTransmittedBytes(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        qiasmBuilder.setKey(new QueueIdAndStatisticsMapKey(StatisticsManagerTest.getNodeConnectorId(), StatisticsManagerTest.getQueue().getQueueId()));
+        queueIdAndStatisticsMaps.add(qiasmBuilder.build());
+        qsuBuilder.setQueueIdAndStatisticsMap(queueIdAndStatisticsMaps);
+        notifService.pushDelayedNotification(qsuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /** Minimal stub: returns only a transaction id and pushes no notification. */
+    @Override
+    public Future<RpcResult<GetAllQueuesStatisticsFromGivenPortOutput>> getAllQueuesStatisticsFromGivenPort(GetAllQueuesStatisticsFromGivenPortInput input) {
+        GetAllQueuesStatisticsFromGivenPortOutputBuilder builder = new GetAllQueuesStatisticsFromGivenPortOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+
+    /**
+     * Pushes a QueueStatisticsUpdate notification keyed by the node connector and
+     * queue id taken from the request; the RPC result carries only the transaction id.
+     */
+    @Override
+    public Future<RpcResult<GetQueueStatisticsFromGivenPortOutput>> getQueueStatisticsFromGivenPort(GetQueueStatisticsFromGivenPortInput input) {
+        GetQueueStatisticsFromGivenPortOutputBuilder builder = new GetQueueStatisticsFromGivenPortOutputBuilder();
+        TransactionId transId = new TransactionId(BigInteger.valueOf(TestUtils.getNewTransactionId()));
+        builder.setTransactionId(transId);
+        QueueIdAndStatisticsMapBuilder qiasmBuilder = new QueueIdAndStatisticsMapBuilder();
+        List<QueueIdAndStatisticsMap> queueIdAndStatisticsMaps = new ArrayList<>();
+        qiasmBuilder.setKey(new QueueIdAndStatisticsMapKey(input.getNodeConnectorId(), input.getQueueId()));
+        qiasmBuilder.setTransmittedBytes(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+        queueIdAndStatisticsMaps.add(qiasmBuilder.build());
+        QueueStatisticsUpdateBuilder qsuBuilder = new QueueStatisticsUpdateBuilder();
+        qsuBuilder.setMoreReplies(false);
+        qsuBuilder.setTransactionId(transId);
+        qsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+        qsuBuilder.setQueueIdAndStatisticsMap(queueIdAndStatisticsMaps);
+        notifService.pushDelayedNotification(qsuBuilder.build(), 100);
+        return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+    }
+}
--- /dev/null
+package test.mock.util;
+
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.CommonPort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.Port;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.PortBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.PortKey;
+
+import java.util.Random;
+
+public class PortMockGenerator {
+ private static final Random rnd = new Random();
+ private static final PortBuilder portBuilder = new PortBuilder();
+
+ public static Port getRandomPort() {
+ portBuilder.setKey(new PortKey(TestUtils.nextLong(0, 4294967295L)));
+ portBuilder.setBarrier(rnd.nextBoolean());
+ portBuilder.setPortNumber(new CommonPort.PortNumber(TestUtils.nextLong(0, 4294967295L)));
+ portBuilder.setConfiguration(new PortConfig(rnd.nextBoolean(), rnd.nextBoolean(), rnd.nextBoolean(), rnd.nextBoolean()));
+ return portBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.queue.rev130925.QueueId;
+
+import java.util.Random;
+
+public class QueueMockGenerator {
+ private static final Random rnd = new Random();
+ private static final QueueBuilder queueBuilder = new QueueBuilder();
+
+ public static Queue getRandomQueue() {
+ queueBuilder.setKey(new QueueKey(new QueueId(TestUtils.nextLong(0, 4294967295L))));
+ queueBuilder.setPort(TestUtils.nextLong(0, 4294967295L));
+ queueBuilder.setProperty(rnd.nextInt(65535));
+ return queueBuilder.build();
+ }
+
+ public static Queue getRandomQueueWithPortNum(long portNum) {
+ queueBuilder.setKey(new QueueKey(new QueueId(TestUtils.nextLong(0, 4294967295L))));
+ queueBuilder.setPort(portNum);
+ queueBuilder.setProperty(rnd.nextInt(65535));
+ return queueBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.RpcService;
+
+/**
+ * Test stand-in for {@link RpcProviderRegistry} that hands out statistics-service
+ * mocks wired to one shared notification helper. Registration/listener methods are
+ * deliberate no-ops that return {@code null}.
+ */
+public class RpcProviderRegistryMock implements RpcProviderRegistry {
+
+    OpendaylightFlowStatisticsServiceMock flowStatisticsServiceMock;
+    OpendaylightFlowTableStatisticsServiceMock flowTableStatisticsServiceMock;
+    OpendaylightGroupStatisticsServiceMock groupStatisticsServiceMock;
+    OpendaylightMeterStatisticsServiceMock meterStatisticsServiceMock;
+    OpendaylightPortStatisticsServiceMock portStatisticsServiceMock;
+    OpendaylightQueueStatisticsServiceMock queueStatisticsServiceMock;
+
+    public RpcProviderRegistryMock(NotificationProviderServiceHelper notificationProviderService) {
+        // All mocks publish their delayed notifications through the same helper.
+        flowStatisticsServiceMock = new OpendaylightFlowStatisticsServiceMock(notificationProviderService);
+        flowTableStatisticsServiceMock = new OpendaylightFlowTableStatisticsServiceMock(notificationProviderService);
+        groupStatisticsServiceMock = new OpendaylightGroupStatisticsServiceMock(notificationProviderService);
+        meterStatisticsServiceMock = new OpendaylightMeterStatisticsServiceMock(notificationProviderService);
+        portStatisticsServiceMock = new OpendaylightPortStatisticsServiceMock(notificationProviderService);
+        queueStatisticsServiceMock = new OpendaylightQueueStatisticsServiceMock(notificationProviderService);
+    }
+
+    /** No-op registration; callers receive {@code null}. */
+    @Override
+    public <T extends RpcService> BindingAwareBroker.RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+        return null;
+    }
+
+    /** No-op registration; callers receive {@code null}. */
+    @Override
+    public <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+        return null;
+    }
+
+    /** No-op listener registration; callers receive {@code null}. */
+    @Override
+    public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(L listener) {
+        return null;
+    }
+
+    /**
+     * Maps each known statistics-service interface onto its mock; unknown
+     * interfaces yield {@code null}.
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public <T extends RpcService> T getRpcService(Class<T> serviceInterface) {
+        if (serviceInterface.equals(OpendaylightFlowStatisticsService.class)) {
+            return (T) flowStatisticsServiceMock;
+        }
+        if (serviceInterface.equals(OpendaylightFlowTableStatisticsService.class)) {
+            return (T) flowTableStatisticsServiceMock;
+        }
+        if (serviceInterface.equals(OpendaylightGroupStatisticsService.class)) {
+            return (T) groupStatisticsServiceMock;
+        }
+        if (serviceInterface.equals(OpendaylightMeterStatisticsService.class)) {
+            return (T) meterStatisticsServiceMock;
+        }
+        if (serviceInterface.equals(OpendaylightPortStatisticsService.class)) {
+            return (T) portStatisticsServiceMock;
+        }
+        if (serviceInterface.equals(OpendaylightQueueStatisticsService.class)) {
+            return (T) queueStatisticsServiceMock;
+        }
+        return null;
+    }
+
+    public OpendaylightFlowStatisticsServiceMock getFlowStatisticsServiceMock() {
+        return flowStatisticsServiceMock;
+    }
+
+    public OpendaylightFlowTableStatisticsServiceMock getFlowTableStatisticsServiceMock() {
+        return flowTableStatisticsServiceMock;
+    }
+
+    public OpendaylightGroupStatisticsServiceMock getGroupStatisticsServiceMock() {
+        return groupStatisticsServiceMock;
+    }
+
+    public OpendaylightMeterStatisticsServiceMock getMeterStatisticsServiceMock() {
+        return meterStatisticsServiceMock;
+    }
+
+    public OpendaylightPortStatisticsServiceMock getPortStatisticsServiceMock() {
+        return portStatisticsServiceMock;
+    }
+
+    public OpendaylightQueueStatisticsServiceMock getQueueStatisticsServiceMock() {
+        return queueStatisticsServiceMock;
+    }
+}
--- /dev/null
+package test.mock.util;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.impl.StatisticsManagerConfig;
+import org.opendaylight.controller.md.statistics.manager.impl.StatisticsManagerImpl;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.Counter32;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.Counter64;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FeatureCapability;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.Port;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Base class for statistics-manager tests. Seeds random flow/group/meter/port/queue
+ * fixtures once per class, provides helpers that write flow-capable nodes into both
+ * datastores and push the matching inventory notifications, and builds a
+ * StatisticsManagerImpl wired to the mock RPC registry and notification service.
+ */
+public abstract class StatisticsManagerTest extends AbstractDataBrokerTest {
+
+ // Constants the statistics tests assert collected counters against.
+ public static final Counter64 COUNTER_64_TEST_VALUE = new Counter64(BigInteger.valueOf(128));
+ public static final Counter32 COUNTER_32_TEST_VALUE = new Counter32(64L);
+ public static final Long MAX_GROUPS_TEST_VALUE = 2000L;
+ public static final BigInteger BIG_INTEGER_TEST_VALUE = BigInteger.valueOf(1000);
+
+ private static final int DEFAULT_MIN_REQUEST_NET_MONITOR_INTERVAL = 5000;
+ private static final int MAX_NODES_FOR_COLLECTOR = 16;
+
+ // Randomized fixtures, generated once for the whole class in setupTests().
+ private static Flow flow;
+ private static Group group;
+ private static Meter meter;
+ private static Port port;
+ private static Queue queue;
+ private static TableId tableId;
+ private static NodeConnectorId nodeConnectorId;
+
+ private final NotificationProviderServiceHelper notificationMock = new NotificationProviderServiceHelper();
+ protected final NodeKey s1Key = new NodeKey(new NodeId("S1"));
+ // Recreated before every test in init().
+ protected RpcProviderRegistryMock rpcRegistry;
+
+ /** Generates the shared random fixtures; the queue is tied to the random port's number. */
+ @BeforeClass
+ public static void setupTests() {
+ flow = FlowMockGenerator.getRandomFlow();
+ group = GroupMockGenerator.getRandomGroup();
+ meter = MeterMockGenerator.getRandomMeter();
+ port = PortMockGenerator.getRandomPort();
+ queue = QueueMockGenerator.getRandomQueueWithPortNum(port.getPortNumber().getUint32());
+ tableId = new TableId((short) 2);
+ nodeConnectorId = new NodeConnectorId("connector.1");
+ }
+
+ /** Fresh RPC registry mock per test so registrations do not leak between tests. */
+ @Before
+ public void init() {
+ rpcRegistry = new RpcProviderRegistryMock(notificationMock);
+ }
+
+ // node with statistics capabilities will enable cyclic statistics collection
+ @SafeVarargs
+ protected final void addFlowCapableNodeWithFeatures(final NodeKey nodeKey, final Boolean hasMeterCapabilities,
+ final Class<? extends FeatureCapability>... capabilities)
+ throws ExecutionException, InterruptedException {
+ // Empty Nodes container is written first so the node put below has a parent.
+ final Nodes nodes = new NodesBuilder().setNode(Collections.<Node>emptyList()).build();
+ final InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, nodeKey);
+
+ final FlowCapableNodeBuilder fcnBuilder = new FlowCapableNodeBuilder();
+ final SwitchFeaturesBuilder sfBuilder = new SwitchFeaturesBuilder();
+ final List<Class<? extends FeatureCapability>> capabilitiyList = new ArrayList<>();
+ for (final Class<? extends FeatureCapability> capability : capabilities) {
+ capabilitiyList.add(capability);
+ }
+ sfBuilder.setCapabilities(capabilitiyList);
+ sfBuilder.setMaxTables((short) 255);
+ final NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(nodeKey);
+ fcnBuilder.setSwitchFeatures(sfBuilder.build());
+ // One table matching the random flow's table id, so flow stats have a target.
+ final List<Table> tables = new ArrayList<>();
+ final TableBuilder tBuilder = new TableBuilder();
+ tBuilder.setId(getFlow().getTableId());
+ tables.add(tBuilder.build());
+ fcnBuilder.setTable(tables);
+ final FlowCapableNode flowCapableNode = fcnBuilder.build();
+ nodeBuilder.addAugmentation(FlowCapableNode.class, flowCapableNode);
+ final Node node = nodeBuilder.build();
+
+ // The node is written to both OPERATIONAL and CONFIGURATION datastores.
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier, nodeBuilder.build());
+ if (hasMeterCapabilities) {
+ // Meter features augmentation marks the node as meter-capable.
+ final NodeMeterFeaturesBuilder nmfBuilder = new NodeMeterFeaturesBuilder();
+ final MeterFeaturesBuilder mfBuilder = new MeterFeaturesBuilder();
+ mfBuilder.setMaxBands((short) 4);
+ nmfBuilder.setMeterFeatures(mfBuilder.build());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier.augmentation(NodeMeterFeatures.class),
+ nmfBuilder.build());
+ }
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowNodeIdentifier, node);
+ assertCommit(writeTx.submit());
+
+ // After the commit, emit the NodeUpdated notification the statistics manager listens for.
+ final NodeUpdatedBuilder nuBuilder = new NodeUpdatedBuilder(node);
+ final FlowCapableNodeUpdatedBuilder fcnuBuilder = new FlowCapableNodeUpdatedBuilder(flowCapableNode);
+ nuBuilder.setNodeRef(new NodeRef(flowNodeIdentifier));
+ nuBuilder.addAugmentation(FlowCapableNodeUpdated.class, fcnuBuilder.build());
+ notificationMock.pushNotification(nuBuilder.build());
+ }
+
+ /**
+ * Writes a plain flow-capable node (no capabilities, no tables) into both
+ * datastores and pushes the matching NodeUpdated notification.
+ */
+ public void addFlowCapableNode(final NodeKey nodeKey) throws ExecutionException, InterruptedException {
+ final Nodes nodes = new NodesBuilder().setNode(Collections.<Node>emptyList()).build();
+ final InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, nodeKey);
+
+ final FlowCapableNodeBuilder fcnBuilder = new FlowCapableNodeBuilder();
+ final NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(nodeKey);
+ final SwitchFeaturesBuilder sfBuilder = new SwitchFeaturesBuilder();
+ sfBuilder.setMaxTables((short) 255);
+ fcnBuilder.setSwitchFeatures(sfBuilder.build());
+ final FlowCapableNode flowCapableNode = fcnBuilder.build();
+ nodeBuilder.addAugmentation(FlowCapableNode.class, flowCapableNode);
+ final Node node = nodeBuilder.build();
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier, node);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowNodeIdentifier, node);
+ assertCommit(writeTx.submit());
+
+ final NodeUpdatedBuilder nuBuilder = new NodeUpdatedBuilder(node);
+ final FlowCapableNodeUpdatedBuilder fcnuBuilder = new FlowCapableNodeUpdatedBuilder(flowCapableNode);
+ nuBuilder.setNodeRef(new NodeRef(flowNodeIdentifier));
+ nuBuilder.addAugmentation(FlowCapableNodeUpdated.class, fcnuBuilder.build());
+ notificationMock.pushNotification(nuBuilder.build());
+ }
+
+ /**
+ * Deletes the node from the OPERATIONAL datastore only, then pushes a
+ * NodeRemoved notification for it.
+ */
+ protected void removeNode(final NodeKey nodeKey) throws ExecutionException, InterruptedException {
+ final InstanceIdentifier<Node> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey);
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.OPERATIONAL, nodeII);
+ writeTx.submit().get();
+
+ final NodeRemovedBuilder nrBuilder = new NodeRemovedBuilder();
+ nrBuilder.setNodeRef(new NodeRef(nodeII));
+ notificationMock.pushNotification(nrBuilder.build());
+ }
+
+ /** Builds and starts a StatisticsManagerImpl against this test's broker, mocks and config. */
+ public StatisticsManager setupStatisticsManager() {
+ StatisticsManagerConfig.StatisticsManagerConfigBuilder confBuilder = StatisticsManagerConfig.builder();
+ confBuilder.setMaxNodesForCollector(MAX_NODES_FOR_COLLECTOR);
+ confBuilder.setMinRequestNetMonitorInterval(DEFAULT_MIN_REQUEST_NET_MONITOR_INTERVAL);
+ StatisticsManager statsProvider = new StatisticsManagerImpl(getDataBroker(), confBuilder.build());
+ statsProvider.start(notificationMock.getNotifBroker(), rpcRegistry);
+ return statsProvider;
+ }
+
+ // Accessors for the class-wide random fixtures created in setupTests().
+ public static Flow getFlow() {
+ return flow;
+ }
+
+ public static Group getGroup() {
+ return group;
+ }
+
+ public static Meter getMeter() {
+ return meter;
+ }
+
+ public static Port getPort() {
+ return port;
+ }
+
+ public static Queue getQueue() {
+ return queue;
+ }
+
+ public static TableId getTableId() {
+ return tableId;
+ }
+
+ public static NodeConnectorId getNodeConnectorId() {
+ return nodeConnectorId;
+ }
+}
+
--- /dev/null
+package test.mock.util;
+
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Small shared helpers for the statistics-manager mocks: pseudo-random longs
+ * for fixture generation and a process-wide unique transaction id source.
+ */
+public class TestUtils {
+
+ // Monotonically increasing counter used to hand out unique transaction ids.
+ private static final AtomicLong transId = new AtomicLong();
+
+ private static final Random rnd = new Random();
+
+ /**
+ * Returns a pseudo-random long in the range [rangeBottom, rangeTop).
+ *
+ * @param rangeBottom lower bound, inclusive
+ * @param rangeTop upper bound, exclusive
+ */
+ public static long nextLong(long rangeBottom, long rangeTop) {
+ return rangeBottom + (long) (rnd.nextDouble() * (rangeTop - rangeBottom));
+ }
+
+ /** Returns a new, unique transaction id (thread-safe). */
+ public static long getNewTransactionId() {
+ return transId.incrementAndGet();
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>\r
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">\r
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">\r
+\r
+ <appender name="console" class="org.apache.log4j.ConsoleAppender">\r
+ <layout class="org.apache.log4j.PatternLayout">\r
+ <param name="ConversionPattern" value="%-6p %d{HH:mm:ss.SSS} [%10.10t] %30.30c %x - %m%n" />\r
+ </layout>\r
+ </appender>\r
+\r
+ <logger name="org.opendaylight.controller.md.statistics" additivity="false">\r
+ <level value="DEBUG" />\r
+ <appender-ref ref="console" />\r
+ </logger>\r
+\r
+ <root>\r
+ <priority value="INFO" />\r
+ <appender-ref ref="console" />\r
+ </root>\r
+</log4j:configuration>\r
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>table-miss-enforcer</artifactId>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ <relativePath>../</relativePath>
+ </parent>
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>topology-lldp-discovery</artifactId>
+ <packaging>bundle</packaging>
+ <properties>
+ <bundle.plugin.version>2.4.0</bundle.plugin.version>
+ <guava.version>14.0.1</guava.version>
+ <maven.clean.plugin.version>2.5</maven.clean.plugin.version>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-lang</groupId>
+ <artifactId>commons-lang</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>equinoxSDK381</groupId>
+ <artifactId>org.eclipse.osgi</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>liblldp</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-base</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-service</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-inventory</artifactId>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Activator>org.opendaylight.md.controller.topology.lldp.LLDPActivator</Bundle-Activator>
+ <Export-Package>org.opendaylight.md.controller.topology.lldp.utils</Export-Package>
+ <Embed-Dependency>commons-lang</Embed-Dependency>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</developerConnection>
+ <tag>HEAD</tag>
+ </scm>
+</project>
--- /dev/null
+/**
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.lldp;
+
+import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
+import org.osgi.framework.BundleContext;
+
+public class LLDPActivator extends AbstractBindingAwareProvider {
+ // Shared provider instance; created at class load, started once a session arrives.
+ private static LLDPDiscoveryProvider provider = new LLDPDiscoveryProvider();
+
+ /** Wires the SAL data and notification services into the provider and starts it. */
+ public void onSessionInitiated(final ProviderContext session) {
+ DataProviderService dataService = session.<DataProviderService>getSALService(DataProviderService.class);
+ provider.setDataService(dataService);
+ NotificationProviderService notificationService = session.<NotificationProviderService>getSALService(NotificationProviderService.class);
+ provider.setNotificationService(notificationService);
+ provider.start();
+ }
+
+ // Bundle shutdown hook — presumably invoked by AbstractBindingAwareProvider on stop;
+ // confirm against the base class before relying on ordering.
+ protected void stopImpl(final BundleContext context) {
+ provider.close();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.lldp;
+
+import org.opendaylight.md.controller.topology.lldp.utils.LLDPDiscoveryUtils;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscovered;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketProcessingListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketReceived;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Listens for packet-received notifications; when the payload decodes as an
+ * LLDP frame it publishes a LinkDiscovered notification for the
+ * (source connector, ingress connector) pair and registers the link with the ager.
+ */
+class LLDPDiscoveryListener implements PacketProcessingListener {
+ private static final Logger LOG = LoggerFactory.getLogger(LLDPDiscoveryListener.class);
+
+ private final LLDPDiscoveryProvider manager;
+
+ LLDPDiscoveryListener(final LLDPDiscoveryProvider manager) {
+ this.manager = manager;
+ }
+
+ @Override
+ public void onPacketReceived(final PacketReceived lldp) {
+ // Returns null for anything that is not a decodable LLDP frame.
+ NodeConnectorRef src = LLDPDiscoveryUtils.lldpToNodeConnectorRef(lldp.getPayload());
+ if (src != null) {
+ LinkDiscoveredBuilder ldb = new LinkDiscoveredBuilder();
+ ldb.setDestination(lldp.getIngress());
+ ldb.setSource(new NodeConnectorRef(src));
+ LinkDiscovered ld = ldb.build();
+
+ manager.getNotificationService().publish(ld);
+ // Track the link so LLDPLinkAger can expire it when it stops being refreshed.
+ LLDPLinkAger.getInstance().put(ld);
+ }
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.lldp;
+
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Owns the LLDP discovery lifecycle: holds the injected SAL services, registers
+ * the LLDPDiscoveryListener for notifications on start(), and tears the
+ * registration and the link ager down on close().
+ */
+public class LLDPDiscoveryProvider implements AutoCloseable {
+ private final static Logger LOG = LoggerFactory.getLogger(LLDPDiscoveryProvider.class);
+ // Injected by the activator before start() is called.
+ private DataProviderService dataService;
+ private NotificationProviderService notificationService;
+ // Translates received LLDP packets into LinkDiscovered notifications.
+ private final LLDPDiscoveryListener commiter = new LLDPDiscoveryListener(LLDPDiscoveryProvider.this);
+ private ListenerRegistration<NotificationListener> listenerRegistration;
+
+ public DataProviderService getDataService() {
+ return this.dataService;
+ }
+
+ public void setDataService(final DataProviderService dataService) {
+ this.dataService = dataService;
+ }
+
+ public NotificationProviderService getNotificationService() {
+ return this.notificationService;
+ }
+
+ public void setNotificationService(final NotificationProviderService notificationService) {
+ this.notificationService = notificationService;
+ }
+
+ /** Subscribes the listener and hooks this provider into the singleton link ager. */
+ public void start() {
+ ListenerRegistration<NotificationListener> registerNotificationListener = this.getNotificationService().registerNotificationListener(this.commiter);
+ this.listenerRegistration = registerNotificationListener;
+ LLDPLinkAger.getInstance().setManager(this);
+ LOG.info("LLDPDiscoveryListener Started.");
+ }
+
+ /** Closes the notification registration (if any) and stops the link ager's timer. */
+ public void close() {
+ try {
+ // NOTE(review): "stopped" is logged before the teardown actually runs.
+ LOG.info("LLDPDiscoveryListener stopped.");
+ if (this.listenerRegistration!=null) {
+ this.listenerRegistration.close();
+ }
+ LLDPLinkAger.getInstance().close();
+ } catch (Exception e) {
+ // NOTE(review): wrapping a cleanup failure in java.lang.Error is unusual;
+ // a RuntimeException would be more conventional — confirm no caller relies on Error.
+ throw new Error(e);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.lldp;
+
+import java.util.Date;
+import java.util.Map;
+import java.util.Timer;
+import java.util.Map.Entry;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.md.controller.topology.lldp.utils.LLDPDiscoveryUtils;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscovered;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder;
+
+
+/**
+ * Singleton that expires discovered links: each put() records an expiration time
+ * (now + LLDP_EXPIRATION_TIME) and a timer task sweeps the map every
+ * LLDP_INTERVAL, publishing LinkRemoved for entries past their deadline.
+ */
+public class LLDPLinkAger {
+ // Eagerly created; the sweep timer starts ticking at class initialization.
+ private static final LLDPLinkAger instance = new LLDPLinkAger();
+ // Link -> absolute expiration time. ConcurrentHashMap allows removal during iteration.
+ private Map<LinkDiscovered,Date> linkToDate = new ConcurrentHashMap<LinkDiscovered,Date>();
+ private LLDPDiscoveryProvider manager;
+ private Timer timer = new Timer();
+
+ public LLDPDiscoveryProvider getManager() {
+ return manager;
+ }
+ public void setManager(LLDPDiscoveryProvider manager) {
+ this.manager = manager;
+ }
+ private LLDPLinkAger() {
+ timer.schedule(new LLDPAgingTask(), 0,LLDPDiscoveryUtils.LLDP_INTERVAL);
+ }
+ public static LLDPLinkAger getInstance() {
+ return instance;
+ }
+
+ /** Records (or refreshes) a link with a fresh expiration deadline. */
+ public void put(LinkDiscovered link) {
+ Date expires = new Date();
+ expires.setTime(expires.getTime() + LLDPDiscoveryUtils.LLDP_EXPIRATION_TIME);
+ linkToDate.put(link, expires);
+ }
+
+ /** Cancels the sweep timer; no further LinkRemoved notifications are produced. */
+ public void close() {
+ timer.cancel();
+ }
+
+ // Periodic sweep: publishes LinkRemoved for every expired entry and drops it.
+ private class LLDPAgingTask extends TimerTask {
+
+ @Override
+ public void run() {
+ for (Entry<LinkDiscovered,Date> entry : linkToDate.entrySet()) {
+ LinkDiscovered link = entry.getKey();
+ Date expires = entry.getValue();
+ Date now = new Date();
+ if(now.after(expires)) {
+ if(getInstance().getManager() != null) {
+ LinkRemovedBuilder lrb = new LinkRemovedBuilder(link);
+ getInstance().getManager().getNotificationService().publish(lrb.build());
+ // NOTE(review): expired entries are only removed when a manager is set;
+ // with no manager they linger until one is attached — confirm intended.
+ linkToDate.remove(link);
+ }
+ }
+ }
+
+ }
+
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.lldp.utils;
+
+import java.nio.charset.Charset;
+
+import org.opendaylight.controller.liblldp.Ethernet;
+import org.opendaylight.controller.liblldp.LLDP;
+import org.opendaylight.controller.liblldp.LLDPTLV;
+import org.opendaylight.controller.liblldp.NetUtils;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Helpers for decoding LLDP payloads into MD-SAL inventory references.
+ */
+public class LLDPDiscoveryUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(LLDPDiscoveryUtils.class);
+
+ public static final Long LLDP_INTERVAL = (long) (1000*5); // Send LLDP every five seconds
+ public static final Long LLDP_EXPIRATION_TIME = LLDP_INTERVAL*3; // Let up to three intervals pass before we decide we are expired.
+
+ /** Formats a MAC address as colon-separated upper-case hex, e.g. "0A:1B:2C". */
+ public static String macToString(byte[] mac) {
+ StringBuilder b = new StringBuilder();
+ for (int i = 0; i < mac.length; i++) {
+ b.append(String.format("%02X%s", mac[i], (i < mac.length - 1) ? ":" : ""));
+ }
+ return b.toString();
+ }
+
+ /**
+ * Extracts the sending node connector from a raw LLDP ethernet frame.
+ *
+ * @param payload raw frame bytes
+ * @return reference to the source node connector, or null when the frame is
+ * not LLDP or the SystemName/Custom TLVs are missing or unreadable
+ */
+ public static NodeConnectorRef lldpToNodeConnectorRef(byte[] payload) {
+ Ethernet ethPkt = new Ethernet();
+ try {
+ ethPkt.deserialize(payload, 0, payload.length * NetUtils.NumBitsInAByte);
+ } catch (Exception e) {
+ // Fixed SLF4J usage: the Throwable goes as the last argument, not as a
+ // "{}" format parameter, so the full stack trace is logged.
+ LOG.warn("Failed to decode LLDP packet", e);
+ }
+
+ // A failed deserialize leaves a non-LLDP payload, so this guard also covers it.
+ if (ethPkt.getPayload() instanceof LLDP) {
+ LLDP lldp = (LLDP) ethPkt.getPayload();
+
+ try {
+ NodeId srcNodeId = null;
+ NodeConnectorId srcNodeConnectorId = null;
+ for (LLDPTLV lldptlv : lldp.getOptionalTLVList()) {
+ // Custom TLV carries the connector id; SystemName carries the node id.
+ if (lldptlv.getType() == LLDPTLV.TLVType.Custom.getValue()) {
+ srcNodeConnectorId = new NodeConnectorId(LLDPTLV.getCustomString(lldptlv.getValue(), lldptlv.getLength()));
+ }
+ if (lldptlv.getType() == LLDPTLV.TLVType.SystemName.getValue()) {
+ String srcNodeIdString = new String(lldptlv.getValue(), Charset.defaultCharset());
+ srcNodeId = new NodeId(srcNodeIdString);
+ }
+ }
+ if (srcNodeId != null && srcNodeConnectorId != null) {
+ InstanceIdentifier<NodeConnector> srcInstanceId = InstanceIdentifier.builder(Nodes.class)
+ .child(Node.class, new NodeKey(srcNodeId))
+ .child(NodeConnector.class, new NodeConnectorKey(srcNodeConnectorId))
+ .toInstance();
+ return new NodeConnectorRef(srcInstanceId);
+ }
+ } catch (Exception e) {
+ LOG.warn("Caught exception ", e);
+ }
+ }
+ return null;
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>topology-manager</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-service</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-inventory</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-topology</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Bundle-Activator>org.opendaylight.md.controller.topology.manager.FlowCapableTopologyProvider</Bundle-Activator>
+ <Private-Package>org.opendaylight.md.controller.topology.manager</Private-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/openflowplugin.git</developerConnection>
+ <tag>HEAD</tag>
+ </scm>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.manager;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointBuilder;
+
+/**
+ * Static mapping helpers between inventory types (nodes, connectors, discovery
+ * links) and their network-topology counterparts (topology nodes, termination
+ * points, topology links).
+ */
+public final class FlowCapableNodeMapping {
+
+ private FlowCapableNodeMapping() {
+ throw new UnsupportedOperationException("Utility class.");
+ }
+
+ /** Extracts the NodeKey from an inventory node reference's instance identifier. */
+ public static NodeKey getNodeKey(final NodeRef ref) {
+ return ref.getValue().firstKeyOf(Node.class, NodeKey.class);
+ }
+
+ /** Extracts the NodeKey of the node that owns the referenced connector. */
+ public static NodeKey getNodeKey(final NodeConnectorRef ref) {
+ return ref.getValue().firstKeyOf(Node.class, NodeKey.class);
+ }
+
+ /** Extracts the NodeConnectorKey from a connector reference's instance identifier. */
+ public static NodeConnectorKey getNodeConnectorKey(final NodeConnectorRef ref) {
+ return ref.getValue().firstKeyOf(NodeConnector.class, NodeConnectorKey.class);
+ }
+
+ /** Wraps an inventory NodeId as a topology NodeId (same string value). */
+ public static NodeId toTopologyNodeId(
+ final org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId nodeId) {
+ return new NodeId(nodeId);
+ }
+
+ private static NodeId toTopologyNodeId(final NodeConnectorRef source) {
+ return toTopologyNodeId(getNodeKey(source).getId());
+ }
+
+ /** Wraps an inventory NodeConnectorId as a topology termination-point id. */
+ public static TpId toTerminationPointId(final NodeConnectorId id) {
+ return new TpId(id);
+ }
+
+ private static TpId toTerminationPointId(final NodeConnectorRef source) {
+ return toTerminationPointId(getNodeConnectorKey(source).getId());
+ }
+
+ /** Builds a topology node carrying the inventory node reference as an augmentation. */
+ public static org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node toTopologyNode(
+ final NodeId nodeId, final NodeRef invNodeRef) {
+ return new NodeBuilder() //
+ .setNodeId(nodeId) //
+ .addAugmentation(InventoryNode.class, new InventoryNodeBuilder() //
+ .setInventoryNodeRef(invNodeRef) //
+ .build()) //
+ .build();
+ }
+
+ /** Builds a termination point carrying the inventory connector reference as an augmentation. */
+ public static TerminationPoint toTerminationPoint(final TpId id, final NodeConnectorRef invNodeConnectorRef) {
+ return new TerminationPointBuilder() //
+ .setTpId(id) //
+ .addAugmentation(InventoryNodeConnector.class, new InventoryNodeConnectorBuilder() //
+ .setInventoryNodeConnectorRef(invNodeConnectorRef) //
+ .build()) //
+ .build();
+ }
+
+ /**
+ * Maps a discovery link to a topology link. The link id is derived from the
+ * source connector's id, so one topology link exists per source connector.
+ */
+ public static Link toTopologyLink(
+ final org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.Link link) {
+ return new LinkBuilder() //
+ .setSource(new SourceBuilder() //
+ .setSourceNode(toTopologyNodeId(link.getSource())) //
+ .setSourceTp(toTerminationPointId(link.getSource())) //
+ .build()) //
+ .setDestination(new DestinationBuilder() //
+ .setDestNode(toTopologyNodeId(link.getDestination())) //
+ .setDestTp(toTerminationPointId(link.getDestination())) //
+ .build()) //
+ .setLinkId(new LinkId(getNodeConnectorKey(link.getSource()).getId())) //
+ .build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.manager;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.FlowTopologyDiscoveryListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscovered;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkOverutilized;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkUtilizationNormal;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.OpendaylightInventoryListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeConnectorKey;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeKey;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPoint;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPointId;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyLink;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
+
+/**
+ * Listens to inventory and flow-topology-discovery notifications and
+ * mirrors them into the operational datastore as updates of a single
+ * network-topology Topology instance. Every notification is converted into
+ * a TopologyOperation and enqueued on the shared OperationProcessor, which
+ * applies and submits the resulting writes on its own thread.
+ */
+class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
+ // Instance identifier of the topology this exporter maintains.
+ private final InstanceIdentifier<Topology> topology;
+ // Serializes and batches the datastore operations.
+ private final OperationProcessor processor;
+
+ FlowCapableTopologyExporter(final OperationProcessor processor,
+ final InstanceIdentifier<Topology> topology) {
+ this.processor = Preconditions.checkNotNull(processor);
+ this.topology = Preconditions.checkNotNull(topology);
+ }
+
+ /**
+ * Deletes the corresponding topology node, first removing any links that
+ * reference it as source or destination.
+ */
+ @Override
+ public void onNodeRemoved(final NodeRemoved notification) {
+
+ final NodeId nodeId = toTopologyNodeId(getNodeKey(notification.getNodeRef()).getId());
+ final InstanceIdentifier<Node> nodeInstance = toNodeIdentifier(notification.getNodeRef());
+
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(ReadWriteTransaction transaction) {
+ removeAffectedLinks(nodeId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+ }
+
+ @Override
+ public String toString() {
+ return "onNodeRemoved";
+ }
+ });
+ }
+
+ /**
+ * Merges a topology node (with its inventory back-reference) for the
+ * updated inventory node. Only nodes carrying the FlowCapableNodeUpdated
+ * augmentation are exported.
+ */
+ @Override
+ public void onNodeUpdated(final NodeUpdated notification) {
+ FlowCapableNodeUpdated fcnu = notification.getAugmentation(FlowCapableNodeUpdated.class);
+ if (fcnu != null) {
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction transaction) {
+ final Node node = toTopologyNode(toTopologyNodeId(notification.getId()), notification.getNodeRef());
+ final InstanceIdentifier<Node> path = getNodePath(toTopologyNodeId(notification.getId()));
+ // Last argument asks the broker to create any missing parent
+ // structure, so the first update also materializes the node.
+ transaction.merge(LogicalDatastoreType.OPERATIONAL, path, node, true);
+ }
+
+ @Override
+ public String toString() {
+ return "onNodeUpdated";
+ }
+ });
+ }
+ }
+
+ /**
+ * Deletes the corresponding termination point and the links attached to
+ * it. The parent topology node is read first: if it is no longer present
+ * there is nothing left to delete.
+ */
+ @Override
+ public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
+
+ final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(
+ notification.getNodeConnectorRef());
+
+ final InstanceIdentifier<Node> node = tpInstance.firstIdentifierOf(Node.class);
+
+ final TpId tpId = toTerminationPointId(getNodeConnectorKey(
+ notification.getNodeConnectorRef()).getId());
+
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(ReadWriteTransaction transaction) {
+ Optional<Node> nodeOptional = Optional.absent();
+ try {
+ nodeOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, node).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occured when trying to read NodeConnector ", e);
+ }
+ if (nodeOptional.isPresent()) {
+ removeAffectedLinks(tpId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "onNodeConnectorRemoved";
+ }
+ });
+ }
+
+ /**
+ * Merges a termination point for the updated node connector. If the port
+ * reports link-down state or an administrative PORT_DOWN configuration,
+ * links using this termination point are removed as well.
+ */
+ @Override
+ public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
+ final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(
+ FlowCapableNodeConnectorUpdated.class);
+ if (fcncu != null) {
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction transaction) {
+ final NodeId nodeId = toTopologyNodeId(getNodeKey(notification.getNodeConnectorRef()).getId());
+ TerminationPoint point = toTerminationPoint(toTerminationPointId(notification.getId()),
+ notification.getNodeConnectorRef());
+ final InstanceIdentifier<TerminationPoint> path = tpPath(nodeId, point.getKey().getTpId());
+ transaction.merge(LogicalDatastoreType.OPERATIONAL, path, point, true);
+ // A downed port cannot carry topology links anymore.
+ if ((fcncu.getState() != null && fcncu.getState().isLinkDown())
+ || (fcncu.getConfiguration() != null && fcncu.getConfiguration().isPORTDOWN())) {
+ removeAffectedLinks(point.getTpId(), transaction);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "onNodeConnectorUpdated";
+ }
+ });
+ }
+ }
+
+ /** Merges the newly discovered link into the topology. */
+ @Override
+ public void onLinkDiscovered(final LinkDiscovered notification) {
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction transaction) {
+ final Link link = toTopologyLink(notification);
+ final InstanceIdentifier<Link> path = linkPath(link);
+ transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
+ }
+
+ @Override
+ public String toString() {
+ return "onLinkDiscovered";
+ }
+ });
+ }
+
+ /** Link utilization events do not affect the exported topology. */
+ @Override
+ public void onLinkOverutilized(final LinkOverutilized notification) {
+ // NOOP
+ }
+
+ /**
+ * Deletes the removed link, but only if it still exists in the datastore.
+ */
+ @Override
+ public void onLinkRemoved(final LinkRemoved notification) {
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction transaction) {
+ Optional<Link> linkOptional = Optional.absent();
+ try {
+ // Check for existence first: deleting a non-existent link
+ // could fail the whole transaction.
+ linkOptional = transaction.read(LogicalDatastoreType.OPERATIONAL,
+ linkPath(toTopologyLink(notification))).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occured when trying to read Link ", e);
+ }
+ if (linkOptional.isPresent()) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "onLinkRemoved";
+ }
+ });
+ }
+
+ /** Link utilization events do not affect the exported topology. */
+ @Override
+ public void onLinkUtilizationNormal(final LinkUtilizationNormal notification) {
+ // NOOP
+ }
+
+ /** Maps an inventory node reference onto the topology node path. */
+ private InstanceIdentifier<Node> toNodeIdentifier(final NodeRef ref) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey invNodeKey = getNodeKey(ref);
+ NodeKey nodeKey = new NodeKey(toTopologyNodeId(invNodeKey.getId()));
+ return topology.child(Node.class, nodeKey);
+ }
+
+ /** Maps an inventory node-connector reference onto the termination-point path. */
+ private InstanceIdentifier<TerminationPoint> toTerminationPointIdentifier(final NodeConnectorRef ref) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey invNodeKey = getNodeKey(ref);
+ NodeConnectorKey invNodeConnectorKey = getNodeConnectorKey(ref);
+ return tpPath(toTopologyNodeId(invNodeKey.getId()), toTerminationPointId(invNodeConnectorKey.getId()));
+ }
+
+ /**
+ * Reads the whole topology and deletes every link whose source or
+ * destination node matches the given id. Read failures are logged and
+ * treated as "no links to remove".
+ */
+ private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
+ Optional<Topology> topologyOptional = Optional.absent();
+ try {
+ topologyOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, topology).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error reading topology data for topology {}", topology, e);
+ }
+ if (topologyOptional.isPresent()) {
+ removeAffectedLinks(id, topologyOptional, transaction);
+ }
+ }
+
+ /** Deletes links touching the given node id from an already-read topology. */
+ private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional, ReadWriteTransaction transaction) {
+ if (!topologyOptional.isPresent()) {
+ return;
+ }
+
+ // getLink() may be null when the topology has no links yet.
+ List<Link> linkList = topologyOptional.get().getLink() != null ?
+ topologyOptional.get().getLink() : Collections.<Link> emptyList();
+ for (Link link : linkList) {
+ if (id.equals(link.getSource().getSourceNode()) ||
+ id.equals(link.getDestination().getDestNode())) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
+ }
+ }
+ }
+
+ /**
+ * Reads the whole topology and deletes every link whose source or
+ * destination termination point matches the given id. Read failures are
+ * logged and treated as "no links to remove".
+ */
+ private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
+ Optional<Topology> topologyOptional = Optional.absent();
+ try {
+ topologyOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, topology).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error reading topology data for topology {}", topology, e);
+ }
+ if (topologyOptional.isPresent()) {
+ removeAffectedLinks(id, topologyOptional, transaction);
+ }
+ }
+
+ /** Deletes links touching the given termination point from an already-read topology. */
+ private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional, ReadWriteTransaction transaction) {
+ if (!topologyOptional.isPresent()) {
+ return;
+ }
+
+ // getLink() may be null when the topology has no links yet.
+ List<Link> linkList = topologyOptional.get().getLink() != null
+ ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
+ for (Link link : linkList) {
+ if (id.equals(link.getSource().getSourceTp()) ||
+ id.equals(link.getDestination().getDestTp())) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
+ }
+ }
+ }
+
+ /** Path of the topology node with the given id. */
+ private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
+ return topology.child(Node.class, new NodeKey(nodeId));
+ }
+
+ /** Path of the termination point tpId under node nodeId. */
+ private InstanceIdentifier<TerminationPoint> tpPath(final NodeId nodeId, final TpId tpId) {
+ NodeKey nodeKey = new NodeKey(nodeId);
+ TerminationPointKey tpKey = new TerminationPointKey(tpId);
+ return topology.child(Node.class, nodeKey).child(TerminationPoint.class, tpKey);
+ }
+
+ /** Path of the given link within the maintained topology. */
+ private InstanceIdentifier<Link> linkPath(final Link link) {
+ return topology.child(Link.class, link.getKey());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.manager;
+
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Binding-aware provider that wires the topology exporter together: on
+ * session initiation it registers the exporter for notifications, seeds the
+ * operational datastore with an empty "flow:1" topology and starts the
+ * operation-processor thread.
+ */
+public class FlowCapableTopologyProvider extends AbstractBindingAwareProvider implements AutoCloseable {
+ private final static Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyProvider.class);
+ private ListenerRegistration<NotificationListener> listenerRegistration;
+ // Thread running the OperationProcessor loop; created in onSessionInitiated.
+ private Thread thread;
+
+ /**
+ * Gets called on start of a bundle.
+ *
+ * @param session provider context used to obtain the data broker and the
+ *        notification service
+ */
+ @Override
+ public synchronized void onSessionInitiated(final ProviderContext session) {
+ final DataBroker dataBroker = session.getSALService(DataBroker.class);
+ final NotificationProviderService notificationService = session.getSALService(NotificationProviderService.class);
+
+ final String name = "flow:1";
+ final TopologyKey key = new TopologyKey(new TopologyId(name));
+ final InstanceIdentifier<Topology> path = InstanceIdentifier
+ .create(NetworkTopology.class)
+ .child(Topology.class, key);
+
+ final OperationProcessor processor = new OperationProcessor(dataBroker);
+ final FlowCapableTopologyExporter listener = new FlowCapableTopologyExporter(processor, path);
+ this.listenerRegistration = notificationService.registerNotificationListener(listener);
+
+ // Make sure the (initially empty) topology container exists before any
+ // exporter operation runs; failure is logged but not fatal.
+ final ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
+ tx.put(LogicalDatastoreType.OPERATIONAL, path, new TopologyBuilder().setKey(key).build(), true);
+ try {
+ tx.submit().get();
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.warn("Initial topology export failed, continuing anyway", e);
+ }
+
+ // Daemon thread so a forgotten shutdown never blocks JVM exit.
+ thread = new Thread(processor);
+ thread.setDaemon(true);
+ thread.setName("FlowCapableTopologyExporter-" + name);
+ thread.start();
+ }
+
+ /**
+ * Stops the provider: closes the notification registration and
+ * interrupts/joins the processor thread. Safe to call more than once.
+ *
+ * @throws InterruptedException if interrupted while joining the thread
+ */
+ @Override
+ public synchronized void close() throws InterruptedException {
+ LOG.info("FlowCapableTopologyProvider stopped.");
+ if (this.listenerRegistration != null) {
+ try {
+ this.listenerRegistration.close();
+ } catch (Exception e) {
+ LOG.error("Failed to close listener registration", e);
+ }
+ listenerRegistration = null;
+ }
+ if (thread != null) {
+ thread.interrupt();
+ // Wait for the processor loop to drain its queue and exit.
+ thread.join();
+ thread = null;
+ }
+ }
+
+ /**
+ * Gets called during stop bundle
+ *
+ * @param context The execution context of the bundle being stopped.
+ */
+ @Override
+ public void stopImpl(final BundleContext context) {
+ try {
+ this.close();
+ } catch (InterruptedException e) {
+ LOG.error("Failed to stop provider", e);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.manager;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+/**
+ * Single-threaded executor of TopologyOperations. Notification handlers
+ * enqueue operations onto a bounded queue (producers block when it is
+ * full); the run() loop drains them, folding up to
+ * MAX_TRANSACTION_OPERATIONS into one read-write transaction per submit.
+ * On transaction failure the transaction chain is re-created and all
+ * pending operations are discarded.
+ */
+final class OperationProcessor implements AutoCloseable, Runnable, TransactionChainListener {
+ private static final Logger LOG = LoggerFactory.getLogger(OperationProcessor.class);
+ // Maximum number of operations applied within a single transaction.
+ private static final int MAX_TRANSACTION_OPERATIONS = 100;
+ // Queue capacity; enqueueOperation() blocks when this is reached.
+ private static final int OPERATION_QUEUE_DEPTH = 500;
+
+ private final BlockingQueue<TopologyOperation> queue = new LinkedBlockingQueue<>(OPERATION_QUEUE_DEPTH);
+ private final DataBroker dataBroker;
+ // Replaced with a fresh chain from the run() loop and from the
+ // chain-failure callback whenever a transaction fails.
+ private BindingTransactionChain transactionChain;
+ private volatile boolean finishing = false;
+
+ OperationProcessor(final DataBroker dataBroker) {
+ this.dataBroker = Preconditions.checkNotNull(dataBroker);
+ transactionChain = this.dataBroker.createTransactionChain(this);
+ }
+
+ /**
+ * Queues an operation for execution on the processor thread; blocks while
+ * the queue is full. An interrupt while waiting drops the task with a
+ * warning.
+ */
+ void enqueueOperation(final TopologyOperation task) {
+ try {
+ queue.put(task);
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while submitting task {}", task, e);
+ }
+ }
+
+ /**
+ * Main loop: block until an operation arrives, then drain up to
+ * MAX_TRANSACTION_OPERATIONS - 1 more into the same transaction and
+ * submit synchronously. The loop exits when the thread is interrupted.
+ */
+ @Override
+ public void run() {
+ while (!finishing) {
+ try {
+ TopologyOperation op = queue.take();
+
+ LOG.debug("New {} operation available, starting transaction", op);
+
+ final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
+
+ int ops = 0;
+ do {
+ op.applyOperation(tx);
+
+ ops++;
+ if (ops < MAX_TRANSACTION_OPERATIONS) {
+ // Non-blocking: stop batching as soon as the queue is empty.
+ op = queue.poll();
+ } else {
+ op = null;
+ }
+
+ LOG.debug("Next operation {}", op);
+ } while (op != null);
+
+ LOG.debug("Processed {} operations, submitting transaction", ops);
+
+ try {
+ // Synchronous submit; recover by recreating the chain on failure.
+ tx.submit().checkedGet();
+ } catch (final TransactionCommitFailedException e) {
+ LOG.warn("Stat DataStoreOperation unexpected State!", e);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
+ }
+
+ } catch (final IllegalStateException e) {
+ LOG.warn("Stat DataStoreOperation unexpected State!", e);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
+ } catch (final InterruptedException e) {
+ // take() was interrupted - shut the loop down gracefully.
+ LOG.warn("Stat Manager DS Operation thread interupted!", e);
+ finishing = true;
+ } catch (final Exception e) {
+ LOG.warn("Stat DataStore Operation executor fail!", e);
+ }
+ }
+ // Drain all events, making sure any blocked threads are unblocked
+ cleanDataStoreOperQueue();
+ }
+
+ /** Discards all queued operations, releasing any blocked producers. */
+ private void cleanDataStoreOperQueue() {
+ while (!queue.isEmpty()) {
+ queue.poll();
+ }
+ }
+
+ /**
+ * Chain-failure callback: log, start a fresh chain and drop the pending
+ * operations.
+ */
+ @Override
+ public void onTransactionChainFailed(TransactionChain<?, ?> chain, AsyncTransaction<?, ?> transaction, Throwable cause) {
+ LOG.error("Failed to export Topology manager operations, Transaction {} failed.", transaction.getIdentifier(), cause);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
+ }
+
+ @Override
+ public void onTransactionChainSuccessful(TransactionChain<?, ?> chain) {
+ //NOOP
+ }
+
+ /** Releases the current transaction chain. */
+ @Override
+ public void close() throws Exception {
+ if (transactionChain != null) {
+ transactionChain.close();
+ }
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.md.controller.topology.manager;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+
+/**
+ * Internal interface for submitted operations. Implementations of this
+ * interface are enqueued and batched into data store transactions.
+ */
+/**
+ * Internal interface for submitted operations. Implementations of this
+ * interface are enqueued and batched into data store transactions.
+ */
+interface TopologyOperation {
+ /**
+ * Execute the operation on top of the transaction. Invoked from the
+ * OperationProcessor's worker thread; the transaction is submitted by
+ * the caller after the batch completes.
+ *
+ * @param transaction Datastore transaction
+ */
+ void applyOperation(ReadWriteTransaction transaction);
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.md.controller.topology.manager;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.StateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
+public class FlowCapableTopologyExporterTest {
+
+ @Mock
+ private DataBroker mockDataBroker;
+
+ @Mock
+ private BindingTransactionChain mockTxChain;
+
+ private OperationProcessor processor;
+
+ private FlowCapableTopologyExporter exporter;
+
+ private InstanceIdentifier<Topology> topologyIID;
+
+ private final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.initMocks(this);
+
+ // Have the mocked broker hand out the mocked transaction chain.
+ doReturn(mockTxChain).when(mockDataBroker)
+ .createTransactionChain(any(TransactionChainListener.class));
+
+ processor = new OperationProcessor(mockDataBroker);
+
+ // Exporter under test writes into a throw-away "test" topology instance.
+ topologyIID = InstanceIdentifier.create(NetworkTopology.class)
+ .child(Topology.class, new TopologyKey(new TopologyId("test")));
+ exporter = new FlowCapableTopologyExporter(processor, topologyIID);
+
+ // Run the processor loop on a background thread, as in production.
+ executor.execute(processor);
+ }
+
+ @After
+ public void tearDown() {
+ // Interrupts the processor thread so the run() loop terminates.
+ executor.shutdownNow();
+ }
+
+ @SuppressWarnings({ "rawtypes" })
+ @Test
+ public void testOnNodeRemoved() {
+
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ // Topology with two links touching node1 (expected to be deleted) and
+ // one unrelated link (expected to survive).
+ // NOTE(review): the third link is also named "link2" - probably meant
+ // to be "link3"; the duplicated LinkKey could mask an erroneous delete.
+ List<Link> linkList = Arrays.asList(
+ newLink("link1", newSourceNode("node1"), newDestNode("dest")),
+ newLink("link2", newSourceNode("source"), newDestNode("node1")),
+ newLink("link2", newSourceNode("source2"), newDestNode("dest2")));
+ final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ // Expected deletions: both affected links, then the node itself.
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Link.class, linkList.get(0).getKey()),
+ topologyIID.child(Link.class, linkList.get(1).getKey()),
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ };
+
+ // Stub the topology read performed by removeAffectedLinks.
+ SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ readFuture.set(Optional.of(topology));
+ ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+ doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+ // Stub the parent-node existence read.
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
+ CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+ int expDeleteCalls = expDeletedIIDs.length;
+ CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx1).when(mockTxChain).newReadWriteTransaction();
+
+ // Fire the notification and wait for the processor thread to submit.
+ exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+ waitForSubmit(submitLatch1);
+
+ setReadFutureAsync(topology, readFuture);
+
+ waitForDeletes(expDeleteCalls, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+ verifyMockTx(mockTx1);
+ }
+
+ @SuppressWarnings({ "rawtypes" })
+ @Test
+ public void testOnNodeRemovedWithNoTopology() {
+
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ };
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+ waitForSubmit(submitLatch);
+
+ waitForDeletes(1, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorRemoved() {
+
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ TerminationPointKey terminationPointKey = new TerminationPointKey(new TpId("tp1"));
+
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey(terminationPointKey.getTpId().getValue());
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(
+ newLink("link1", newSourceTp("tp1"), newDestTp("dest")),
+ newLink("link2", newSourceTp("source"), newDestTp("tp1")),
+ newLink("link3", newSourceTp("source2"), newDestTp("dest2")));
+ final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Link.class, linkList.get(0).getKey()),
+ topologyIID.child(Link.class, linkList.get(1).getKey()),
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+ };
+
+ final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ readFuture.set(Optional.of(topology));
+ ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+ doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
+ CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+ int expDeleteCalls = expDeletedIIDs.length;
+ CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx1).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).build());
+
+ waitForSubmit(submitLatch1);
+
+ setReadFutureAsync(topology, readFuture);
+
+ waitForDeletes(expDeleteCalls, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+ verifyMockTx(mockTx1);
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorRemovedWithNoTopology() {
+
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ TerminationPointKey terminationPointKey = new TerminationPointKey(new TpId("tp1"));
+
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey(terminationPointKey.getTpId().getValue());
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+ };
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ waitForDeletes(1, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+ }
+
+ @Test
+ public void testOnNodeUpdated() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeUpdated(new NodeUpdatedBuilder().setNodeRef(new NodeRef(invNodeID))
+ .setId(nodeKey.getId()).addAugmentation(FlowCapableNodeUpdated.class,
+ new FlowCapableNodeUpdatedBuilder().build()).build());
+
+ waitForSubmit(submitLatch);
+
+ ArgumentCaptor<Node> mergedNode = ArgumentCaptor.forClass(Node.class);
+ NodeId expNodeId = new NodeId("node1");
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(Node.class,
+ new NodeKey(expNodeId))), mergedNode.capture(), eq(true));
+ assertEquals("getNodeId", expNodeId, mergedNode.getValue().getNodeId());
+ InventoryNode augmentation = mergedNode.getValue().getAugmentation(InventoryNode.class);
+ assertNotNull("Missing augmentation", augmentation);
+ assertEquals("getInventoryNodeRef", new NodeRef(invNodeID), augmentation.getInventoryNodeRef());
+ }
+
+ @Test
+ public void testOnNodeConnectorUpdated() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().build()).build());
+
+ waitForSubmit(submitLatch);
+
+ ArgumentCaptor<TerminationPoint> mergedNode = ArgumentCaptor.forClass(TerminationPoint.class);
+ NodeId expNodeId = new NodeId("node1");
+ TpId expTpId = new TpId("tp1");
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(expNodeId)).child(TerminationPoint.class,
+ new TerminationPointKey(expTpId));
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ mergedNode.capture(), eq(true));
+ assertEquals("getTpId", expTpId, mergedNode.getValue().getTpId());
+ InventoryNodeConnector augmentation = mergedNode.getValue().getAugmentation(
+ InventoryNodeConnector.class);
+ assertNotNull("Missing augmentation", augmentation);
+ assertEquals("getInventoryNodeConnectorRef", new NodeConnectorRef(invNodeConnID),
+ augmentation.getInventoryNodeConnectorRef());
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdatedWithLinkStateDown() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+ Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().setState(
+ new StateBuilder().setLinkDown(true).build()).build()).build());
+
+ waitForDeletes(1, deleteLatch);
+
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+ new TerminationPointKey(new TpId("tp1")));
+
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ any(TerminationPoint.class), eq(true));
+
+ assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+ linkList.get(0).getKey())}, deletedLinkIDs);
+ }
+
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdatedWithPortDown() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+ Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().setConfiguration(
+ new PortConfig(true, true, true, true)).build()).build());
+
+ waitForDeletes(1, deleteLatch);
+
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+ new TerminationPointKey(new TpId("tp1")));
+
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ any(TerminationPoint.class), eq(true));
+
+ assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+ linkList.get(0).getKey())}, deletedLinkIDs);
+ }
+
+ @Test
+ public void testOnLinkDiscovered() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onLinkDiscovered(new LinkDiscoveredBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ ArgumentCaptor<Link> mergedNode = ArgumentCaptor.forClass(Link.class);
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+ mergedNode.capture(), eq(true));
+ assertEquals("Source node ID", "sourceNode",
+ mergedNode.getValue().getSource().getSourceNode().getValue());
+ assertEquals("Source TP ID", "sourceTP",
+ mergedNode.getValue().getSource().getSourceTp().getValue());
+ assertEquals("Dest node ID", "destNode",
+ mergedNode.getValue().getDestination().getDestNode().getValue());
+ assertEquals("Dest TP ID", "destTP",
+ mergedNode.getValue().getDestination().getDestTp().getValue());
+ }
+
+ @Test
+ public void testOnLinkRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ Link link = newLink(sourceNodeConnKey.getId().getValue(), newSourceTp(sourceNodeConnKey.getId().getValue()),
+ newDestTp(destNodeConnKey.getId().getValue()));
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+ doReturn(Futures.immediateCheckedFuture(Optional.of(link))).when(mockTx).read(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+
+ exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ verify(mockTx).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+ }
+
+ @Test
+ public void testOnLinkRemovedLinkDoesNotExist() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+ doReturn(Futures.immediateCheckedFuture(Optional.<Link>absent())).when(mockTx).read(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+
+ exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ verify(mockTx, never()).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+ }
+
+ private void verifyMockTx(ReadWriteTransaction mockTx) {
+ InOrder inOrder = inOrder(mockTx);
+ inOrder.verify(mockTx, atLeast(0)).submit();
+ inOrder.verify(mockTx, never()).delete(eq(LogicalDatastoreType.OPERATIONAL),
+ any(InstanceIdentifier.class));
+ }
+
+ @SuppressWarnings("rawtypes")
+ private void assertDeletedIDs(InstanceIdentifier[] expDeletedIIDs,
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs) {
+ Set<InstanceIdentifier> actualIIDs = new HashSet<>(deletedLinkIDs.getAllValues());
+ for(InstanceIdentifier id: expDeletedIIDs) {
+ assertTrue("Missing expected deleted IID " + id, actualIIDs.contains(id));
+ }
+ }
+
+ private void setReadFutureAsync(final Topology topology,
+ final SettableFuture<Optional<Topology>> readFuture) {
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ readFuture.set(Optional.of(topology));
+ }
+
+ }.start();
+ }
+
+ private void waitForSubmit(CountDownLatch latch) {
+ assertEquals("Transaction submitted", true,
+ Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
+ }
+
+ private void waitForDeletes(int expDeleteCalls, final CountDownLatch latch) {
+ boolean done = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS);
+ if(!done) {
+ fail("Expected " + expDeleteCalls + " delete calls. Actual: " +
+ (expDeleteCalls - latch.getCount()));
+ }
+ }
+
+ private CountDownLatch setupStubbedSubmit(ReadWriteTransaction mockTx) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ doAnswer(new Answer<CheckedFuture<Void, TransactionCommitFailedException>>() {
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> answer(
+ InvocationOnMock invocation) {
+ latch.countDown();
+ return Futures.immediateCheckedFuture(null);
+ }
+ }).when(mockTx).submit();
+
+ return latch;
+ }
+
+ @SuppressWarnings("rawtypes")
+ private void setupStubbedDeletes(ReadWriteTransaction mockTx,
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs, final CountDownLatch latch) {
+ doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) {
+ latch.countDown();
+ return null;
+ }
+ }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL), deletedLinkIDs.capture());
+ }
+
+ private org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ newInvNodeKey(String id) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey =
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey(
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+ rev130819.NodeId(id));
+ return nodeKey;
+ }
+
+ private NodeConnectorKey newInvNodeConnKey(String id) {
+ return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey(
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.
+ NodeConnectorId(id));
+ }
+
+ private KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> newNodeConnID(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey) {
+ return InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey).child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+ rev130819.node.NodeConnector.class, ncKey);
+ }
+
+ private Link newLink(String id, Source source, Destination dest) {
+ return new LinkBuilder().setLinkId(new LinkId(id))
+ .setSource(source).setDestination(dest).build();
+ }
+
+ private Destination newDestTp(String id) {
+ return new DestinationBuilder().setDestTp(new TpId(id)).build();
+ }
+
+ private Source newSourceTp(String id) {
+ return new SourceBuilder().setSourceTp(new TpId(id)).build();
+ }
+
+ private Destination newDestNode(String id) {
+ return new DestinationBuilder().setDestNode(new NodeId(id)).build();
+ }
+
+ private Source newSourceNode(String id) {
+ return new SourceBuilder().setSourceNode(new NodeId(id)).build();
+ }
+}
<karaf.distro.empty.version>1.5.0-SNAPSHOT</karaf.distro.empty.version>
<yang.binding.version>0.7.0-SNAPSHOT</yang.binding.version>
<dlux.version>0.2.0-SNAPSHOT</dlux.version>
+ <config.version>0.3.0-SNAPSHOT</config.version>
+ <yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
+ <opendaylight-l2-types.version>2013.08.27.7-SNAPSHOT</opendaylight-l2-types.version>
+ <ietf-yang-types.version>2010.09.24.7-SNAPSHOT</ietf-yang-types.version>
+ <ietf-inet-types.version>2010.09.24.7-SNAPSHOT</ietf-inet-types.version>
+ <yang-ext.version>2013.09.07.7-SNAPSHOT</yang-ext.version>
+ <openflowplugin-api.version>0.1.0-SNAPSHOT</openflowplugin-api.version>
</properties>
<dependencyManagement>
<artifactId>yang-binding</artifactId>
<version>${yang.binding.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ <version>${ietf-inet-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ <version>${ietf-yang-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ <version>${yang-ext.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>opendaylight-l2-types</artifactId>
+ <version>${opendaylight-l2-types.version}</version>
+ </dependency>
+
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-api</artifactId>
- <version>${project.version}</version>
+ <version>${openflowplugin-api.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowjava</groupId>
<artifactId>model-inventory</artifactId>
<version>${controller.model.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-topology</artifactId>
+ <version>${controller.model.version}</version>
+ </dependency>
+
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-binding-api</artifactId>
<version>${controller.model.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ <version>${controller.model.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-util</artifactId>
+ <version>${controller.model.version}</version>
+ </dependency>
+
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>liblldp</artifactId>