if (flowNode.isPresent()) {
/* Tables - have to be pushed before groups */
// CHECK if while pushing the update, updateTableInput can be null to emulate a table add
- List<Table> tableList = flowNode.get().getTable() != null
- ? flowNode.get().getTable() : Collections.<Table> emptyList() ;
- for (Table table : tableList) {
- TableKey tableKey = table.getKey();
+ List<TableFeatures> tableList = flowNode.get().getTableFeatures() != null
+ ? flowNode.get().getTableFeatures() : Collections.<TableFeatures> emptyList() ;
+ for (TableFeatures tableFeaturesItem : tableList) {
+ TableFeaturesKey tableKey = tableFeaturesItem.getKey();
KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII
- = nodeIdent.child(Table.class, tableKey).child(TableFeatures.class, new TableFeaturesKey(tableKey.getId()));
- List<TableFeatures> tableFeatures = table.getTableFeatures();
- if (tableFeatures != null) {
- for (TableFeatures tableFeaturesItem : tableFeatures) {
+ = nodeIdent.child(TableFeatures.class, new TableFeaturesKey(tableKey.getTableId()));
provider.getTableFeaturesCommiter().update(tableFeaturesII, tableFeaturesItem, null, nodeIdent);
- }
- }
}
/* Groups - have to be first */
import java.util.Collections;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
-
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.openflowplugin.common.wait.SimpleTaskRetryLooper;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
private ListenerRegistration<TableForwarder> listenerRegistration;
- public TableForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ public TableForwarder(final ForwardingRulesManager manager, final DataBroker db) {
super(manager, TableFeatures.class);
Preconditions.checkNotNull(db, "DataBroker can not be null!");
final DataTreeIdentifier<TableFeatures> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, getWildCardPath());
@Override
protected InstanceIdentifier<TableFeatures> getWildCardPath() {
return InstanceIdentifier.create(Nodes.class).child(Node.class)
- .augmentation(FlowCapableNode.class).child(Table.class).child(TableFeatures.class);
+ .augmentation(FlowCapableNode.class).child(TableFeatures.class);
}
@Override
public void remove(final InstanceIdentifier<TableFeatures> identifier, final TableFeatures removeDataObj,
final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- // DO Nothing
+ // DO Nothing
}
@Override
public void update(final InstanceIdentifier<TableFeatures> identifier,
final TableFeatures original, final TableFeatures update,
final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- LOG.debug( "Received the Table Update request [Tbl id, node Id, original, upd" +
- " " + identifier + " " + nodeIdent + " " + original + " " + update );
+ LOG.debug("Received the Table Update request [Tbl id, node Id, original, upd" +
+ " " + identifier + " " + nodeIdent + " " + original + " " + update);
final TableFeatures originalTableFeatures = original;
- TableFeatures updatedTableFeatures ;
- if( null == update)
- updatedTableFeatures = original;
+ TableFeatures updatedTableFeatures;
+ if (null == update)
+ updatedTableFeatures = original;
else
- updatedTableFeatures = update;
+ updatedTableFeatures = update;
final UpdateTableInputBuilder builder = new UpdateTableInputBuilder();
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
- InstanceIdentifier<Table> iiToTable = identifier.firstIdentifierOf(Table.class);
- builder.setTableRef(new TableRef(iiToTable));
+ // TODO: reconsider model - this particular field is not used in service implementation
+ builder.setTableRef(new TableRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setOriginalTable(new OriginalTableBuilder().setTableFeatures(
Collections.singletonList(originalTableFeatures)).build());
- LOG.debug( "Invoking SalTableService " ) ;
+ LOG.debug("Invoking SalTableService ");
- if( this.provider.getSalTableService() != null )
- LOG.debug( " Handle to SalTableServices" + this.provider.getSalTableService()) ;
+ if (this.provider.getSalTableService() != null)
+ LOG.debug(" Handle to SalTableServices" + this.provider.getSalTableService());
this.provider.getSalTableService().updateTable(builder.build());
}
@Override
public void add(final InstanceIdentifier<TableFeatures> identifier, final TableFeatures addDataObj,
final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- //DO NOthing
+        // Do nothing
}
@Override
*/
package test.mock;
-import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
+import static org.junit.Assert.assertEquals;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableInput;
-import test.mock.util.EntityOwnershipServiceMock;
-import test.mock.util.SalTableServiceMock;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
+import java.util.List;
import org.junit.Test;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.openflowplugin.applications.frm.impl.ForwardingRulesManagerImpl;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import test.mock.util.EntityOwnershipServiceMock;
import test.mock.util.FRMTest;
import test.mock.util.RpcProviderRegistryMock;
-import java.util.List;
-import static org.junit.Assert.assertEquals;
+import test.mock.util.SalTableServiceMock;
public class TableFeaturesListenerTest extends FRMTest {
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
TableFeatures tableFeaturesData = new TableFeaturesBuilder().setKey(tableFeaturesKey).build();
InstanceIdentifier<TableFeatures> tableFeaturesII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
- .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(TableFeatures.class, tableFeaturesKey);
+ .augmentation(FlowCapableNode.class).child(TableFeatures.class, tableFeaturesKey);
WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, tableFeaturesII, tableFeaturesData);
assertCommit(writeTx.submit());
package org.opendaylight.openflowplugin.applications.inventory.manager;
import com.google.common.base.Preconditions;
+import java.util.List;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.TableUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.List;
-
/**
 * Class receives and processes table feature updates. It augments table features on the table node
* in the inventory tree (node/table/{table-id}).
for (final TableFeatures tableFeatureData : swTablesFeatures) {
final Short tableId = tableFeatureData.getTableId();
final KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII = flowCapableNodeII
- .child(Table.class, new TableKey(tableId))
.child(TableFeatures.class,new TableFeaturesKey(tableId));
LOG.trace("Updating table feature for table {} of node {}", tableId, nodeId.getValue());
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.DottedQuad;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.MacAddressFilter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.arp.match.fields.ArpSourceHardwareAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.arp.match.fields.ArpTargetHardwareAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.arp.match.fields.ArpTargetHardwareAddressBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.Layer3Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.ArpMatch;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchArbitraryBitMask;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6Match;
}
verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(ipv4PrefixSource, statsIpv4Match.getIpv4Source());
}
+ } else if (statsLayer3Match instanceof ArpMatch && storedLayer3Match instanceof ArpMatch) {
+ verdict = arpMatchEquals((ArpMatch)statsLayer3Match, (ArpMatch)storedLayer3Match);
} else {
final Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
if (nullCheckOut != null) {
return verdict;
}
+ static boolean arpMatchEquals(final ArpMatch statsArpMatch, final ArpMatch storedArpMatch) {
+
+ Integer statsOp = statsArpMatch.getArpOp();
+ Integer storedOp = storedArpMatch.getArpOp();
+
+ Boolean nullCheck = checkNullValues(statsOp, storedOp);
+ if (nullCheck != null) {
+ if (nullCheck == false) {
+ return false;
+ }
+ } else if (!statsOp.equals(storedOp)) {
+ return false;
+ }
+
+ Ipv4Prefix statsIp = statsArpMatch.getArpSourceTransportAddress();
+ Ipv4Prefix storedIp = storedArpMatch.getArpSourceTransportAddress();
+ if (!compareIpv4PrefixNullSafe(statsIp, storedIp)) {
+ return false;
+ }
+
+ statsIp = statsArpMatch.getArpTargetTransportAddress();
+ storedIp = storedArpMatch.getArpTargetTransportAddress();
+ if (!compareIpv4PrefixNullSafe(statsIp, storedIp)) {
+ return false;
+ }
+
+ MacAddressFilter statsMac = statsArpMatch.getArpSourceHardwareAddress();
+ MacAddressFilter storedMac = storedArpMatch.getArpSourceHardwareAddress();
+ if (!ethernetMatchFieldsEquals(statsMac, storedMac)) {
+ return false;
+ }
+
+ statsMac = statsArpMatch.getArpTargetHardwareAddress();
+ storedMac = storedArpMatch.getArpTargetHardwareAddress();
+ if (!ethernetMatchFieldsEquals(statsMac, storedMac)) {
+ return false;
+ }
+
+ return true;
+ }
+
/**
* TODO: why don't we use the default Ipv4Prefix.equals()?
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.opendaylight.openflowplugin</groupId>
- <artifactId>openflowplugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
- <relativePath>../../parent</relativePath>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>karaf-parent</artifactId>
+ <version>1.7.0-SNAPSHOT</version>
+ <relativePath></relativePath>
</parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-karaf</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<prerequisites>
<maven>3.0</maven>
</prerequisites>
+
<properties>
- <branding.version>1.3.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.7.0-SNAPSHOT</karaf.resources.version>
+ <openflowplugin.version>0.3.0-SNAPSHOT</openflowplugin.version>
</properties>
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>openflowplugin-artifacts</artifactId>
+ <version>${openflowplugin.version}</version>
+ <scope>import</scope>
+ <type>pom</type>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
<dependencies>
<dependency>
<!-- scope is compile so all features (there is only one) are installed
into startup.properties and the feature repo itself is not installed -->
<groupId>org.apache.karaf.features</groupId>
<artifactId>framework</artifactId>
- <version>${karaf.version}</version>
<type>kar</type>
</dependency>
- <!-- scope is runtime so the feature repo is listed in the features
- service config file, and features may be installed using the
- karaf-maven-plugin configuration -->
- <dependency>
- <groupId>org.apache.karaf.features</groupId>
- <artifactId>standard</artifactId>
- <version>${karaf.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
-
- <!-- ODL Branding -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>karaf.branding</artifactId>
- <version>${branding.version}</version>
- <scope>compile</scope>
- </dependency>
-
- <!-- Resources needed -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>opendaylight-karaf-resources</artifactId>
- <version>${karaf.resources.version}</version>
- </dependency>
-
<!-- openflowplugin feature -->
<dependency>
<artifactId>features-openflowplugin</artifactId>
<type>xml</type>
<scope>runtime</scope>
</dependency>
- <!-- MD-SAL Related Features -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-mdsal</artifactId>
- <version>${mdsal.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<!-- openflowplugin extension feature -->
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
</dependencies>
<build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.eclipse.m2e</groupId>
- <artifactId>lifecycle-mapping</artifactId>
- <version>1.0.0</version>
- <configuration>
- <lifecycleMappingMetadata>
- <pluginExecutions>
- <pluginExecution>
- <pluginExecutionFilter>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <versionRange>[0,)</versionRange>
- <goals>
- <goal>cleanVersions</goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
- </pluginExecution>
- <pluginExecution>
- <pluginExecutionFilter>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-dependency-plugin</artifactId>
- <versionRange>[0,)</versionRange>
- <goals>
- <goal>copy</goal>
- <goal>unpack</goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
- </pluginExecution>
- <pluginExecution>
- <pluginExecutionFilter>
- <groupId>org.apache.karaf.tooling</groupId>
- <artifactId>karaf-maven-plugin</artifactId>
- <versionRange>[0,)</versionRange>
- <goals>
- <goal>commands-generate-help</goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
- </pluginExecution>
- <pluginExecution>
- <pluginExecutionFilter>
- <groupId>org.fusesource.scalate</groupId>
- <artifactId>maven-scalate-plugin</artifactId>
- <versionRange>[0,)</versionRange>
- <goals>
- <goal>sitegen</goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
- </pluginExecution>
- <pluginExecution>
- <pluginExecutionFilter>
- <groupId>org.apache.servicemix.tooling</groupId>
- <artifactId>depends-maven-plugin</artifactId>
- <versionRange>[0,)</versionRange>
- <goals>
- <goal>generate-depends-file</goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
- </pluginExecution>
- </pluginExecutions>
- </lifecycleMappingMetadata>
- </configuration>
- </plugin>
- </plugins>
- </pluginManagement>
<plugins>
- <plugin>
- <groupId>org.apache.karaf.tooling</groupId>
- <artifactId>karaf-maven-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <!-- no startupFeatures -->
- <bootFeatures>
- <feature>standard</feature>
- </bootFeatures>
- <!-- no installedFeatures -->
- </configuration>
- <executions>
- <execution>
- <id>process-resources</id>
- <goals>
- <goal>install-kars</goal>
- </goals>
- <phase>process-resources</phase>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
- <configuration>
- <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/configuration\/initial\/</excludes>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-dependency-plugin</artifactId>
- <version>2.6</version>
- <executions>
- <execution>
- <id>copy</id>
- <goals>
- <goal>copy</goal>
- </goals>
- <!-- here the phase you need -->
- <phase>generate-resources</phase>
- <configuration>
- <artifactItems>
- <artifactItem>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>karaf.branding</artifactId>
- <version>${karaf.branding.version}</version>
- <outputDirectory>target/assembly/lib</outputDirectory>
- <destFileName>karaf.branding-${branding.version}.jar</destFileName>
- </artifactItem>
- </artifactItems>
- </configuration>
- </execution>
- <execution>
- <id>unpack-karaf-resources</id>
- <goals>
- <goal>unpack-dependencies</goal>
- </goals>
- <phase>prepare-package</phase>
- <configuration>
- <outputDirectory>${project.build.directory}/assembly</outputDirectory>
- <groupId>org.opendaylight.controller</groupId>
- <includeArtifactIds>opendaylight-karaf-resources</includeArtifactIds>
- <excludes>META-INF\/**</excludes>
- <excludeTransitive>true</excludeTransitive>
- <ignorePermissions>false</ignorePermissions>
- </configuration>
- </execution>
- <execution>
- <id>copy-dependencies</id>
- <phase>prepare-package</phase>
- <goals>
- <goal>copy-dependencies</goal>
- </goals>
- <configuration>
- <outputDirectory>${project.build.directory}/assembly/system</outputDirectory>
- <overWriteReleases>false</overWriteReleases>
- <overWriteSnapshots>true</overWriteSnapshots>
- <overWriteIfNewer>true</overWriteIfNewer>
- <useRepositoryLayout>true</useRepositoryLayout>
- <addParentPoms>true</addParentPoms>
- <copyPom>true</copyPom>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-antrun-plugin</artifactId>
- <executions>
- <execution>
- <phase>prepare-package</phase>
- <goals>
- <goal>run</goal>
- </goals>
- <configuration>
- <tasks>
- <chmod perm="755">
- <fileset dir="${project.build.directory}/assembly/bin">
- <include name="karaf"/>
- <include name="instance"/>
- </fileset>
- </chmod>
- </tasks>
- </configuration>
- </execution>
- </executions>
- </plugin>
-
<!-- DO NOT deploy the karaf artifact -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
</plugins>
</build>
- <profiles>
- <profile>
- <id>create-karaf-archive</id>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.karaf.tooling</groupId>
- <artifactId>karaf-maven-plugin</artifactId>
- <extensions>true</extensions>
- <executions>
- <execution>
- <id>package</id>
- <goals>
- <goal>instance-create-archive</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- </profile>
- </profiles>
-
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
--- /dev/null
+module barrier-common {
+ namespace "urn:opendaylight:service:barrier:common";
+ prefix barrier-common;
+
+ description "Openflow barrier for services - common groupings.";
+
+ revision "2016-03-15" {
+ description "Initial revision of batch common groupings.";
+ }
+
+ grouping barrier-suffix {
+ description "Flag indicating that barrier will be attached after some service-specific action.";
+
+ leaf barrier-after {
+ type boolean;
+ }
+ }
+}
--- /dev/null
+module batch-common {
+ namespace "urn:opendaylight:service:batch:common";
+ prefix batch-common;
+
+ description "Openflow batch services - common groupings.";
+
+ revision "2016-03-22" {
+ description "Initial revision of batch common groupings.";
+ }
+
+ grouping batch-order-grouping {
+ description "provide unified batch order value";
+ leaf batch-order {
+ type uint16;
+ }
+ }
+}
import yang-ext {prefix ext; revision-date "2013-07-09";}
import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
import opendaylight-port-types {prefix port;revision-date "2013-09-25";}
import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
import opendaylight-table-types {prefix table;revision-date "2013-10-26";}
import opendaylight-flow-types {prefix flow;revision-date "2013-10-26";}
import opendaylight-group-types {prefix group;revision-date "2013-10-18";}
import opendaylight-meter-types {prefix meter;revision-date "2013-09-18";}
-
+
description "Flow Capable Node extensions to the Inventory model";
revision "2013-08-19" {
description "added descriptions";
}
-
+
identity feature-capability {
}
-
+
identity flow-feature-capability-flow-stats {
- description "Flow statistics";
- base feature-capability;
+ description "Flow statistics";
+ base feature-capability;
}
-
+
identity flow-feature-capability-table-stats {
description "Table statistics";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-port-stats {
description "Port statistics";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-stp {
description "802.1d spanning tree";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-reserved {
description "Reserved, must be zero";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-ip-reasm {
description "Can reassemble IP fragments";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-queue-stats {
description "Queue statistics";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-arp-match-ip {
description "Match IP addresses in ARP pkts";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-group-stats {
description "Group statistics";
- base feature-capability;
+ base feature-capability;
}
-
+
identity flow-feature-capability-port-blocked {
description "Switch will block looping ports";
- base feature-capability;
+ base feature-capability;
}
-
+
grouping feature {
description "Features supported by openflow device.";
leaf support-state {
leaf queue-id {
type uint32;
description "id for the specific queue";
- mandatory true;
+ mandatory true;
}
container properties {
leaf minimum-rate {
grouping tables {
description "Openflow table structure. Here flows are contained.";
list table {
- key "id";
-
+ key "id";
+
leaf id {
type uint8;
- }
-
- uses table:table-features;
-
+ }
+
list flow {
- key "id";
-
+ key "id";
+
leaf id {
type flow-id;
- }
-
+ }
+
uses flow:flow;
}
// BE-RECON: Modification for including stale-flow for Reconciliation
}
}
}
-
+
grouping meters {
description "Openflow meter list.";
list meter {
}
uses tables;
+ uses table:table-features;
uses group:groups;
uses meters;
uses ip-address-grouping;
leaf match {
type string; // FIXME: Add identity
}
-
+
}
}
-
+
container supported-instructions {
list instruction-type {
key "instruction";
}
}
}
-
+
container switch-features {
-
+
leaf max_buffers {
type uint32;
}
-
+
leaf max_tables {
type uint8;
}
-
+
leaf-list capabilities {
type identityref {
base feature-capability;
}
}
-
+
}
}
uses port:flow-capable-port;
}
+ grouping snapshot-gathering-status-grouping {
+ description "Basic info about snapshot gathering - timestamps of begin, end.";
+
+ container snapshot-gathering-status-start {
+ description "gathering start mark";
+ leaf begin {
+ type yang:date-and-time;
+ }
+ }
+
+ container snapshot-gathering-status-end {
+ description "gathering end mark + result";
+ leaf end {
+ type yang:date-and-time;
+ }
+ leaf succeeded {
+ type boolean;
+ }
+ }
+ }
+
augment "/inv:nodes/inv:node" {
ext:augment-identifier "flow-capable-node";
description "Top attach point of openflow node into node inventory tree.";
description "Openflow port into node notification.";
uses flow-node-connector;
}
-
+
augment "/inv:node-connector-updated" {
ext:augment-identifier "flow-capable-node-connector-updated";
description "Openflow port into node-connector notification.";
}
}
}
+
+ augment "/inv:nodes/inv:node" {
+ ext:augment-identifier "flow-capable-statistics-gathering-status";
+ description "Placeholder for timestamp of device status snapshot.
+            This is constructed by an asynchronous process.";
+ uses snapshot-gathering-status-grouping;
+ }
}
--- /dev/null
+module sal-flat-batch {
+ namespace "urn:opendaylight:flat-batch:service";
+ prefix fbatch;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import batch-common {prefix batch;revision-date "2016-03-22";}
+ import sal-flows-batch {prefix f-batch;revision-date "2016-03-14";}
+ import flow-node-inventory {prefix flow-inv; revision-date "2013-08-19";}
+ import sal-groups-batch {prefix g-batch;revision-date "2016-03-15";}
+ import opendaylight-group-types {prefix group-type;revision-date "2013-10-18";}
+ import sal-meters-batch {prefix m-batch;revision-date "2016-03-16";}
+ import opendaylight-meter-types {prefix meter-type;revision-date "2013-09-18";}
+
+    description "Openflow flat batch management - add/update/remove of flows, groups and meters in one batch.";
+
+ revision "2016-03-21" {
+ description "Initial revision of batch flat service.";
+ }
+
+
+ rpc process-flat-batch {
+ description "Process add/update/remove of items in batch towards openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ choice batch-choice {
+ // filled via augmentations
+ }
+ }
+ leaf exit-on-first-error {
+            description "If true then the batch will execute all steps and report a list of errors that occurred,
+                otherwise only the first error will be reported and execution will be stopped right there.";
+ type boolean;
+ }
+ }
+ output {
+ list batch-failure {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ choice batch-item-id-choice {
+ // filled via augmentations
+ }
+ }
+ }
+ }
+
+ augment "/process-flat-batch/input/batch/batch-choice" {
+ ext:augment-identifier "flat-batch-flow-crud-case-aug";
+ description "Openflow add/remove/update flow operation.";
+
+ case flat-batch-add-flow-case {
+ list flat-batch-add-flow {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses f-batch:batch-flow-input-grouping;
+ }
+ }
+ case flat-batch-remove-flow-case {
+ list flat-batch-remove-flow {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses f-batch:batch-flow-input-grouping;
+ }
+ }
+ case flat-batch-update-flow-case {
+ list flat-batch-update-flow {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses f-batch:batch-flow-input-update-grouping;
+ }
+ }
+ }
+
+ augment "/process-flat-batch/input/batch/batch-choice" {
+ ext:augment-identifier "flat-batch-group-crud-case-aug";
+ description "Openflow add/remove/update group operation.";
+
+ case flat-batch-add-group-case {
+ list flat-batch-add-group {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses group-type:group;
+ }
+ }
+ case flat-batch-remove-group-case {
+ list flat-batch-remove-group {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses group-type:group;
+ }
+ }
+ case flat-batch-update-group-case {
+ list flat-batch-update-group {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses g-batch:batch-group-input-update-grouping;
+ }
+ }
+ }
+
+ augment "/process-flat-batch/input/batch/batch-choice" {
+ ext:augment-identifier "flat-batch-meter-crud-case-aug";
+ description "Openflow add/remove/update meter operation.";
+
+ case flat-batch-add-meter-case {
+ list flat-batch-add-meter {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses meter-type:meter;
+ }
+ }
+ case flat-batch-remove-meter-case {
+ list flat-batch-remove-meter {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses meter-type:meter;
+ }
+ }
+ case flat-batch-update-meter-case {
+ list flat-batch-update-meter {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses m-batch:batch-meter-input-update-grouping;
+ }
+ }
+ }
+
+ augment "/process-flat-batch/output/batch-failure/batch-item-id-choice" {
+ ext:augment-identifier "flat-batch-failure-ids-aug";
+ description "Openflow flat batch failures - corresponding item id.";
+
+ // tells the caller which flow/group/meter a reported batch failure refers to
+ case flat-batch-failure-flow-id-case {
+ description "case for flow-id";
+ leaf flow-id {
+ type flow-inv:flow-id;
+ }
+ }
+ case flat-batch-failure-group-id-case {
+ description "case for group-id";
+ leaf group-id {
+ type group-type:group-id;
+ }
+ }
+ case flat-batch-failure-meter-id-case {
+ description "case for meter-id";
+ leaf meter-id {
+ type meter-type:meter-id;
+ }
+ }
+ }
+}
--- /dev/null
+module sal-flows-batch {
+ namespace "urn:opendaylight:flows:service";
+ prefix flows;
+
+ import barrier-common {prefix bc;revision-date "2016-03-15";}
+ import batch-common {prefix batch;revision-date "2016-03-22";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-flow-types {prefix types;revision-date "2013-10-26";}
+ import flow-node-inventory {prefix flow-inv; revision-date "2013-08-19";}
+
+ description "Openflow batch flow management.";
+
+ revision "2016-03-14" {
+ description "Initial revision of batch flow service";
+ }
+
+ // reusable flow-id leaf shared by batch inputs and the failure output
+ grouping batch-flow-id-grouping {
+ description "General flow-id leaf.";
+
+ leaf flow-id {
+ type flow-inv:flow-id;
+ }
+ }
+
+ grouping batch-flow-input-grouping {
+ description "Openflow flow structure suitable for batch rpc input.";
+
+ uses batch-flow-id-grouping;
+ uses types:flow;
+ }
+
+ // update variant: carries both the original and the updated flow body
+ grouping batch-flow-input-update-grouping {
+ description "Openflow flow structure suitable for batch rpc input.";
+
+ uses batch-flow-id-grouping;
+ container original-batched-flow {
+ uses types:flow;
+ }
+ container updated-batched-flow {
+ uses types:flow;
+ }
+ }
+
+ // output lists only the FAILED items, keyed by their batch-order
+ grouping batch-flow-output-list-grouping {
+ description "Openflow flow list suitable for batch rpc output.";
+
+ list batch-failed-flows-output {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ uses batch-flow-id-grouping;
+ }
+ }
+
+
+ rpc add-flows-batch {
+ description "Batch adding flows to openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-add-flows {
+ key flow-id;
+ uses batch-flow-input-grouping;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-flow-output-list-grouping;
+ }
+ }
+
+ rpc remove-flows-batch {
+ description "Batch removing flows from openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-remove-flows {
+ key flow-id;
+ uses batch-flow-input-grouping;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-flow-output-list-grouping;
+ }
+ }
+
+ rpc update-flows-batch {
+ description "Batch updating flows on openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-update-flows {
+ key flow-id;
+ uses batch-flow-input-update-grouping;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-flow-output-list-grouping;
+ }
+ }
+}
--- /dev/null
+module sal-groups-batch {
+ namespace "urn:opendaylight:groups:service";
+ prefix groups;
+
+ import barrier-common {prefix bc;revision-date "2016-03-15";}
+ import batch-common {prefix batch;revision-date "2016-03-22";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-group-types {prefix group-type;revision-date "2013-10-18";}
+
+ description "Openflow batch group management.";
+
+ revision "2016-03-15" {
+ description "Initial revision of batch group service";
+ }
+
+ // update variant: carries both the original and the updated group body
+ grouping batch-group-input-update-grouping {
+ description "Openflow group structure for group batch update rpc.";
+
+ // group-id is included in group-type:group
+ container original-batched-group {
+ uses group-type:group;
+ }
+ container updated-batched-group {
+ uses group-type:group;
+ }
+ }
+
+ // output lists only the FAILED items, keyed by their batch-order
+ grouping batch-group-output-list-grouping {
+ description "Openflow group list suitable for batch rpc output.";
+
+ list batch-failed-groups-output {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ leaf group-id {
+ type group-type:group-id;
+ }
+ }
+ }
+
+
+ rpc add-groups-batch {
+ description "Batch adding groups to openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-add-groups {
+ key group-id;
+ uses group-type:group;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-group-output-list-grouping;
+ }
+ }
+
+ rpc remove-groups-batch {
+ description "Batch removing groups from openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-remove-groups {
+ key group-id;
+ uses group-type:group;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-group-output-list-grouping;
+ }
+ }
+
+ rpc update-groups-batch {
+ description "Batch updating groups on openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-update-groups {
+ // NOTE(review): key group-id is commented out, unlike add/remove lists
+ // and unlike update-flows-batch — confirm the list is intentionally unkeyed
+ // key group-id;
+ uses batch-group-input-update-grouping;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-group-output-list-grouping;
+ }
+ }
+}
--- /dev/null
+module sal-meters-batch {
+ namespace "urn:opendaylight:meters:service";
+ prefix meters;
+
+ import barrier-common {prefix bc;revision-date "2016-03-15";}
+ import batch-common {prefix batch;revision-date "2016-03-22";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+ import opendaylight-meter-types {prefix meter-type;revision-date "2013-09-18";}
+
+ description "Openflow batch meter management.";
+
+ revision "2016-03-16" {
+ description "Initial revision of meter batch service";
+ }
+
+ // update variant: carries both the original and the updated meter body
+ grouping batch-meter-input-update-grouping {
+ description "Update openflow meter structure suitable for batch rpc input.";
+
+ // meter-id is included in meter-type:meter
+ container original-batched-meter {
+ uses meter-type:meter;
+ }
+ container updated-batched-meter {
+ uses meter-type:meter;
+ }
+ }
+
+ // output lists only the FAILED items, keyed by their batch-order
+ grouping batch-meter-output-list-grouping {
+ description "Openflow meter list suitable for batch rpc output.";
+
+ list batch-failed-meters-output {
+ key batch-order;
+
+ uses batch:batch-order-grouping;
+ leaf meter-id {
+ type meter-type:meter-id;
+ }
+ }
+ }
+
+ rpc add-meters-batch {
+ description "Adding batch meters to openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-add-meters {
+ key meter-id;
+
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses meter-type:meter;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-meter-output-list-grouping;
+ }
+ }
+
+ rpc remove-meters-batch {
+ description "Removing batch meter from openflow device.";
+ input {
+ uses "inv:node-context-ref";
+
+ list batch-remove-meters {
+ key meter-id;
+
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses meter-type:meter;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-meter-output-list-grouping;
+ }
+ }
+
+ rpc update-meters-batch {
+ description "Updating batch meter on openflow device.";
+ input {
+ uses "inv:node-context-ref";
+ // NOTE(review): unlike add/remove lists above, this list declares no key —
+ // confirm the omission is intentional
+ list batch-update-meters {
+ leaf meter-ref {
+ type meter-type:meter-ref;
+ }
+ uses batch-meter-input-update-grouping;
+ }
+ uses bc:barrier-suffix;
+ }
+ output {
+ uses batch-meter-output-list-grouping;
+ }
+ }
+}
revision "2013-12-15" {
description "Initial revision of flow table statistics model";
}
-
+
augment "/inv:nodes/inv:node/flow-node:table" {
description "Openflow flow table statistics data into the table node.";
ext:augment-identifier "flow-table-statistics-data";
uses flow-table-statistics;
}
-
- augment "/inv:nodes/inv:node/flow-node:table" {
- description "Openflow flow table features data into the table node.";
- ext:augment-identifier "node-table-features";
- container table-feature-container {
- uses table-types:table-features;
- }
- }
-
+
grouping flow-table-statistics {
description "TODO:: simplify.";
container flow-table-statistics {
uses stat-types:generic-table-statistics;
}
- }
-
+ }
+
grouping flow-table-and-statistics-map {
status deprecated;
description "RPC calls to fetch flow table statistics.";
uses stat-types:generic-table-statistics;
}
}
-
+
rpc get-flow-tables-statistics {
status deprecated;
description "Fetch statistics of all the flow tables present on the tarnet node";
uses tr:transaction-aware;
}
}
-
+
//Notification to receive table statistics update
-
+
notification flow-table-statistics-update {
status deprecated;
description "Receive flow table statistics update";
-
+
uses inv:node;
uses flow-table-and-statistics-map;
uses tr:multipart-transaction-aware;
import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginTimer;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceReplyProcessor;
-import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.MultiMsgCollector;
import org.opendaylight.openflowplugin.api.openflow.registry.ItemLifeCycleRegistry;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
* Created by Martin Bobak <mbobak@cisco.com> on 25.2.2015.
*/
public interface DeviceContext extends AutoCloseable,
- OpenFlowPluginTimer,
DeviceReplyProcessor,
- PortNumberCache {
+ PortNumberCache,
+ XidSequencer {
+
+ void setStatisticsRpcEnabled(boolean isStatisticsRpcEnabled);
/**
* distinguished device context states
/**
* Method creates put operation using provided data in underlying transaction chain.
*/
- <T extends DataObject> void writeToTransaction(final LogicalDatastoreType store, final InstanceIdentifier<T> path, final T data);
+ <T extends DataObject> void writeToTransaction(final LogicalDatastoreType store, final InstanceIdentifier<T> path, final T data) throws Exception;
/**
* Method creates delete operation for provided path in underlying transaction chain.
*/
- <T extends DataObject> void addDeleteToTxChain(final LogicalDatastoreType store, final InstanceIdentifier<T> path);
+ <T extends DataObject> void addDeleteToTxChain(final LogicalDatastoreType store, final InstanceIdentifier<T> path) throws Exception;
/**
* Method submits Transaction to DataStore.
*/
Timeout getBarrierTaskTimeout();
- /**
- * Sets notification service
- *
- * @param notificationService
- */
- void setNotificationService(NotificationService notificationService);
-
void setNotificationPublishService(NotificationPublishService notificationPublishService);
MessageSpy getMessageSpy();
- /**
- * Method sets reference to handler used for cleanup after device context about to be closed.
- */
- void addDeviceContextClosedHandler(DeviceTerminationPhaseHandler deviceContextClosedHandler);
-
MultiMsgCollector getMultiMsgCollector(final RequestContext<List<MultipartReply>> requestContext);
- /**
- * Method is reserved unique XID for Device Message.
- * Attention: OFJava expect the message, otherwise OutboundQueue could stop working.
- * @return Reserved XID
- */
- Long reservedXidForDeviceMessage();
-
/**
* indicates that device context is fully published (e.g.: packetIn messages should be passed)
*/
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceLifecycleSupervisor;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.translator.TranslatorLibrarian;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
/**
* This interface is responsible for instantiating DeviceContext and
public interface DeviceManager extends DeviceConnectedHandler, DeviceDisconnectedHandler, DeviceLifecycleSupervisor,
DeviceInitializationPhaseHandler, DeviceTerminationPhaseHandler, TranslatorLibrarian, AutoCloseable {
- /**
- * Sets notification receiving service
- *
- * @param notificationService
- */
- void setNotificationService(NotificationService notificationService);
-
/**
* Sets notification publish service
*
* invoked after all services injected
*/
void initialize();
+
+    /**
+     * Returns the device context from the map maintained in the device manager.
+     * This prevents sending the whole device context to another context.
+     * If no device context exists for nodeId, it returns null.
+     * @param nodeId node identification
+     * @return device context or null
+     */
+ DeviceContext getDeviceContextFromNodeId(NodeId nodeId);
+
+ void setStatisticsRpcEnabled(boolean isStatisticsRpcEnabled);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow.device;
+
+/**
+ * Reserves unique XID for Device Messages.
+ */
+public interface XidSequencer {
+
+ /**
+ * Reserves a unique XID for a device message.
+ * Attention: OFJava expects the message to follow; otherwise the OutboundQueue could stop working.
+ * @return reserved XID
+ */
+ Long reserveXidForDeviceMessage();
+}
* Method is used to propagate information about established connection with device.
* It propagates connected device's connection context.
*/
- void deviceConnected(ConnectionContext connectionContext) throws Exception;
+ boolean deviceConnected(ConnectionContext connectionContext) throws Exception;
}
import javax.annotation.CheckForNull;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
/**
* openflowplugin-api
/**
* Method represents an initialization cycle for {@link DeviceContext} preparation for use.
*
- * @param deviceContext
+ * @param nodeId
* @throws Exception - needs to be catch in ConnectionHandler implementation
*/
- void onDeviceContextLevelUp(@CheckForNull DeviceContext deviceContext) throws Exception;
+ void onDeviceContextLevelUp(@CheckForNull NodeId nodeId) throws Exception;
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow.lifecycle;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+
+/**
+ * This API is for all listeners who wish to know about device context in cluster
+ */
+public interface DeviceContextChangeListener {
+
+ /**
+ * Notification about the device context start phase, fired right after a successful handshake.
+ * @param nodeId node identification
+ * @param success true on success, false on failure
+ */
+ void deviceStartInitializationDone(final NodeId nodeId, final boolean success);
+
+ /**
+ * Notification about the device context start phase, fired after all other contexts initialized properly.
+ * @param nodeId node identification
+ * @param success true on success, false on failure
+ */
+ void deviceInitializationDone(final NodeId nodeId, final boolean success);
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow.lifecycle;
+
+import io.netty.util.Timeout;
+import io.netty.util.TimerTask;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
+import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageIntelligenceAgency;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+
+import javax.annotation.Nonnull;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This class is a binder between all managers
+ * Should be defined in OpenFlowPluginProviderImpl
+ */
+public interface LifecycleConductor {
+
+ /**
+ * Returns the device context from the device manager's device contexts map.
+ * @param nodeId node identification
+ * @return null if the context doesn't exist
+ */
+ DeviceContext getDeviceContext(final NodeId nodeId);
+
+ /**
+ * Registers a one-time listener notified when services (RPCs, statistics)
+ * have fully started or stopped for the given node.
+ * @param manager service change listener
+ * @param nodeId node identification
+ */
+ void addOneTimeListenerWhenServicesChangesDone(final ServiceChangeListener manager, final NodeId nodeId);
+
+ /**
+ * Returns the protocol version of the device.
+ * @param nodeId node identification
+ * @return null if the device context doesn't exist
+ */
+ Short gainVersionSafely(final NodeId nodeId);
+
+ /**
+ * Sets a new timeout on the shared {@link io.netty.util.HashedWheelTimer}.
+ * @param task timer task
+ * @param delay delay
+ * @param unit time unit
+ * @return new timeout
+ */
+ Timeout newTimeout(@Nonnull TimerTask task, long delay, @Nonnull TimeUnit unit);
+
+ /**
+ * Returns the message intelligence agency.
+ * @return MessageIntelligenceAgency set by constructor
+ */
+ MessageIntelligenceAgency getMessageIntelligenceAgency();
+
+ /**
+ * Interrupts the connection for the node.
+ * @param nodeId node identification
+ */
+ void closeConnection(final NodeId nodeId);
+
+ /**
+ * Setter for the device manager; once set it can't be unset or overwritten.
+ * @param deviceManager should be set in OpenFlowPluginProviderImpl
+ */
+ void setSafelyDeviceManager(final DeviceManager deviceManager);
+
+ /**
+ * Reserves an XID from the device's outbound queue.
+ * @param nodeId node identification
+ * @return reserved XID, or null if the device context doesn't exist
+ */
+ Long reserveXidForDeviceMessage(final NodeId nodeId);
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow.lifecycle;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+
+/**
+ * This API is for all listeners who wish to know about role change in cluster
+ */
+public interface RoleChangeListener {
+
+ /**
+ * Notification fired when initialization of the role context is done.
+ * @param nodeId node identification
+ * @param success true on success, false on failure
+ */
+ void roleInitializationDone(final NodeId nodeId, final boolean success);
+
+ /**
+ * Notification fired when the role change on the device is done.
+ * @param nodeId node identification
+ * @param success true on success, false on failure
+ * @param newRole role that was set on the device
+ * @param initializationPhase true if the change happened during the initialization phase
+ */
+ void roleChangeOnDevice(final NodeId nodeId, final boolean success, final OfpRole newRole, final boolean initializationPhase);
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow.lifecycle;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+
+/**
+ * This API is defined for listening when services (Statistics and RPCs) are fully stopped
+ * or fully started. Role manager use it for unregister tx entity on shutdown when all is stopped.
+ */
+public interface ServiceChangeListener {
+
+ /**
+ * Notification fired when services (RPCs, statistics) have started or stopped working.
+ * @param nodeId node identification
+ * @param success true if the change completed successfully
+ */
+ void servicesChangeDone(NodeId nodeId, boolean success);
+
+}
+++ /dev/null
-/**
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.openflowplugin.api.openflow.role;
-
-import org.opendaylight.controller.md.sal.common.api.clustering.Entity;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
-
-/**
- * Double candidate aproach brings protected role change for all node cluster instances.
- *
- * Created by kramesha on 9/19/15.
- */
-public interface RoleChangeListener extends AutoCloseable {
-
- /**
- * Method has to be called from MainCandidate Leaderhip notification {@link OfpRole#BECOMEMASTER}
- * It locks any MainCandidate changes and it registrates TxCandidate to Cluster.
- */
- void onDeviceTryToTakeClusterLeadership();
-
- /**
- * Method has to be called from TxCandidate Leadership notification {@link OfpRole#BECOMEMASTER}
- * and propagate {@link OfpRole#BECOMEMASTER} to device. When device accepts new role, it has to
- * notifies whole DeviceContext suite to take Leadership responsibility
- */
- void onDeviceTakeClusterLeadership();
-
- /**
- * Method has to be called from MainCandidate Leadership notification {@link OfpRole#BECOMESLAVE}
- * It locks any MainCandidate and TxCandidate changes and it starts propagate LostClusterLeadership
- * to Device and whole DeviceContext suite.
- */
- void onDeviceLostClusterLeadership();
-
- /**
- * We need to know when the candidate is registrated or in close process
- * @return true/false
- */
- boolean isMainCandidateRegistered();
-
- /**
- * We need to know when the candidate is registrated or in close process
- * @return true/false
- */
- boolean isTxCandidateRegistered();
-
- Entity getEntity();
-
- Entity getTxEntity();
-
- DeviceState getDeviceState();
-
- @Override
- void close();
-}
*/
package org.opendaylight.openflowplugin.api.openflow.role;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.controller.md.sal.common.api.clustering.Entity;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SalRoleService;
+
+import javax.annotation.Nonnull;
/**
- * Created by kramesha on 9/12/15.
+ * Role context, rewritten to prevent errors when changing roles in a cluster.
*/
-public interface RoleContext extends RoleChangeListener, RequestContextStack {
+public interface RoleContext extends RequestContextStack, AutoCloseable {
/**
* Initialization method is responsible for a registration of
* {@link org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService}
* returns Role which has to be applied for responsible Device Context suite. Any Exception
* state has to close Device connection channel.
+ * @return true if initialization done ok
*/
- void initializationRoleContext();
+ boolean initialization();
/**
- * Termination method is responsible for an unregistrion of
+ * Termination method is responsible for an unregistration of
* {@link org.opendaylight.controller.md.sal.common.api.clustering.Entity} and listener
* for notification from service
* {@link org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService}
* returns notification "Someone else take Leadership" or "I'm last"
* and we need to clean Oper. DS.
*/
- void terminationRoleContext();
+ void unregisterAllCandidates();
- @Override
- void close();
+ /**
+ * Setter for sal role service
+ * @param salRoleService
+ */
+ void setSalRoleService(@Nonnull final SalRoleService salRoleService);
+
+ /**
+ * Getter for sal role service
+ * @return
+ */
+ SalRoleService getSalRoleService();
+
+ /**
+ * Getter for main entity
+ * @return
+ */
+ Entity getEntity();
+
+ /**
+ * Getter for tx entity
+ * @return
+ */
+ Entity getTxEntity();
+
+ /**
+ * Actual nodeId
+ * @return
+ */
+ NodeId getNodeId();
+
+ /**
+ * Returns true if main entity is registered
+ * @return
+ */
+ boolean isMainCandidateRegistered();
+
+ /**
+ * Returns true if tx entity is registered
+ * @return
+ */
+ boolean isTxCandidateRegistered();
+
+ /**
+ * Register candidate depending on parameter
+ * @param entity
+ * @return true is registration was successful
+ */
+ boolean registerCandidate(final Entity entity);
- DeviceContext getDeviceContext();
+ /**
+ * Unregister candidate depending on parameter
+ * @param entity
+ * @return true is registration was successful
+ */
+ boolean unregisterCandidate(final Entity entity);
- OfpRole getClusterRole();
+ /**
+ * Returns true if we hold both registrations
+ * @return
+ */
+ boolean isMaster();
+ @Override
+ void close();
}
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceLifecycleSupervisor;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.RoleChangeListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
/**
* Created by kramesha on 8/31/15.
*/
public interface RoleManager extends DeviceLifecycleSupervisor, DeviceInitializationPhaseHandler, AutoCloseable,
DeviceTerminationPhaseHandler {
- public static final String ENTITY_TYPE = "openflow";
- public static final String TX_ENTITY_TYPE = "ofTransaction";
+ String ENTITY_TYPE = "openflow";
+ String TX_ENTITY_TYPE = "ofTransaction";
+
+    /**
+     * Adds a listener to be notified of role changes.
+     * Listener API: {@link RoleChangeListener}
+     * @param roleChangeListener listener to register
+     */
+ void addRoleChangeListener(RoleChangeListener roleChangeListener);
+
}
<S extends RpcService> S lookupRpcService(Class<S> serviceClass);
<S extends RpcService> void unregisterRpcServiceImplementation(Class<S> serviceClass);
- void registerStatCompatibilityServices();
-
@Override
void close();
}
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceLifecycleSupervisor;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
/**
* The RPC Manager will maintain an RPC Context for each online switch. RPC context for device is created when
- * {@link org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler#onDeviceContextLevelUp(org.opendaylight.openflowplugin.api.openflow.device.DeviceContext)}
+ * {@link org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler#onDeviceContextLevelUp(NodeId)}
* is called.
* <p>
* Created by Martin Bobak <mbobak@cisco.com> on 25.2.2015.
*/
public interface RpcManager extends DeviceLifecycleSupervisor, DeviceInitializationPhaseHandler, AutoCloseable, DeviceTerminationPhaseHandler {
- void setStatisticsRpcEnabled(boolean isStatisticsRpcEnabled);
-
- void setNotificationPublishService(NotificationPublishService notificationPublishService);
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timeout;
+import io.netty.util.TimerTask;
+import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.DeviceContextChangeListener;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.RoleChangeListener;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.ServiceChangeListener;
+import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageIntelligenceAgency;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Binder between all managers (device, rpc, statistics, role): keeps the shared
+ * {@link HashedWheelTimer}, routes role/device lifecycle notifications between
+ * contexts and notifies one-time service-change listeners.
+ * Instantiated once in OpenFlowPluginProviderImpl.
+ */
+public final class LifecycleConductorImpl implements LifecycleConductor, RoleChangeListener, DeviceContextChangeListener {
+
+    private static final Logger LOG = LoggerFactory.getLogger(LifecycleConductorImpl.class);
+    private static final int TICKS_PER_WHEEL = 500;
+    // tick duration in milliseconds (one wheel rotation = 500 ticks * 10 ms = 5 s)
+    private static final long TICK_DURATION = 10;
+
+    private final HashedWheelTimer hashedWheelTimer = new HashedWheelTimer(TICK_DURATION, TimeUnit.MILLISECONDS, TICKS_PER_WHEEL);
+    private DeviceManager deviceManager;
+    private final MessageIntelligenceAgency messageIntelligenceAgency;
+    // one-shot listeners: an entry is removed as soon as its node is notified
+    private final ConcurrentHashMap<NodeId, ServiceChangeListener> serviceChangeListeners = new ConcurrentHashMap<>();
+
+    public LifecycleConductorImpl(final MessageIntelligenceAgency messageIntelligenceAgency) {
+        Preconditions.checkNotNull(messageIntelligenceAgency);
+        this.messageIntelligenceAgency = messageIntelligenceAgency;
+    }
+
+    /** Sets the device manager exactly once; subsequent calls are ignored. */
+    @Override
+    public void setSafelyDeviceManager(final DeviceManager deviceManager) {
+        if (this.deviceManager == null) {
+            this.deviceManager = deviceManager;
+        }
+    }
+
+    /** Registers a one-time service-change listener for the given node. */
+    @Override
+    public void addOneTimeListenerWhenServicesChangesDone(final ServiceChangeListener manager, final NodeId nodeId){
+        LOG.debug("Listener {} for service change for node {} registered.", manager, nodeId);
+        serviceChangeListeners.put(nodeId, manager);
+    }
+
+    /** Notifies and removes the one-shot listener registered for nodeId, if any. */
+    private void notifyServiceChangeListeners(final NodeId nodeId, final boolean success){
+        if (serviceChangeListeners.isEmpty()) {
+            return;
+        }
+        LOG.debug("Notifying registered listeners for service change, no. of listeners {}", serviceChangeListeners.size());
+        // atomically claim the one-shot listener for this node (at most one per node)
+        final ServiceChangeListener listener = serviceChangeListeners.remove(nodeId);
+        if (listener != null) {
+            LOG.debug("Listener {} for service change for node {} was notified. Success was set on {}", listener, nodeId, success);
+            listener.servicesChangeDone(nodeId, success);
+        }
+    }
+
+    @Override
+    public void roleInitializationDone(final NodeId nodeId, final boolean success) {
+        if (!success) {
+            LOG.warn("Initialization phase for node {} in role context was NOT successful, closing connection.", nodeId);
+            closeConnection(nodeId);
+        } else {
+            LOG.info("initialization phase for node {} in role context was successful, continuing to next context.", nodeId);
+        }
+    }
+
+    /** Shuts down the primary connection of the node, if a device context exists. */
+    @Override
+    public void closeConnection(final NodeId nodeId) {
+        LOG.debug("Close connection called for node {}", nodeId);
+        final DeviceContext deviceContext = getDeviceContext(nodeId);
+        if (null != deviceContext) {
+            deviceContext.shutdownConnection();
+        }
+    }
+
+    @Override
+    public void roleChangeOnDevice(final NodeId nodeId, final boolean success, final OfpRole newRole, final boolean initializationPhase) {
+
+        final DeviceContext deviceContext = getDeviceContext(nodeId);
+
+        if (null == deviceContext) {
+            // fixed: original warn call had a {} placeholder but no argument
+            LOG.warn("Something went wrong, device context for nodeId: {} doesn't exist", nodeId);
+            return;
+        }
+        if (!success) {
+            LOG.warn("Role change to {} in role context for node {} was NOT successful, closing connection", newRole, nodeId);
+            closeConnection(nodeId);
+        } else {
+            if (initializationPhase) {
+                LOG.debug("Initialization phase skipping starting services.");
+                return;
+            }
+            LOG.info("Role change to {} in role context for node {} was successful, staring/stopping services.", newRole, nodeId);
+
+            //TODO: This is old way to check if statistics is running, remove after statistics changes implemented
+            final DeviceState deviceState = deviceContext.getDeviceState();
+            if (null != deviceState) {
+                // deviceContext is already known non-null here, so no re-lookup is needed
+                if (OfpRole.BECOMEMASTER.equals(newRole)) {
+                    deviceState.setRole(OfpRole.BECOMEMASTER);
+                } else {
+                    deviceState.setRole(OfpRole.BECOMESLAVE);
+                }
+            }
+
+            final ListenableFuture<Void> onClusterRoleChange = deviceContext.onClusterRoleChange(null, newRole);
+            Futures.addCallback(onClusterRoleChange, new FutureCallback<Void>() {
+                @Override
+                public void onSuccess(@Nullable final Void aVoid) {
+                    LOG.info("Starting/Stopping services for node {} was successful", nodeId);
+                    if (newRole.equals(OfpRole.BECOMESLAVE)) {
+                        notifyServiceChangeListeners(nodeId, true);
+                    }
+                }
+
+                @Override
+                public void onFailure(final Throwable throwable) {
+                    LOG.warn("Starting/Stopping services for node {} was NOT successful, closing connection", nodeId);
+                    closeConnection(nodeId);
+                }
+            });
+        }
+    }
+
+    @Override
+    public MessageIntelligenceAgency getMessageIntelligenceAgency() {
+        return messageIntelligenceAgency;
+    }
+
+    @Override
+    public DeviceContext getDeviceContext(final NodeId nodeId){
+        return deviceManager.getDeviceContextFromNodeId(nodeId);
+    }
+
+    /** @return protocol version of the device, or null if no context exists */
+    @Override
+    public Short gainVersionSafely(final NodeId nodeId) {
+        return (null != getDeviceContext(nodeId)) ? getDeviceContext(nodeId).getPrimaryConnectionContext().getFeatures().getVersion() : null;
+    }
+
+    @Override
+    public Timeout newTimeout(@Nonnull TimerTask task, long delay, @Nonnull TimeUnit unit) {
+        return hashedWheelTimer.newTimeout(task, delay, unit);
+    }
+
+    /** @return connection state of the node, or null if no context exists */
+    public ConnectionContext.CONNECTION_STATE gainConnectionStateSafely(final NodeId nodeId){
+        return (null != getDeviceContext(nodeId)) ? getDeviceContext(nodeId).getPrimaryConnectionContext().getConnectionState() : null;
+    }
+
+    /** @return reserved XID from the device's outbound queue, or null if no context exists */
+    @Override
+    public Long reserveXidForDeviceMessage(final NodeId nodeId){
+        return null != getDeviceContext(nodeId) ? getDeviceContext(nodeId).reserveXidForDeviceMessage() : null;
+    }
+
+    @Override
+    public void deviceStartInitializationDone(final NodeId nodeId, final boolean success) {
+        if (!success) {
+            LOG.warn("Initialization phase for node {} in device context was NOT successful, closing connection.", nodeId);
+            closeConnection(nodeId);
+        } else {
+            LOG.info("initialization phase for node {} in device context was successful. Continuing to next context.", nodeId);
+        }
+    }
+
+    @Override
+    public void deviceInitializationDone(final NodeId nodeId, final boolean success) {
+        if (!success) {
+            LOG.warn("Initialization phase for node {} in device context was NOT successful, closing connection.", nodeId);
+            closeConnection(nodeId);
+        } else {
+            LOG.info("initialization phase for node {} in device context was successful. All phases initialized OK.", nodeId);
+        }
+    }
+}
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import javax.annotation.Nonnull;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginProvider;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionManager;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.RoleChangeListener;
import org.opendaylight.openflowplugin.api.openflow.role.RoleManager;
import org.opendaylight.openflowplugin.api.openflow.rpc.RpcManager;
import org.opendaylight.openflowplugin.api.openflow.statistics.StatisticsManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 27.3.2015.
- */
public class OpenFlowPluginProviderImpl implements OpenFlowPluginProvider, OpenFlowPluginExtensionRegistratorProvider {
private static final Logger LOG = LoggerFactory.getLogger(OpenFlowPluginProviderImpl.class);
private boolean isStatisticsPollingOff = false;
private boolean isStatisticsRpcEnabled;
+ private final LifecycleConductor conductor;
+
public OpenFlowPluginProviderImpl(final long rpcRequestsQuota, final Long globalNotificationQuota) {
Preconditions.checkArgument(rpcRequestsQuota > 0 && rpcRequestsQuota <= Integer.MAX_VALUE, "rpcRequestQuota has to be in range <1,%s>", Integer.MAX_VALUE);
this.rpcRequestsQuota = (int) rpcRequestsQuota;
this.globalNotificationQuota = Preconditions.checkNotNull(globalNotificationQuota);
+ conductor = new LifecycleConductorImpl(messageIntelligenceAgency);
}
@Override
}
@Override
- public void onFailure(final Throwable t) {
+ public void onFailure(@Nonnull final Throwable t) {
LOG.warn("Some switchConnectionProviders failed to start.", t);
}
});
registerMXBean(messageIntelligenceAgency);
- deviceManager = new DeviceManagerImpl(dataBroker, messageIntelligenceAgency, globalNotificationQuota,
- switchFeaturesMandatory, barrierInterval, barrierCountLimit);
+ deviceManager = new DeviceManagerImpl(dataBroker,
+ globalNotificationQuota,
+ switchFeaturesMandatory,
+ barrierInterval,
+ barrierCountLimit,
+ conductor);
((ExtensionConverterProviderKeeper) deviceManager).setExtensionConverterProvider(extensionConverterManager);
- roleManager = new RoleManagerImpl(entityOwnershipService, dataBroker);
- statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, isStatisticsPollingOff);
- rpcManager = new RpcManagerImpl(rpcProviderRegistry, rpcRequestsQuota);
+
+ conductor.setSafelyDeviceManager(deviceManager);
+
+ roleManager = new RoleManagerImpl(entityOwnershipService, dataBroker, conductor);
+ statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, isStatisticsPollingOff, conductor);
+ rpcManager = new RpcManagerImpl(rpcProviderRegistry, rpcRequestsQuota, conductor);
+
+ roleManager.addRoleChangeListener((RoleChangeListener) conductor);
/* Initialization Phase ordering - OFP Device Context suite */
// CM -> DM -> SM -> RPC -> Role -> DM
statisticsManager.setDeviceTerminationPhaseHandler(roleManager);
roleManager.setDeviceTerminationPhaseHandler(deviceManager);
- rpcManager.setStatisticsRpcEnabled(isStatisticsRpcEnabled);
- rpcManager.setNotificationPublishService(notificationPublishService);
-
- deviceManager.setNotificationService(this.notificationProviderService);
- deviceManager.setNotificationPublishService(this.notificationPublishService);
+ deviceManager.setStatisticsRpcEnabled(isStatisticsRpcEnabled);
+ deviceManager.setNotificationPublishService(notificationPublishService);
TranslatorLibraryUtil.setBasicTranslatorLibrary(deviceManager);
deviceManager.initialize();
import java.math.BigInteger;
import java.net.InetSocketAddress;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
import org.opendaylight.openflowjava.protocol.api.connection.ConnectionAdapter;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueueHandlerRegistration;
}
@Override
- public void closeConnection(boolean propagate) {
+ public void closeConnection(final boolean propagate) {
if (null == nodeId){
SessionStatistics.countEvent(connectionAdapter.getRemoteAddress().toString(), SessionStatistics.ConnectionStatus.CONNECTION_DISCONNECTED_BY_OFP);
} else {
connectionAdapter.getRemoteAddress(), datapathId);
connectionState = ConnectionContext.CONNECTION_STATE.RIP;
- unregisterOutboundQueue();
+ Future<Void> future = Executors.newSingleThreadExecutor().submit(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ unregisterOutboundQueue();
+ return null;
+ }
+ });
+ try {
+ LOG.debug("Waiting 1s for unregistering outbound queue.");
+ future.get(1, TimeUnit.SECONDS);
+ LOG.info("Unregistering outbound queue successful.");
+ } catch (InterruptedException e) {
+ LOG.warn("Unregistering outbound queue was interrupted for node {}", nodeId);
+ } catch (ExecutionException e) {
+ LOG.warn("Unregistering outbound queue throws exception for node {}", nodeId, e);
+ } catch (TimeoutException e) {
+ LOG.warn("Unregistering outbound queue took longer than 1 seconds for node {}", nodeId);
+ }
+
closeHandshakeContext();
if (getConnectionAdapter().isAlive()) {
}
if (propagate) {
+ LOG.debug("Propagating device disconnect for node {}", nodeId);
propagateDeviceDisconnectedEvent();
+ } else {
+ LOG.debug("Close connection without propagating for node {}", nodeId);
}
}
private void closeHandshakeContext() {
+ LOG.debug("Trying closing handshake context for node {}", nodeId);
if (handshakeContext != null) {
try {
handshakeContext.close();
}
private void unregisterOutboundQueue() {
+ LOG.debug("Trying unregister outbound queue handler registration for node {}", nodeId);
if (outboundQueueHandlerRegistration != null) {
outboundQueueHandlerRegistration.close();
outboundQueueHandlerRegistration = null;
for (;;) {
OutboundQueue queue = outboundQueue;
if (queue == null) {
- LOG.debug("No queue present, failing request");
+ LOG.error("No queue present, failing request");
return null;
}
public void onSuccess(@Nullable final RpcResult<BarrierOutput> result) {
LOG.debug("succeeded by getting sweep barrier after posthandshake for device {}", connectionContext.getNodeId());
try {
- deviceConnectedHandler.deviceConnected(connectionContext);
+ if (!deviceConnectedHandler.deviceConnected(connectionContext)) {
+ connectionContext.closeConnection(true);
+ }
SessionStatistics.countEvent(connectionContext.getNodeId().toString(),
SessionStatistics.ConnectionStatus.CONNECTION_CREATED);
} catch (final Exception e) {
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
-import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.FutureFallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import java.math.BigInteger;
-import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.openflowjava.protocol.api.connection.ConnectionAdapter;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
import org.opendaylight.openflowplugin.api.openflow.device.TranslatorLibrary;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
-import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.MultiMsgCollector;
import org.opendaylight.openflowplugin.api.openflow.md.core.SwitchConnectionDistinguisher;
import org.opendaylight.openflowplugin.api.openflow.md.core.TranslatorKey;
import org.opendaylight.openflowplugin.impl.registry.flow.FlowRegistryKeyFactory;
import org.opendaylight.openflowplugin.impl.registry.group.DeviceGroupRegistryImpl;
import org.opendaylight.openflowplugin.impl.registry.meter.DeviceMeterRegistryImpl;
+import org.opendaylight.openflowplugin.impl.rpc.RpcContextImpl;
import org.opendaylight.openflowplugin.impl.util.DeviceInitializationUtils;
import org.opendaylight.openflowplugin.impl.util.MdSalRegistrationUtils;
import org.opendaylight.openflowplugin.openflow.md.core.session.SwitchConnectionCookieOFImpl;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
private static final Logger LOG = LoggerFactory.getLogger(DeviceContextImpl.class);
// TODO: drain factor should be parametrized
- public static final float REJECTED_DRAIN_FACTOR = 0.25f;
+ private static final float REJECTED_DRAIN_FACTOR = 0.25f;
// TODO: low water mark factor should be parametrized
private static final float LOW_WATERMARK_FACTOR = 0.75f;
// TODO: high water mark factor should be parametrized
private final ConnectionContext primaryConnectionContext;
private final DeviceState deviceState;
private final DataBroker dataBroker;
- private final HashedWheelTimer hashedWheelTimer;
private final Map<SwitchConnectionDistinguisher, ConnectionContext> auxiliaryConnectionContexts;
private final TransactionChainManager transactionChainManager;
private final DeviceFlowRegistry deviceFlowRegistry;
private final DeviceGroupRegistry deviceGroupRegistry;
private final DeviceMeterRegistry deviceMeterRegistry;
- private final Collection<DeviceTerminationPhaseHandler> closeHandlers = new HashSet<>();
private final PacketInRateLimiter packetInLimiter;
private final MessageSpy messageSpy;
private final ItemLifeCycleKeeper flowLifeCycleKeeper;
private NotificationPublishService notificationPublishService;
- private NotificationService notificationService;
private final OutboundQueue outboundQueueProvider;
private Timeout barrierTaskTimeout;
private final MessageTranslator<PortGrouping, FlowCapableNodeConnector> portStatusTranslator;
private final boolean switchFeaturesMandatory;
private StatisticsContext statisticsContext;
+ private final NodeId nodeId;
+
private volatile DEVICE_CONTEXT_STATE deviceCtxState;
+ private boolean isStatisticsRpcEnabled;
@VisibleForTesting
DeviceContextImpl(@Nonnull final ConnectionContext primaryConnectionContext,
@Nonnull final DeviceState deviceState,
@Nonnull final DataBroker dataBroker,
- @Nonnull final HashedWheelTimer hashedWheelTimer,
@Nonnull final MessageSpy _messageSpy,
@Nonnull final OutboundQueueProvider outboundQueueProvider,
@Nonnull final TranslatorLibrary translatorLibrary,
this.primaryConnectionContext = Preconditions.checkNotNull(primaryConnectionContext);
this.deviceState = Preconditions.checkNotNull(deviceState);
this.dataBroker = Preconditions.checkNotNull(dataBroker);
- this.hashedWheelTimer = Preconditions.checkNotNull(hashedWheelTimer);
this.outboundQueueProvider = Preconditions.checkNotNull(outboundQueueProvider);
this.transactionChainManager = new TransactionChainManager(dataBroker, deviceState);
auxiliaryConnectionContexts = new HashMap<>();
flowLifeCycleKeeper = new ItemLifeCycleSourceImpl();
itemLifeCycleSourceRegistry.registerLifeCycleSource(flowLifeCycleKeeper);
deviceCtxState = DEVICE_CONTEXT_STATE.INITIALIZATION;
+
+ nodeId = primaryConnectionContext.getNodeId();
}
/**
}
@Override
- public Long reservedXidForDeviceMessage() {
+ public Long reserveXidForDeviceMessage() {
return outboundQueueProvider.reserveEntry();
}
@Override
public void removeAuxiliaryConnectionContext(final ConnectionContext connectionContext) {
final SwitchConnectionDistinguisher connectionDistinguisher = createConnectionDistinguisher(connectionContext);
- if (null != connectionDistinguisher) {
- LOG.debug("auxiliary connection dropped: {}, nodeId:{}", connectionContext.getConnectionAdapter()
- .getRemoteAddress(), getDeviceState().getNodeId());
- auxiliaryConnectionContexts.remove(connectionDistinguisher);
- }
+ LOG.debug("auxiliary connection dropped: {}, nodeId:{}", connectionContext.getConnectionAdapter()
+ .getRemoteAddress(), nodeId);
+ auxiliaryConnectionContexts.remove(connectionDistinguisher);
}
@Override
@Override
public ListenableFuture<Void> onClusterRoleChange(final OfpRole oldRole, @CheckForNull final OfpRole role) {
- LOG.trace("onClusterRoleChange {} for node:", role, deviceState.getNodeId());
+ LOG.trace("onClusterRoleChange {} for node:", role, nodeId);
Preconditions.checkArgument(role != null);
if (role.equals(oldRole)) {
- LOG.debug("Demanded role change for device {} is not changed. OldRole: {}, NewRole {}", deviceState.getNodeId(), oldRole, role);
+ LOG.debug("Demanded role change for device {} is not changed. OldRole: {}, NewRole {}", nodeId, oldRole, role);
return Futures.immediateFuture(null);
}
if (OfpRole.BECOMEMASTER.equals(role)) {
} else if (OfpRole.BECOMESLAVE.equals(role)) {
return onDeviceLostClusterLeadership();
} else {
- LOG.warn("Unknown OFCluster Role {} for Node {}", role, deviceState.getNodeId());
+ LOG.warn("Unknown OFCluster Role {} for Node {}", role, nodeId);
if (null != rpcContext) {
MdSalRegistrationUtils.unregisterServices(rpcContext);
}
@Override
public ListenableFuture<Void> onDeviceLostClusterLeadership() {
- LOG.trace("onDeviceLostClusterLeadership for node: {}", deviceState.getNodeId());
+ LOG.trace("onDeviceLostClusterLeadership for node: {}", nodeId);
if (null != rpcContext) {
MdSalRegistrationUtils.registerSlaveServices(rpcContext, OfpRole.BECOMESLAVE);
}
@Override
public ListenableFuture<Void> onDeviceTakeClusterLeadership() {
- LOG.trace("onDeviceTakeClusterLeadership for node: {}", deviceState.getNodeId());
+ LOG.trace("onDeviceTakeClusterLeadership for node: {}", nodeId);
/* validation */
if (statisticsContext == null) {
- final String errMsg = String.format("DeviceCtx %s is up but we are missing StatisticsContext", deviceState.getNodeId());
+ final String errMsg = String.format("DeviceCtx %s is up but we are missing StatisticsContext", nodeId);
LOG.warn(errMsg);
return Futures.immediateFailedFuture(new IllegalStateException(errMsg));
}
if (rpcContext == null) {
- final String errMsg = String.format("DeviceCtx %s is up but we are missing RpcContext", deviceState.getNodeId());
+ final String errMsg = String.format("DeviceCtx %s is up but we are missing RpcContext", nodeId);
LOG.warn(errMsg);
return Futures.immediateFailedFuture(new IllegalStateException(errMsg));
}
/* Routed RPC registration */
MdSalRegistrationUtils.registerMasterServices(getRpcContext(), DeviceContextImpl.this, OfpRole.BECOMEMASTER);
- getRpcContext().registerStatCompatibilityServices();
+
+ if (isStatisticsRpcEnabled) {
+ MdSalRegistrationUtils.registerStatCompatibilityServices(getRpcContext(), this,
+ notificationPublishService, new AtomicLong());
+ }
/* Prepare init info collecting */
getDeviceState().setDeviceSynchronized(false);
new AsyncFunction<Void, Boolean>() {
@Override
- public ListenableFuture<Boolean> apply(final Void input) throws Exception {
+ public ListenableFuture<Boolean> apply(@Nonnull final Void input) throws Exception {
getStatisticsContext().statListForCollectingInitialization();
return getStatisticsContext().gatherDynamicData();
}
LOG.warn(errMsg);
throw new IllegalStateException(errMsg);
}
- if (!input.booleanValue()) {
+ if (!input) {
final String errMsg = String.format("Get Initial Device %s information fails",
getDeviceState().getNodeId());
LOG.warn(errMsg);
throw new IllegalStateException(errMsg);
}
- LOG.debug("Get Initial Device {} information is successful", getDeviceState().getNodeId());
+ LOG.debug("Get Initial Device {} information is successful", nodeId);
getDeviceState().setDeviceSynchronized(true);
initialSubmitTransaction();
getDeviceState().setStatisticsPollingEnabledProp(true);
@Override
public <T extends DataObject> void writeToTransaction(final LogicalDatastoreType store,
- final InstanceIdentifier<T> path, final T data) {
+ final InstanceIdentifier<T> path, final T data) throws Exception {
transactionChainManager.writeToTransaction(store, path, data);
}
@Override
- public <T extends DataObject> void addDeleteToTxChain(final LogicalDatastoreType store, final InstanceIdentifier<T> path) {
+ public <T extends DataObject> void addDeleteToTxChain(final LogicalDatastoreType store, final InstanceIdentifier<T> path) throws Exception {
transactionChainManager.addDeleteOperationTotTxChain(store, path);
}
final FlowCapableNodeConnector flowCapableNodeConnector = portStatusTranslator.translate(portStatus, this, null);
final KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> iiToNodeConnector = provideIIToNodeConnector(portStatus.getPortNo(), portStatus.getVersion());
- if (portStatus.getReason().equals(PortReason.OFPPRADD) || portStatus.getReason().equals(PortReason.OFPPRMODIFY)) {
- // because of ADD status node connector has to be created
- final NodeConnectorBuilder nConnectorBuilder = new NodeConnectorBuilder().setKey(iiToNodeConnector.getKey());
- nConnectorBuilder.addAugmentation(FlowCapableNodeConnectorStatisticsData.class, new FlowCapableNodeConnectorStatisticsDataBuilder().build());
- nConnectorBuilder.addAugmentation(FlowCapableNodeConnector.class, flowCapableNodeConnector);
- writeToTransaction(LogicalDatastoreType.OPERATIONAL, iiToNodeConnector, nConnectorBuilder.build());
- } else if (portStatus.getReason().equals(PortReason.OFPPRDELETE)) {
- addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, iiToNodeConnector);
+ try {
+ if (portStatus.getReason().equals(PortReason.OFPPRADD) || portStatus.getReason().equals(PortReason.OFPPRMODIFY)) {
+ // because of ADD status node connector has to be created
+ final NodeConnectorBuilder nConnectorBuilder = new NodeConnectorBuilder().setKey(iiToNodeConnector.getKey());
+ nConnectorBuilder.addAugmentation(FlowCapableNodeConnectorStatisticsData.class, new FlowCapableNodeConnectorStatisticsDataBuilder().build());
+ nConnectorBuilder.addAugmentation(FlowCapableNodeConnector.class, flowCapableNodeConnector);
+ writeToTransaction(LogicalDatastoreType.OPERATIONAL, iiToNodeConnector, nConnectorBuilder.build());
+ } else if (portStatus.getReason().equals(PortReason.OFPPRDELETE)) {
+ addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, iiToNodeConnector);
+ }
+ submitTransaction();
+ } catch (final Exception e) {
+ LOG.warn("Error processing port status message: {}", e.getMessage());
}
- submitTransaction();
}
private KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> provideIIToNodeConnector(final long portNo, final short version) {
return;
}
- final ListenableFuture<? extends Object> offerNotification = notificationPublishService.offerNotification(packetReceived);
+ final ListenableFuture<?> offerNotification = notificationPublishService.offerNotification(packetReceived);
if (NotificationPublishService.REJECTED.equals(offerNotification)) {
LOG.debug("notification offer rejected");
messageSpy.spyMessage(packetReceived.getImplementedInterface(), MessageSpy.STATISTIC_GROUP.FROM_SWITCH_NOTIFICATION_REJECTED);
return translatorLibrary;
}
- @Override
- public HashedWheelTimer getTimer() {
- return hashedWheelTimer;
- }
-
@Override
public synchronized void close() {
LOG.debug("closing deviceContext: {}, nodeId:{}",
return barrierTaskTimeout;
}
- @Override
- public void setNotificationService(final NotificationService notificationService) {
- this.notificationService = notificationService;
- }
-
@Override
public void setNotificationPublishService(final NotificationPublishService notificationPublishService) {
this.notificationPublishService = notificationPublishService;
return messageSpy;
}
- @Override
- public void addDeviceContextClosedHandler(final DeviceTerminationPhaseHandler deviceContextClosedHandler) {
- closeHandlers.add(deviceContextClosedHandler);
- }
-
@Override
public void onPublished() {
Verify.verify(DEVICE_CONTEXT_STATE.INITIALIZATION.equals(deviceCtxState));
}
@Override
- public void storeNodeConnectorRef(final Long portNumber, final NodeConnectorRef nodeConnectorRef) {
+ public void storeNodeConnectorRef(@Nonnull final Long portNumber, @Nonnull final NodeConnectorRef nodeConnectorRef) {
nodeConnectorCache.put(
Preconditions.checkNotNull(portNumber),
Preconditions.checkNotNull(nodeConnectorRef));
@Override
public synchronized void shutdownConnection() {
- LOG.trace("shutdown method for node {}", deviceState.getNodeId());
+ LOG.debug("Shutdown method for node {}", nodeId);
deviceState.setValid(false);
if (DEVICE_CONTEXT_STATE.TERMINATION.equals(deviceCtxState)) {
- LOG.debug("DeviceCtx for Node {} is in termination process.", deviceState.getNodeId());
+ LOG.debug("DeviceCtx for Node {} is in termination process.", nodeId);
return;
}
deviceCtxState = DEVICE_CONTEXT_STATE.TERMINATION;
- for (final Iterator<ConnectionContext> iterator = Iterators.consumingIterator(auxiliaryConnectionContexts
- .values().iterator()); iterator.hasNext();) {
- iterator.next().closeConnection(false);
- }
+
if (ConnectionContext.CONNECTION_STATE.RIP.equals(getPrimaryConnectionContext().getConnectionState())) {
LOG.debug("ConnectionCtx for Node {} is in RIP state.", deviceState.getNodeId());
return;
}
/* Terminate Auxiliary Connection */
for (final ConnectionContext connectionContext : auxiliaryConnectionContexts.values()) {
+ LOG.debug("Closing auxiliary connection {}", connectionContext.getNodeId());
connectionContext.closeConnection(false);
}
/* Terminate Primary Connection */
deviceMeterRegistry.close();
}
+ /** Stores the flag that controls registration of the backward-compatible statistics RPC services on role change. */
+ @Override
+ public void setStatisticsRpcEnabled(boolean isStatisticsRpcEnabled) {
+ this.isStatisticsRpcEnabled = isStatisticsRpcEnabled;
+ }
+
@Override
public DEVICE_CONTEXT_STATE getDeviceContextState() {
return deviceCtxState;
*/
package org.opendaylight.openflowplugin.impl.device;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
import java.util.Collections;
import org.opendaylight.openflowplugin.api.openflow.device.TranslatorLibrary;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
-import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageIntelligenceAgency;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.extension.api.ExtensionConverterProviderKeeper;
import org.opendaylight.openflowplugin.extension.api.core.extension.ExtensionConverterProvider;
import org.opendaylight.openflowplugin.impl.connection.OutboundQueueProviderImpl;
private static final Logger LOG = LoggerFactory.getLogger(DeviceManagerImpl.class);
- private static final long TICK_DURATION = 10; // 0.5 sec.
private final long globalNotificationQuota;
private final boolean switchFeaturesMandatory;
- private ScheduledThreadPoolExecutor spyPool;
private final int spyRate = 10;
private final DataBroker dataBroker;
- private final HashedWheelTimer hashedWheelTimer;
private TranslatorLibrary translatorLibrary;
private DeviceInitializationPhaseHandler deviceInitPhaseHandler;
private DeviceTerminationPhaseHandler deviceTerminPhaseHandler;
- private NotificationService notificationService;
private NotificationPublishService notificationPublishService;
private final ConcurrentMap<NodeId, DeviceContext> deviceContexts = new ConcurrentHashMap<>();
- private final MessageIntelligenceAgency messageIntelligenceAgency;
private final long barrierIntervalNanos;
private final int barrierCountLimit;
private ExtensionConverterProvider extensionConverterProvider;
+ private ScheduledThreadPoolExecutor spyPool;
+
+ private final LifecycleConductor conductor;
+ private boolean isStatisticsRpcEnabled;
public DeviceManagerImpl(@Nonnull final DataBroker dataBroker,
- @Nonnull final MessageIntelligenceAgency messageIntelligenceAgency,
final long globalNotificationQuota, final boolean switchFeaturesMandatory,
- final long barrierInterval, final int barrierCountLimit) {
+ final long barrierInterval, final int barrierCountLimit,
+ final LifecycleConductor lifecycleConductor) {
this.switchFeaturesMandatory = switchFeaturesMandatory;
this.globalNotificationQuota = globalNotificationQuota;
this.dataBroker = Preconditions.checkNotNull(dataBroker);
- hashedWheelTimer = new HashedWheelTimer(TICK_DURATION, TimeUnit.MILLISECONDS, 500);
/* merge empty nodes to oper DS to predict any problems with missing parent for Node */
final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
throw new IllegalStateException(e);
}
- this.messageIntelligenceAgency = messageIntelligenceAgency;
this.barrierIntervalNanos = TimeUnit.MILLISECONDS.toNanos(barrierInterval);
this.barrierCountLimit = barrierCountLimit;
+
+ this.conductor = lifecycleConductor;
+ spyPool = new ScheduledThreadPoolExecutor(1);
}
}
@Override
- public void onDeviceContextLevelUp(final DeviceContext deviceContext) throws Exception {
+ public void onDeviceContextLevelUp(final NodeId nodeId) throws Exception {
// final phase - we have to add new Device to MD-SAL DataStore
- LOG.debug("Final phase of DeviceContextLevelUp for Node: {} ", deviceContext.getDeviceState().getNodeId());
- Preconditions.checkNotNull(deviceContext);
+ LOG.debug("Final phase of DeviceContextLevelUp for Node: {} ", nodeId);
+ DeviceContext deviceContext = Preconditions.checkNotNull(deviceContexts.get(nodeId));
((DeviceContextImpl) deviceContext).initialSubmitTransaction();
deviceContext.onPublished();
}
@Override
- public void deviceConnected(@CheckForNull final ConnectionContext connectionContext) throws Exception {
+ public boolean deviceConnected(@CheckForNull final ConnectionContext connectionContext) throws Exception {
Preconditions.checkArgument(connectionContext != null);
- Preconditions.checkState(!deviceContexts.containsKey(connectionContext.getNodeId()),
- "Rejecting connection from node which is already connected and there exist deviceContext for it: {}",
- connectionContext.getNodeId()
- );
+
+ NodeId nodeId = connectionContext.getNodeId();
+ /**
+ * This check prevents destroying another device context. Throwing an exception here would propagate the
+ * connection close in {@link org.opendaylight.openflowplugin.impl.connection.HandshakeContextImpl}.
+ * If a context already exists, we are in the closing process (connection flapping) and must not propagate
+ * the connection close.
+ */
+ if (deviceContexts.containsKey(nodeId)) {
+ LOG.warn("Rejecting connection from node which is already connected and there exist deviceContext for it: {}", connectionContext.getNodeId());
+ return false;
+ }
+
LOG.info("ConnectionEvent: Device connected to controller, Device:{}, NodeId:{}",
- connectionContext.getConnectionAdapter().getRemoteAddress(), connectionContext.getNodeId());
+ connectionContext.getConnectionAdapter().getRemoteAddress(), nodeId);
// Add Disconnect handler
connectionContext.setDeviceDisconnectedHandler(DeviceManagerImpl.this);
connectionContext.setOutboundQueueHandleRegistration(outboundQueueHandlerRegistration);
final DeviceState deviceState = createDeviceState(connectionContext);
- final DeviceContext deviceContext = new DeviceContextImpl(connectionContext, deviceState, dataBroker,
- hashedWheelTimer, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, switchFeaturesMandatory);
+ final DeviceContext deviceContext = new DeviceContextImpl(connectionContext,
+ deviceState,
+ dataBroker,
+ conductor.getMessageIntelligenceAgency(),
+ outboundQueueProvider,
+ translatorLibrary,
+ switchFeaturesMandatory);
- Verify.verify(deviceContexts.putIfAbsent(connectionContext.getNodeId(), deviceContext) == null, "DeviceCtx still not closed.");
- deviceContext.addDeviceContextClosedHandler(this);
+ Verify.verify(deviceContexts.putIfAbsent(nodeId, deviceContext) == null, "DeviceCtx still not closed.");
((ExtensionConverterProviderKeeper) deviceContext).setExtensionConverterProvider(extensionConverterProvider);
- deviceContext.setNotificationService(notificationService);
+ deviceContext.setStatisticsRpcEnabled(isStatisticsRpcEnabled);
deviceContext.setNotificationPublishService(notificationPublishService);
updatePacketInRateLimiters();
connectionAdapter.setMessageListener(messageListener);
deviceState.setValid(true);
- deviceInitPhaseHandler.onDeviceContextLevelUp(deviceContext);
+ deviceInitPhaseHandler.onDeviceContextLevelUp(nodeId);
+
+ return true;
}
private static DeviceStateImpl createDeviceState(final @Nonnull ConnectionContext connectionContext) {
this.translatorLibrary = translatorLibrary;
}
- @Override
- public void setNotificationService(final NotificationService notificationServiceParam) {
- notificationService = notificationServiceParam;
- }
-
@Override
public void setNotificationPublishService(final NotificationPublishService notificationService) {
notificationPublishService = notificationService;
@Override
public void initialize() {
- spyPool = new ScheduledThreadPoolExecutor(1);
- spyPool.scheduleAtFixedRate(messageIntelligenceAgency, spyRate, spyRate, TimeUnit.SECONDS);
+ spyPool.scheduleAtFixedRate(conductor.getMessageIntelligenceAgency(), spyRate, spyRate, TimeUnit.SECONDS);
+ }
+
+ /** Returns the device context stored for {@code nodeId} in the manager's map, or null if not present. */
+ @Override
+ public DeviceContext getDeviceContextFromNodeId(final NodeId nodeId) {
+ return deviceContexts.get(nodeId);
+ }
+
+ /** Stores the flag that is later propagated to each new DeviceContext on connect (see deviceConnected). */
+ @Override
+ public void setStatisticsRpcEnabled(boolean isStatisticsRpcEnabled) {
+ this.isStatisticsRpcEnabled = isStatisticsRpcEnabled;
+ }
@Override
@Override
public void onDeviceDisconnected(final ConnectionContext connectionContext) {
LOG.trace("onDeviceDisconnected method call for Node: {}", connectionContext.getNodeId());
- Preconditions.checkArgument(connectionContext != null);
final NodeId nodeId = connectionContext.getNodeId();
final DeviceContext deviceCtx = this.deviceContexts.get(nodeId);
if (null == deviceCtx) {
- LOG.info("DeviceContext for Node {} was not found. Connection is terminated without OFP context suite.",
- connectionContext.getNodeId());
+ LOG.info("DeviceContext for Node {} was not found. Connection is terminated without OFP context suite.", nodeId);
return;
}
@Override
public void onSuccess(final Void result) {
- LOG.debug("TxChainManager for device {} is closed successful.", deviceCtx.getDeviceState().getNodeId());
+ LOG.debug("TxChainManager for device {} is closed successful.", nodeId);
deviceTerminPhaseHandler.onDeviceContextLevelDown(deviceCtx);
}
@Override
public void onFailure(final Throwable t) {
- LOG.warn("TxChainManager for device {} failed by closing.", deviceCtx.getDeviceState().getNodeId(), t);
+ LOG.warn("TxChainManager for device {} failed by closing.", nodeId, t);
deviceTerminPhaseHandler.onDeviceContextLevelDown(deviceCtx);
}
});
@Override
public void run(final Timeout timeout) throws Exception {
if (!future.isDone()) {
- LOG.info("Shutting down TxChain for node {} not completed during 10 sec. Continue anyway.",
- deviceCtx.getDeviceState().getNodeId());
+ LOG.info("Shutting down TxChain for node {} not completed during 10 sec. Continue anyway.", nodeId);
future.cancel(false);
}
}
};
- deviceCtx.getTimer().newTimeout(timerTask, 10, TimeUnit.SECONDS);
+ conductor.newTimeout(timerTask, 10, TimeUnit.SECONDS);
}
}
+
+ @VisibleForTesting
+ void addDeviceContextToMap(final NodeId nodeId, final DeviceContext deviceContext){
+ deviceContexts.put(nodeId, deviceContext);
+ }
}
}
+ // Queues a DELETE of {@code path} on the pending shared write transaction.
+ // Now fails loudly (instead of only logging) when the transaction chain is unusable.
<T extends DataObject> void addDeleteOperationTotTxChain(final LogicalDatastoreType store,
- final InstanceIdentifier<T> path) {
+ final InstanceIdentifier<T> path) throws Exception {
final WriteTransaction writeTx = getTransactionSafely();
if (writeTx != null) {
+ LOG.trace("addDeleteOperation called with path {} ", path);
writeTx.delete(store, path);
} else {
LOG.debug("WriteTx is null for node {}. Delete {} was not realized.", nodeII, path);
+ // NOTE(review): throwing raw java.lang.Exception forces callers into broad catch
+ // blocks; a dedicated exception type would be cleaner — TODO confirm with callers.
+ throw new Exception("Cannot write into transaction.");
}
}
+ // Queues a PUT of {@code data} at {@code path} on the pending shared write transaction;
+ // mirrors addDeleteOperationTotTxChain and now surfaces a dead transaction chain as an exception.
<T extends DataObject> void writeToTransaction(final LogicalDatastoreType store,
- final InstanceIdentifier<T> path, final T data) {
+ final InstanceIdentifier<T> path, final T data) throws Exception {
final WriteTransaction writeTx = getTransactionSafely();
if (writeTx != null) {
+ LOG.trace("writeToTransaction called with path {} ", path);
writeTx.put(store, path, data);
} else {
LOG.debug("WriteTx is null for node {}. Write data for {} was not realized.", nodeII, path);
+ // NOTE(review): raw Exception type — same remark as in addDeleteOperationTotTxChain.
+ throw new Exception("Cannot write into transaction.");
}
}
}
ListenableFuture<Void> shuttingDown() {
- LOG.debug("TxManager is going SUTTING_DOWN for node {}", nodeII);
+ LOG.debug("TxManager is going SHUTTING_DOWN for node {}", nodeII);
ListenableFuture<Void> future;
synchronized (txLock) {
this.transactionChainManagerStatus = TransactionChainManagerStatus.SHUTTING_DOWN;
*/
package org.opendaylight.openflowplugin.impl.role;
-import javax.annotation.Nullable;
-import java.util.concurrent.Future;
-
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.JdkFutureAdapters;
-import com.google.common.util.concurrent.ListenableFuture;
-import io.netty.util.Timeout;
-import io.netty.util.TimerTask;
-import java.util.concurrent.Future;
+
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
import javax.annotation.Nullable;
-import javax.annotation.concurrent.GuardedBy;
+
+import org.opendaylight.controller.md.sal.common.api.clustering.CandidateAlreadyRegisteredException;
import org.opendaylight.controller.md.sal.common.api.clustering.Entity;
import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipCandidateRegistration;
import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
-import org.opendaylight.openflowplugin.api.OFConstants;
-import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.role.RoleContext;
+import org.opendaylight.openflowplugin.impl.LifecycleConductorImpl;
import org.opendaylight.openflowplugin.impl.rpc.AbstractRequestContext;
-import org.opendaylight.openflowplugin.impl.services.SalRoleServiceImpl;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SalRoleService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleOutput;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Created by kramesha on 9/12/15.
+ * Role context hold information about entity ownership registration,
+ * register and unregister candidate (main and tx)
*/
-public class RoleContextImpl implements RoleContext {
+class RoleContextImpl implements RoleContext {
+
private static final Logger LOG = LoggerFactory.getLogger(RoleContextImpl.class);
+ private static final int TIMEOUT = 12;
+ private final NodeId nodeId;
private final EntityOwnershipService entityOwnershipService;
- private EntityOwnershipCandidateRegistration entityOwnershipCandidateRegistration;
- private EntityOwnershipCandidateRegistration txEntityOwnershipCandidateRegistration;
-
- private final DeviceContext deviceContext;
+ private volatile EntityOwnershipCandidateRegistration entityOwnershipCandidateRegistration = null;
+ private volatile EntityOwnershipCandidateRegistration txEntityOwnershipCandidateRegistration = null;
- @GuardedBy("mainCandidateGuard")
private final Entity entity;
- @GuardedBy("txCandidateGuard")
private final Entity txEntity;
- private SalRoleService salRoleService;
+ private SalRoleService salRoleService = null;
private final Semaphore roleChangeGuard = new Semaphore(1, true);
- @GuardedBy("roleChangeGuard")
- private OfpRole clusterRole;
+ private final LifecycleConductor conductor;
- public RoleContextImpl(final DeviceContext deviceContext, final EntityOwnershipService entityOwnershipService,
- final Entity entity, final Entity txEntity) {
- this.entityOwnershipService = Preconditions.checkNotNull(entityOwnershipService);
- this.deviceContext = Preconditions.checkNotNull(deviceContext);
- this.entity = Preconditions.checkNotNull(entity);
- this.txEntity = Preconditions.checkNotNull(txEntity);
- salRoleService = new SalRoleServiceImpl(this, deviceContext);
- clusterRole = OfpRole.BECOMESLAVE;
+ public RoleContextImpl(final NodeId nodeId, final EntityOwnershipService entityOwnershipService, final Entity entity, final Entity txEntity, final LifecycleConductor lifecycleConductor) {
+ // Fail fast on missing collaborators instead of deferring NPEs to first use;
+ // the pre-refactoring constructor already enforced this and Preconditions is still imported.
+ this.entityOwnershipService = Preconditions.checkNotNull(entityOwnershipService);
+ this.entity = Preconditions.checkNotNull(entity);
+ this.txEntity = Preconditions.checkNotNull(txEntity);
+ this.nodeId = Preconditions.checkNotNull(nodeId);
+ this.conductor = Preconditions.checkNotNull(lifecycleConductor);
}
@Override
- public void initializationRoleContext() {
- LOG.trace("Initialization MainCandidate for Node {}", deviceContext.getDeviceState().getNodeId());
- final AsyncFunction<RpcResult<SetRoleOutput>, Void> initFunction = new AsyncFunction<RpcResult<SetRoleOutput>, Void>() {
- @Override
- public ListenableFuture<Void> apply(final RpcResult<SetRoleOutput> input) throws Exception {
- LOG.debug("Initialization request OpenflowEntityOwnership for entity {}", entity);
- getDeviceState().setRole(OfpRole.BECOMESLAVE);
- entityOwnershipCandidateRegistration = entityOwnershipService.registerCandidate(entity);
- LOG.debug("RoleContextImpl : Candidate registered with ownership service for device :{}", deviceContext
- .getPrimaryConnectionContext().getNodeId().getValue());
- return Futures.immediateFuture(null);
- }
- };
-
- try {
- roleChangeGuard.acquire();
- final ListenableFuture<Void> roleChange = sendRoleChangeToDevice(OfpRole.BECOMESLAVE, initFunction);
- Futures.addCallback(roleChange, new FutureCallback<Void>() {
-
- @Override
- public void onSuccess(final Void result) {
- LOG.debug("Initial RoleContext for Node {} is successful", deviceContext.getDeviceState().getNodeId());
- roleChangeGuard.release();
- }
-
- @Override
- public void onFailure(final Throwable t) {
- LOG.warn("Initial RoleContext for Node {} fail", deviceContext.getDeviceState().getNodeId(), t);
- roleChangeGuard.release();
- deviceContext.shutdownConnection();
- }
- });
- } catch (final Exception e) {
- LOG.warn("Unexpected exception bu Initialization RoleContext for Node {}", deviceContext.getDeviceState().getNodeId(), e);
- roleChangeGuard.release();
- deviceContext.shutdownConnection();
- }
- }
-
- @Override
- public void terminationRoleContext() {
- LOG.trace("Termination MainCandidate for Node {}", deviceContext.getDeviceState().getNodeId());
- if (null != entityOwnershipCandidateRegistration) {
- LOG.debug("Closing EntityOwnershipCandidateRegistration for {}", entity);
- try {
- roleChangeGuard.acquire();
- } catch (final InterruptedException e) {
- LOG.warn("Unexpected exception in closing EntityOwnershipCandidateRegistration process for entity {}", entity);
- } finally {
- entityOwnershipCandidateRegistration.close();
- entityOwnershipCandidateRegistration = null;
- // FIXME: call suspendTxCandidate here means lost protection for possible Delete Node before take ownership
- // by another ClusterNode, but it stabilized cluster behavior in general - So try to find another solution
- suspendTxCandidate();
- roleChangeGuard.release();
- }
- }
- }
-
- @Override
- public void onDeviceTryToTakeClusterLeadership() {
- LOG.trace("onDeviceTryToTakeClusterLeadership method call for Entity {}", entity);
- boolean callShutdown = false;
- try {
- roleChangeGuard.acquire();
- Verify.verify(null != entityOwnershipCandidateRegistration);
- Verify.verify(OfpRole.BECOMESLAVE.equals(clusterRole));
-
- clusterRole = OfpRole.BECOMEMASTER;
- /* register TxCandidate and wait for mainCandidateGuard release from onDeviceTakeLeadership method */
- setupTxCandidate();
-
- } catch (final Exception e) {
- LOG.warn("Unexpected exception in roleChange process for entity {}", entity);
- callShutdown = true;
- } finally {
- roleChangeGuard.release();
- }
- if (callShutdown) {
- deviceContext.shutdownConnection();
- }
+ // Registers the main ownership candidate for this node's entity; returns false when
+ // the registration (or the guarding semaphore inside registerCandidate) could not be obtained.
+ public boolean initialization() {
+ LOG.info("Initialization main candidate for node {}", nodeId);
+ return registerCandidate(this.entity);
}
@Override
- public void onDeviceTakeClusterLeadership() {
- LOG.trace("onDeviceTakeClusterLeadership for entity {}", txEntity);
- try {
- roleChangeGuard.acquire();
- Verify.verify(null != txEntityOwnershipCandidateRegistration);
- Verify.verify(OfpRole.BECOMEMASTER.equals(clusterRole));
-
- if (null == entityOwnershipCandidateRegistration) {
- LOG.debug("EntityOwnership candidate for entity {} is closed.", txEntity);
- suspendTxCandidate();
- roleChangeGuard.release();
- return;
- }
-
- final ListenableFuture<Void> future = onRoleChanged(OfpRole.BECOMESLAVE, OfpRole.BECOMEMASTER);
- Futures.addCallback(future, new FutureCallback<Void>() {
-
- @Override
- public void onSuccess(final Void result) {
- LOG.debug("Take Leadership for node {} was successful", getDeviceState().getNodeId());
- roleChangeGuard.release();
- }
-
- @Override
- public void onFailure(final Throwable t) {
- LOG.warn("Take Leadership for node {} failed", getDeviceState().getNodeId(), t);
- roleChangeGuard.release();
- deviceContext.shutdownConnection();
- }
- });
-
- } catch (final Exception e) {
- LOG.warn("Unexpected exception in roleChange process for entity {}", txEntity);
- roleChangeGuard.release();
- deviceContext.shutdownConnection();
+ public void unregisterAllCandidates() {
+ LOG.info("Role context closed, unregistering all candidates for ownership for node {}", nodeId);
+ if (isMainCandidateRegistered()) {
+ unregisterCandidate(this.entity);
}
- };
-
- @Override
- public void onDeviceLostClusterLeadership() {
- LOG.trace("onDeviceLostClusterLeadership method call for Entity {}", entity);
- try {
- roleChangeGuard.acquire();
- Verify.verify(null != entityOwnershipCandidateRegistration);
- Verify.verify(null != txEntityOwnershipCandidateRegistration);
- Verify.verify(OfpRole.BECOMEMASTER.equals(clusterRole));
-
- clusterRole = OfpRole.BECOMESLAVE;
-
- final ListenableFuture<Void> future = onRoleChanged(OfpRole.BECOMEMASTER, OfpRole.BECOMESLAVE);
- Futures.addCallback(future, new FutureCallback<Void>() {
-
- @Override
- public void onSuccess(final Void result) {
- LOG.debug("Lost Leadership for node {} was successful", getDeviceState().getNodeId());
- suspendTxCandidate();
- roleChangeGuard.release();
- }
-
- @Override
- public void onFailure(final Throwable t) {
- LOG.debug("Lost Leadership for node {} faild", getDeviceState().getNodeId(), t);
- roleChangeGuard.release();
- deviceContext.shutdownConnection();
- }
-
- });
-
- } catch (final Exception e) {
- LOG.warn("Unexpected exception in roleChange process for entity {}", entity);
- roleChangeGuard.release();
- deviceContext.shutdownConnection();
+ if (isTxCandidateRegistered()) {
+ unregisterCandidate(this.txEntity);
}
}
+ @Nullable
@Override
- public boolean isMainCandidateRegistered() {
- final boolean result;
- try {
- roleChangeGuard.acquire();
- } catch (final InterruptedException e) {
- LOG.warn("Unexpected exception in check EntityOwnershipCandidateRegistration process for entity {}", entity);
- } finally {
- result = entityOwnershipCandidateRegistration != null;
- roleChangeGuard.release();
- }
- return result;
- }
-
- @Override
- public boolean isTxCandidateRegistered() {
- final boolean result;
- try {
- roleChangeGuard.acquire();
- } catch (final InterruptedException e) {
- LOG.warn("Unexpected exception in check TxEntityOwnershipCandidateRegistration process for txEntity {}", txEntity);
- } finally {
- result = txEntityOwnershipCandidateRegistration != null;
- roleChangeGuard.release();
- }
- return result;
- }
-
- @VisibleForTesting
- ListenableFuture<Void> onRoleChanged(final OfpRole oldRole, final OfpRole newRole) {
- LOG.trace("onRoleChanged method call for Entity {}", entity);
-
- if (!isDeviceConnected()) {
- // this can happen as after the disconnect, we still get a last message from EntityOwnershipService.
- LOG.debug("Device {} is disconnected from this node. Hence not attempting a role change.", deviceContext
- .getPrimaryConnectionContext().getNodeId());
- // we don't need to do anything
- return Futures.immediateFuture(null);
- }
-
- final AsyncFunction<RpcResult<SetRoleOutput>, Void> roleChangeFunction = new AsyncFunction<RpcResult<SetRoleOutput>, Void>() {
+ public <T> RequestContext<T> createRequestContext() {
+ return new AbstractRequestContext<T>(conductor.reserveXidForDeviceMessage(nodeId)) {
@Override
- public ListenableFuture<Void> apply(final RpcResult<SetRoleOutput> setRoleOutputRpcResult) throws Exception {
- LOG.debug("Role change {} successful made on switch :{}", newRole, deviceContext.getDeviceState().getNodeId());
- getDeviceState().setRole(newRole);
- return deviceContext.onClusterRoleChange(oldRole, newRole);
+ public void close() {
}
};
- return sendRoleChangeToDevice(newRole, roleChangeFunction);
}
- @GuardedBy("roleChangeGuard")
- private void setupTxCandidate() throws Exception {
- LOG.debug("setupTxCandidate for entity {} and Transaction entity {}", entity, txEntity);
- Verify.verify(txEntity != null);
- Verify.verify(entityOwnershipCandidateRegistration != null);
- Verify.verify(txEntityOwnershipCandidateRegistration == null);
- txEntityOwnershipCandidateRegistration = entityOwnershipService.registerCandidate(txEntity);
- }
-
- @GuardedBy("roleChangeGuard")
- private void suspendTxCandidate() {
- LOG.trace("Suspend TxCandidate for Node {}", deviceContext.getDeviceState().getNodeId());
- if (null != txEntityOwnershipCandidateRegistration) {
- LOG.debug("Closing TxEntityOwnershipCandidateRegistration for {}", txEntity);
- txEntityOwnershipCandidateRegistration.close();
- txEntityOwnershipCandidateRegistration = null;
- }
+ @Override
+ public void setSalRoleService(@Nonnull final SalRoleService salRoleService) {
+ Preconditions.checkNotNull(salRoleService);
+ this.salRoleService = salRoleService;
}
@Override
- public void close() {
- LOG.trace("Close RoleCtx for Node {}", deviceContext.getDeviceState().getNodeId());
- if (null != entityOwnershipCandidateRegistration) {
- LOG.info("Close Node Entity {} registration", entity);
- entityOwnershipCandidateRegistration.close();
- entityOwnershipCandidateRegistration = null;
- }
- if (null != txEntityOwnershipCandidateRegistration) {
- LOG.info("Close Tx Entity {} registration", txEntity);
- txEntityOwnershipCandidateRegistration.close();
- txEntityOwnershipCandidateRegistration = null;
- }
+ public SalRoleService getSalRoleService() {
+ return this.salRoleService;
}
@Override
public Entity getEntity() {
- return entity;
+ return this.entity;
}
@Override
public Entity getTxEntity() {
- return txEntity;
- }
-
- private boolean isDeviceConnected() {
- return ConnectionContext.CONNECTION_STATE.WORKING.equals(
- deviceContext.getPrimaryConnectionContext().getConnectionState());
+ return this.txEntity;
}
- @Nullable
@Override
- public <T> RequestContext<T> createRequestContext() {
- final AbstractRequestContext<T> ret = new AbstractRequestContext<T>(deviceContext.reservedXidForDeviceMessage()) {
- @Override
- public void close() {
- }
- };
- return ret;
- }
-
- @VisibleForTesting
- void setSalRoleService(final SalRoleService salRoleService) {
- this.salRoleService = salRoleService;
+ public NodeId getNodeId() {
+ return nodeId;
}
@Override
- public DeviceState getDeviceState() {
- return deviceContext.getDeviceState();
+ public boolean isMainCandidateRegistered() {
+ return entityOwnershipCandidateRegistration != null;
}
@Override
- public DeviceContext getDeviceContext() {
- return deviceContext;
+ public boolean isTxCandidateRegistered() {
+ return txEntityOwnershipCandidateRegistration != null;
}
- private ListenableFuture<Void> sendRoleChangeToDevice(final OfpRole newRole, final AsyncFunction<RpcResult<SetRoleOutput>, Void> function) {
- LOG.debug("Send new role {} to device {}", newRole, deviceContext.getDeviceState().getNodeId());
- final Future<RpcResult<SetRoleOutput>> setRoleOutputFuture;
- if (deviceContext.getDeviceState().getFeatures().getVersion() < OFConstants.OFP_VERSION_1_3) {
- LOG.debug("Device OF version {} not support ROLE", deviceContext.getDeviceState().getFeatures().getVersion());
- setRoleOutputFuture = Futures.immediateFuture(RpcResultBuilder.<SetRoleOutput> success().build());
- } else {
- final SetRoleInput setRoleInput = (new SetRoleInputBuilder()).setControllerRole(newRole)
- .setNode(new NodeRef(deviceContext.getDeviceState().getNodeInstanceIdentifier())).build();
- setRoleOutputFuture = salRoleService.setRole(setRoleInput);
- final TimerTask timerTask = new TimerTask() {
-
- @Override
- public void run(final Timeout timeout) throws Exception {
- if (!setRoleOutputFuture.isDone()) {
- LOG.info("New role {} was not propagated to device {} during 10 sec. Close connection immediately.",
- newRole, deviceContext.getDeviceState().getNodeId());
- setRoleOutputFuture.cancel(true);
- }
+ @Override
+ public boolean registerCandidate(final Entity entity_) {
+ boolean permit = false;
+ try {
+ permit = roleChangeGuard.tryAcquire(TIMEOUT, TimeUnit.SECONDS);
+ if(permit) {
+ LOG.debug("Register candidate for entity {}", entity_);
+ if (entity_.equals(this.entity)) {
+ entityOwnershipCandidateRegistration = entityOwnershipService.registerCandidate(entity_);
+ } else {
+ txEntityOwnershipCandidateRegistration = entityOwnershipService.registerCandidate(entity_);
}
- };
- deviceContext.getTimer().newTimeout(timerTask, 10, TimeUnit.SECONDS);
+ } else {
+ return false;
+ }
+ } catch (final CandidateAlreadyRegisteredException e) {
+ LOG.warn("Candidate for entity {} is already registered.", entity_.getType());
+ return false;
+ } catch (final InterruptedException e) {
+ LOG.warn("Cannot acquire semaphore for register entity {} candidate.", entity_.getType());
+ return false;
+ } finally {
+ if (permit) {
+ roleChangeGuard.release();
+ }
}
- return Futures.transform(JdkFutureAdapters.listenInPoolThread(setRoleOutputFuture), function);
+ return true;
}
-
@Override
- public OfpRole getClusterRole() {
- final OfpRole role;
+ public boolean unregisterCandidate(final Entity entity_) {
+ boolean permit = false;
try {
- roleChangeGuard.acquire();
+ permit = roleChangeGuard.tryAcquire(TIMEOUT, TimeUnit.SECONDS);
+ if(permit) {
+ if (entity_.equals(this.entity)) {
+ if (entityOwnershipCandidateRegistration != null) {
+ LOG.debug("Unregister candidate for entity {}", entity_);
+ entityOwnershipCandidateRegistration.close();
+ entityOwnershipCandidateRegistration = null;
+ }
+ } else {
+ if (txEntityOwnershipCandidateRegistration != null) {
+ LOG.debug("Unregister candidate for tx entity {}", entity_);
+ txEntityOwnershipCandidateRegistration.close();
+ txEntityOwnershipCandidateRegistration = null;
+ }
+ }
+ } else {
+ return false;
+ }
} catch (final InterruptedException e) {
- LOG.warn("Unexpected exception in get ClusterRole process for entity {}", entity);
+ LOG.warn("Cannot acquire semaphore for unregister entity {} candidate.", entity_.getType());
+ return false;
} finally {
- role = OfpRole.forValue(clusterRole.getIntValue());
- roleChangeGuard.release();
+ if (permit) {
+ roleChangeGuard.release();
+ }
}
- return role;
+ return true;
}
+ @Override
+ public void close() {
+ unregisterAllCandidates();
+ }
+
+ // Reports mastership as "both candidate registrations currently held"; presumably the
+ // tx candidate is only registered after main ownership is granted — TODO confirm with
+ // the ownership-change handling code.
+ public boolean isMaster(){
+ return (txEntityOwnershipCandidateRegistration != null && entityOwnershipCandidateRegistration != null);
+ }
}
*/
package org.opendaylight.openflowplugin.impl.role;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
+
+import java.util.ArrayList;
import java.util.Iterator;
+import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.clustering.Entity;
import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
-import org.opendaylight.openflowplugin.api.openflow.role.RoleChangeListener;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.RoleChangeListener;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.ServiceChangeListener;
import org.opendaylight.openflowplugin.api.openflow.role.RoleContext;
import org.opendaylight.openflowplugin.api.openflow.role.RoleManager;
+import org.opendaylight.openflowplugin.impl.services.SalRoleServiceImpl;
+import org.opendaylight.openflowplugin.impl.util.DeviceStateUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleOutput;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Gets invoked from RpcManagerInitial, registers a candidate with EntityOwnershipService.
- * On receipt of the ownership notification, makes an rpc call to SalRoleSevice.
+ * On receipt of the ownership notification, makes an rpc call to SalRoleService.
*
* Hands over to StatisticsManager at the end.
*/
-public class RoleManagerImpl implements RoleManager, EntityOwnershipListener {
+public class RoleManagerImpl implements RoleManager, EntityOwnershipListener, ServiceChangeListener {
private static final Logger LOG = LoggerFactory.getLogger(RoleManagerImpl.class);
private DeviceInitializationPhaseHandler deviceInitializationPhaseHandler;
private DeviceTerminationPhaseHandler deviceTerminationPhaseHandler;
private final DataBroker dataBroker;
private final EntityOwnershipService entityOwnershipService;
- private final ConcurrentMap<Entity, RoleContext> contexts = new ConcurrentHashMap<>();
- private final ConcurrentMap<Entity, RoleContext> txContexts = new ConcurrentHashMap<>();
+ private final ConcurrentMap<NodeId, RoleContext> contexts = new ConcurrentHashMap<>();
+ private final ConcurrentMap<Entity, RoleContext> watchingEntities = new ConcurrentHashMap<>();
private final EntityOwnershipListenerRegistration entityOwnershipListenerRegistration;
private final EntityOwnershipListenerRegistration txEntityOwnershipListenerRegistration;
+ private List<RoleChangeListener> listeners = new ArrayList<>();
+
+ private final LifecycleConductor conductor;
- public RoleManagerImpl(final EntityOwnershipService entityOwnershipService, final DataBroker dataBroker) {
+ public RoleManagerImpl(final EntityOwnershipService entityOwnershipService, final DataBroker dataBroker, final LifecycleConductor lifecycleConductor) {
this.entityOwnershipService = Preconditions.checkNotNull(entityOwnershipService);
this.dataBroker = Preconditions.checkNotNull(dataBroker);
this.entityOwnershipListenerRegistration = Preconditions.checkNotNull(entityOwnershipService.registerListener(RoleManager.ENTITY_TYPE, this));
this.txEntityOwnershipListenerRegistration = Preconditions.checkNotNull(entityOwnershipService.registerListener(TX_ENTITY_TYPE, this));
+ this.conductor = lifecycleConductor;
LOG.debug("Register OpenflowOwnershipListener to all entity ownership changes");
}
}
@Override
- public void onDeviceContextLevelUp(@CheckForNull final DeviceContext deviceContext) throws Exception {
- LOG.trace("Role manager called for device:{}", deviceContext.getPrimaryConnectionContext().getNodeId());
- final RoleContext roleContext = new RoleContextImpl(deviceContext, entityOwnershipService,
- makeEntity(deviceContext.getDeviceState().getNodeId()),
- makeTxEntity(deviceContext.getDeviceState().getNodeId()));
-
- Verify.verify(contexts.putIfAbsent(roleContext.getEntity(), roleContext) == null, "Role context for master Node {} is still not closed.", deviceContext.getDeviceState().getNodeId());
- Verify.verify(!txContexts.containsKey(roleContext.getTxEntity()),
- "Role context for master Node {} is still not closed. TxEntity was not unregistered yet.", deviceContext.getDeviceState().getNodeId());
-
- // if the device context gets closed (mostly on connection close), we would need to cleanup
- deviceContext.addDeviceContextClosedHandler(this);
- roleContext.initializationRoleContext();
- deviceInitializationPhaseHandler.onDeviceContextLevelUp(deviceContext);
+ // Builds and wires a fresh RoleContext for the newly connected node, forces an initial
+ // SLAVE role on the device, then registers the main ownership candidate and hands the
+ // node on to the next initialization phase handler.
+ public void onDeviceContextLevelUp(@CheckForNull final NodeId nodeId) throws Exception {
+ final DeviceContext deviceContext = Preconditions.checkNotNull(conductor.getDeviceContext(nodeId));
+ final RoleContext roleContext = new RoleContextImpl(nodeId, entityOwnershipService, makeEntity(nodeId), makeTxEntity(nodeId), conductor);
+ roleContext.setSalRoleService(new SalRoleServiceImpl(roleContext, deviceContext));
+ Verify.verify(contexts.putIfAbsent(nodeId, roleContext) == null, "Role context for master Node %s is still not closed.", nodeId);
+ makeDeviceRoleChange(OfpRole.BECOMESLAVE, roleContext, true);
+ // NOTE(review): the candidate is registered (inside initialization()) BEFORE the entity
+ // is placed into watchingEntities; an ownership notification arriving in that window
+ // might find no watcher — TODO confirm ordering is safe.
+ notifyListenersRoleInitializationDone(roleContext.getNodeId(), roleContext.initialization());
+ watchingEntities.put(roleContext.getEntity(), roleContext);
+ deviceInitializationPhaseHandler.onDeviceContextLevelUp(nodeId);
}
@Override
public void close() {
+ LOG.debug("Close method on role manager was called.");
entityOwnershipListenerRegistration.close();
txEntityOwnershipListenerRegistration.close();
for (final Iterator<RoleContext> iterator = Iterators.consumingIterator(contexts.values().iterator()); iterator.hasNext();) {
// got here because last known role is LEADER and DS might need clearing up
- final RoleContext roleCtx = iterator.next();
- final NodeId nodeId = roleCtx.getDeviceState().getNodeId();
- if (OfpRole.BECOMEMASTER.equals(roleCtx.getClusterRole())) {
- LOG.debug("Last role is LEADER and ownershipService returned hasOwner=false for node: {}; "
- + "cleaning DS as being probably the last owner", nodeId);
- removeDeviceFromOperDS(roleCtx);
+ final RoleContext roleContext = iterator.next();
+ watchingEntities.remove(roleContext.getEntity());
+ watchingEntities.remove(roleContext.getTxEntity());
+ contexts.remove(roleContext.getNodeId());
+ if (roleContext.isTxCandidateRegistered()) {
+ // Fix: the log message has a {} placeholder, so the node id must be supplied
+ // as an argument or SLF4J prints the literal "{}".
+ LOG.info("Node {} was holder txEntity, so trying to remove device from operational DS.", roleContext.getNodeId());
+ removeDeviceFromOperationalDS(roleContext.getNodeId());
} else {
- // NOOP - there is another owner
- LOG.debug("Last role is LEADER and ownershipService returned hasOwner=true for node: {}; "
- + "leaving DS untouched", nodeId);
+ roleContext.close();
}
- txContexts.remove(roleCtx.getTxEntity(), roleCtx);
- roleCtx.close();
}
}
@Override
public void onDeviceContextLevelDown(final DeviceContext deviceContext) {
- final NodeId nodeId = deviceContext.getDeviceState().getNodeId();
+ final NodeId nodeId = deviceContext.getPrimaryConnectionContext().getNodeId();
LOG.trace("onDeviceContextLevelDown for node {}", nodeId);
- final Entity entity = makeEntity(nodeId);
- final RoleContext roleContext = contexts.get(entity);
+ final RoleContext roleContext = contexts.get(nodeId);
if (roleContext != null) {
- LOG.debug("Found roleContext associated to deviceContext: {}, now closing the roleContext", nodeId);
- roleContext.terminationRoleContext();
- final TimerTask timerTask = new TimerTask() {
-
- @Override
- public void run(final Timeout timeout) throws Exception {
- final RoleContext foundMainRoleCtx = contexts.get(roleContext.getEntity());
- final RoleContext foundTxRoleCtx = txContexts.get(roleContext.getTxEntity());
-
- if (roleContext.equals(foundMainRoleCtx)) {
- LOG.info("OldRoleCtx was not remove for entity {} from contexts", roleContext.getEntity());
- contexts.remove(roleContext.getEntity(), roleContext);
- foundMainRoleCtx.close();
- }
-
- if (roleContext.equals(foundTxRoleCtx)) {
- LOG.info("OldRoleCtx was not remove for txEntity {} from contexts", roleContext.getTxEntity());
- txContexts.remove(roleContext.getTxEntity(), roleContext);
- foundTxRoleCtx.close();
- }
- }
- };
- deviceContext.getTimer().newTimeout(timerTask, 10, TimeUnit.SECONDS);
+ LOG.debug("Found roleContext associated to deviceContext: {}, now trying close the roleContext", nodeId);
+ if (roleContext.isMainCandidateRegistered()) {
+ roleContext.unregisterCandidate(roleContext.getEntity());
+ } else {
+ contexts.remove(nodeId, roleContext);
+ roleContext.close();
+ }
}
deviceTerminationPhaseHandler.onDeviceContextLevelDown(deviceContext);
}
- private static Entity makeEntity(final NodeId nodeId) {
+ @VisibleForTesting
+ static Entity makeEntity(final NodeId nodeId) {
return new Entity(RoleManager.ENTITY_TYPE, nodeId.getValue());
}
- private static Entity makeTxEntity(final NodeId nodeId) {
+ @VisibleForTesting
+ static Entity makeTxEntity(final NodeId nodeId) {
return new Entity(RoleManager.TX_ENTITY_TYPE, nodeId.getValue());
}
@Override
public void ownershipChanged(final EntityOwnershipChange ownershipChange) {
+
Preconditions.checkArgument(ownershipChange != null);
- RoleContext roleContext = null;
- try {
- roleContext = contexts.get(ownershipChange.getEntity());
- if (roleContext != null) {
- changeOwnershipForMainEntity(ownershipChange, roleContext);
- return;
- }
+ final RoleContext roleContext = watchingEntities.get(ownershipChange.getEntity());
+
+ LOG.debug("Received EOS message: wasOwner:{} isOwner:{} hasOwner:{} for entity type {} and node {}",
+ ownershipChange.wasOwner(), ownershipChange.isOwner(), ownershipChange.hasOwner(),
+ ownershipChange.getEntity().getType(),
+ roleContext != null ? roleContext.getNodeId() : "-> no watching entity, disregarding notification <-");
- roleContext = txContexts.get(ownershipChange.getEntity());
- if (roleContext != null) {
+ if (roleContext != null) {
+ if (ownershipChange.getEntity().equals(roleContext.getEntity())) {
+ changeOwnershipForMainEntity(ownershipChange, roleContext);
+ } else {
changeOwnershipForTxEntity(ownershipChange, roleContext);
- return;
- }
- } catch (final Exception e) {
- LOG.warn("fail to acquire semaphore: {}", ownershipChange.getEntity(), e);
- if (roleContext != null) {
- roleContext.getDeviceContext().shutdownConnection();
}
+ } else {
+ LOG.debug("OwnershipChange {}", ownershipChange);
}
- LOG.debug("We are not able to find Entity {} ownershipChange {} - disregarding ownership notification",
- ownershipChange.getEntity(), ownershipChange);
}
- private void changeOwnershipForMainEntity(final EntityOwnershipChange ownershipChange,
- @CheckForNull final RoleContext roleContext) {
+ @VisibleForTesting
+ void changeOwnershipForMainEntity(final EntityOwnershipChange ownershipChange, final RoleContext roleContext) {
- LOG.debug("Received Main-EntityOwnershipChange:{}", ownershipChange);
- Preconditions.checkArgument(roleContext != null);
if (roleContext.isMainCandidateRegistered()) {
- LOG.debug("Main-EntityOwnershipRegistration is active for entity {}", ownershipChange.getEntity());
+ LOG.debug("Main-EntityOwnershipRegistration is active for entity type {} and node {}",
+ ownershipChange.getEntity().getType(), roleContext.getNodeId());
if (!ownershipChange.wasOwner() && ownershipChange.isOwner()) {
// SLAVE -> MASTER
- txContexts.put(roleContext.getTxEntity(), roleContext);
- roleContext.onDeviceTryToTakeClusterLeadership();
+ LOG.debug("SLAVE to MASTER for node {}", roleContext.getNodeId());
+ if (roleContext.registerCandidate(roleContext.getTxEntity())) {
+ LOG.debug("Starting watching tx entity for node {}", roleContext.getNodeId());
+ watchingEntities.putIfAbsent(roleContext.getTxEntity(), roleContext);
+ }
} else if (ownershipChange.wasOwner() && !ownershipChange.isOwner()) {
// MASTER -> SLAVE
- roleContext.onDeviceLostClusterLeadership();
- } else if (LOG.isDebugEnabled()) {
- LOG.debug("Not processed Ownership Main Entity {} Event {}", ownershipChange.getEntity(), ownershipChange);
+ LOG.debug("MASTER to SLAVE for node {}", roleContext.getNodeId());
+ conductor.addOneTimeListenerWhenServicesChangesDone(this, roleContext.getNodeId());
+ makeDeviceRoleChange(OfpRole.BECOMESLAVE, roleContext, false);
}
} else {
- LOG.debug("Main-EntityOwnershipRegistration is not active for entity {}", ownershipChange.getEntity());
- contexts.remove(ownershipChange.getEntity(), roleContext);
- if (!ownershipChange.hasOwner() && !ownershipChange.isOwner() && ownershipChange.wasOwner()) {
- /* Method has to clean all context and registrations */
- unregistrationHelper(ownershipChange, roleContext);
+ LOG.debug("Main-EntityOwnershipRegistration is not active for entity type {} and node {}",
+ ownershipChange.getEntity().getType(), roleContext.getNodeId());
+ watchingEntities.remove(ownershipChange.getEntity(), roleContext);
+ if (roleContext.isTxCandidateRegistered()) {
+ LOG.debug("tx candidate still registered for node {}, probably connection lost, trying to unregister tx candidate", roleContext.getNodeId());
+ roleContext.unregisterCandidate(roleContext.getTxEntity());
+ if (ownershipChange.wasOwner() && !ownershipChange.isOwner() && !ownershipChange.hasOwner()) {
+ LOG.debug("Trying to remove from operational node: {}", roleContext.getNodeId());
+ removeDeviceFromOperationalDS(roleContext.getNodeId());
+ }
} else {
- txContexts.remove(roleContext.getTxEntity(), roleContext);
+ final NodeId nodeId = roleContext.getNodeId();
+ contexts.remove(nodeId, roleContext);
roleContext.close();
+ conductor.closeConnection(nodeId);
}
}
}
- private void changeOwnershipForTxEntity(final EntityOwnershipChange ownershipChange,
+ @VisibleForTesting
+ void changeOwnershipForTxEntity(final EntityOwnershipChange ownershipChange,
@Nonnull final RoleContext roleContext) {
- LOG.debug("Received TX-EntityOwnershipChange:{}", ownershipChange);
- Preconditions.checkArgument(roleContext != null);
if (roleContext.isTxCandidateRegistered()) {
- LOG.debug("Tx-EntityOwnershipRegistration is active for entity {}", ownershipChange.getEntity());
+ LOG.debug("Tx-EntityOwnershipRegistration is active for entity type {} and node {}",
+ ownershipChange.getEntity().getType(),
+ roleContext.getNodeId());
if (!ownershipChange.wasOwner() && ownershipChange.isOwner()) {
// SLAVE -> MASTER
- roleContext.onDeviceTakeClusterLeadership();
+ LOG.debug("SLAVE to MASTER for node {}", roleContext.getNodeId());
+ makeDeviceRoleChange(OfpRole.BECOMEMASTER, roleContext, false);
} else if (ownershipChange.wasOwner() && !ownershipChange.isOwner()) {
// MASTER -> SLAVE
- LOG.warn("Tx-EntityOwnershipRegistration unexpected lost Leadership entity {}", ownershipChange.getEntity());
- roleContext.getDeviceContext().shutdownConnection();
- } else {
- LOG.debug("NOOP state transition for TxEntity {} ", roleContext.getTxEntity());
+ LOG.debug("MASTER to SLAVE for node {}", roleContext.getNodeId());
+ LOG.warn("Tx-EntityOwnershipRegistration lost leadership entity type {} and node {}",
+ ownershipChange.getEntity().getType(),roleContext.getNodeId());
+ watchingEntities.remove(roleContext.getTxEntity(), roleContext);
+ watchingEntities.remove(roleContext.getEntity(), roleContext);
+ roleContext.unregisterCandidate(roleContext.getEntity());
+ roleContext.unregisterCandidate(roleContext.getTxEntity());
+ if (!ownershipChange.hasOwner()) {
+ LOG.debug("Trying to remove from operational node: {}", roleContext.getNodeId());
+ removeDeviceFromOperationalDS(roleContext.getNodeId());
+ } else {
+ final NodeId nodeId = roleContext.getNodeId();
+ contexts.remove(nodeId, roleContext);
+ roleContext.close();
+ conductor.closeConnection(nodeId);
+ }
}
} else {
- LOG.debug("Tx-EntityOwnershipRegistration is not active for entity {}", ownershipChange.getEntity());
- txContexts.remove(ownershipChange.getEntity(), roleContext);
+ LOG.debug("Tx-EntityOwnershipRegistration is not active for entity {}", ownershipChange.getEntity().getType());
+ watchingEntities.remove(roleContext.getTxEntity(), roleContext);
+ final NodeId nodeId = roleContext.getNodeId();
+ contexts.remove(nodeId, roleContext);
+ roleContext.close();
+ conductor.closeConnection(nodeId);
}
}
- private CheckedFuture<Void, TransactionCommitFailedException> removeDeviceFromOperDS(
- final RoleChangeListener roleChangeListener) {
- Preconditions.checkArgument(roleChangeListener != null);
- final DeviceState deviceState = roleChangeListener.getDeviceState();
- final WriteTransaction delWtx = dataBroker.newWriteOnlyTransaction();
- delWtx.delete(LogicalDatastoreType.OPERATIONAL, deviceState.getNodeInstanceIdentifier());
- final CheckedFuture<Void, TransactionCommitFailedException> delFuture = delWtx.submit();
- Futures.addCallback(delFuture, new FutureCallback<Void>() {
-
+ @VisibleForTesting
+ void makeDeviceRoleChange(final OfpRole role, final RoleContext roleContext, final Boolean init) {
+ final ListenableFuture<RpcResult<SetRoleOutput>> roleChangeFuture = sendRoleChangeToDevice(role, roleContext);
+ Futures.addCallback(roleChangeFuture, new FutureCallback<RpcResult<SetRoleOutput>>() {
@Override
- public void onSuccess(final Void result) {
- LOG.debug("Delete Node {} was successful", deviceState.getNodeId());
+ public void onSuccess(@Nullable final RpcResult<SetRoleOutput> setRoleOutputRpcResult) {
+ LOG.info("Role {} successfully set on device {}", role, roleContext.getNodeId());
+ notifyListenersRoleChangeOnDevice(roleContext.getNodeId(), true, role, init);
}
@Override
- public void onFailure(final Throwable t) {
- LOG.warn("Delete Node {} failed.", deviceState.getNodeId(), t);
+ public void onFailure(@Nonnull final Throwable throwable) {
+ LOG.warn("Unable to set role {} on device {}", role, roleContext.getNodeId(), throwable);
+ notifyListenersRoleChangeOnDevice(roleContext.getNodeId(), false, role, init);
}
});
- return delFuture;
}
- private void unregistrationHelper(final EntityOwnershipChange ownershipChange, final RoleContext roleContext) {
- LOG.info("Initiate removal from operational. Possibly the last node to be disconnected for :{}. ",
- ownershipChange);
- Futures.addCallback(removeDeviceFromOperDS(roleContext), new FutureCallback<Void>() {
+
+ private ListenableFuture<RpcResult<SetRoleOutput>> sendRoleChangeToDevice(final OfpRole newRole, final RoleContext roleContext) {
+ LOG.debug("Sending new role {} to device {}", newRole, roleContext.getNodeId());
+ final Future<RpcResult<SetRoleOutput>> setRoleOutputFuture;
+ final Short version = conductor.gainVersionSafely(roleContext.getNodeId());
+ if (null == version) {
+ LOG.debug("Device version is null");
+ return Futures.immediateFuture(null);
+ }
+ if (version < OFConstants.OFP_VERSION_1_3) {
+ LOG.debug("Device version not support ROLE");
+ return Futures.immediateFuture(null);
+ } else {
+ final SetRoleInput setRoleInput = (new SetRoleInputBuilder()).setControllerRole(newRole)
+ .setNode(new NodeRef(DeviceStateUtil.createNodeInstanceIdentifier(roleContext.getNodeId()))).build();
+ setRoleOutputFuture = roleContext.getSalRoleService().setRole(setRoleInput);
+ final TimerTask timerTask = new TimerTask() {
+
+ @Override
+ public void run(final Timeout timeout) throws Exception {
+ if (!setRoleOutputFuture.isDone()) {
+ LOG.warn("New role {} was not propagated to device {} during 10 sec", newRole, roleContext.getNodeId());
+ setRoleOutputFuture.cancel(true);
+ }
+ }
+ };
+ conductor.newTimeout(timerTask, 10, TimeUnit.SECONDS);
+ }
+ return JdkFutureAdapters.listenInPoolThread(setRoleOutputFuture);
+ }
+
+ @VisibleForTesting
+ CheckedFuture<Void, TransactionCommitFailedException> removeDeviceFromOperationalDS(final NodeId nodeId) {
+
+ final WriteTransaction delWtx = dataBroker.newWriteOnlyTransaction();
+ delWtx.delete(LogicalDatastoreType.OPERATIONAL, DeviceStateUtil.createNodeInstanceIdentifier(nodeId));
+ final CheckedFuture<Void, TransactionCommitFailedException> delFuture = delWtx.submit();
+ Futures.addCallback(delFuture, new FutureCallback<Void>() {
+
@Override
- public void onSuccess(final Void aVoid) {
- LOG.debug("Removing context for device: {}", roleContext.getDeviceState().getNodeId());
- txContexts.remove(roleContext.getTxEntity(), roleContext);
- roleContext.close();
+ public void onSuccess(final Void result) {
+ LOG.debug("Delete Node {} was successful", nodeId);
+ final RoleContext roleContext = contexts.remove(nodeId);
+ if (roleContext != null) {
+ roleContext.close();
+ }
}
@Override
- public void onFailure(final Throwable throwable) {
- LOG.warn("Removing role context for device: {}, but {}", roleContext.getDeviceState().getNodeId(),
- throwable.getMessage());
- txContexts.remove(roleContext.getTxEntity(), roleContext);
- roleContext.close();
+ public void onFailure(@Nonnull final Throwable t) {
+ LOG.warn("Delete Node {} failed.", nodeId, t);
+ final RoleContext roleContext = contexts.remove(nodeId);
+ if (roleContext != null) {
+ roleContext.close();
+ }
}
});
+ return delFuture;
}
@Override
public void setDeviceTerminationPhaseHandler(final DeviceTerminationPhaseHandler handler) {
deviceTerminationPhaseHandler = handler;
}
+
+ @Override
+ public void servicesChangeDone(final NodeId nodeId, final boolean success) {
+ LOG.debug("Services stopping done for node {} as {}", nodeId, success ? "successful" : "unsuccessful");
+ final RoleContext roleContext = contexts.get(nodeId);
+ if (null != roleContext) {
+ /* Services stopped or failure */
+ roleContext.unregisterCandidate(roleContext.getTxEntity());
+ }
+ }
+
+ @VisibleForTesting
+ RoleContext getRoleContext(final NodeId nodeId){
+ return contexts.get(nodeId);
+ }
+
+ @Override
+ public void addRoleChangeListener(final RoleChangeListener roleChangeListener) {
+ this.listeners.add(roleChangeListener);
+ }
+
+ /**
+ * Invoked when initialization phase is done
+ * @param nodeId node identification
+ * @param success true if initialization done ok, false otherwise
+ */
+ @VisibleForTesting
+ void notifyListenersRoleInitializationDone(final NodeId nodeId, final boolean success){
+ LOG.debug("Notifying registered listeners for role initialization done, no. of listeners {}", listeners.size());
+ for (final RoleChangeListener listener : listeners) {
+ listener.roleInitializationDone(nodeId, success);
+ }
+ }
+
+ /**
+ * Notifies registered listener on role change. Role is the new role on device
+ * If initialization phase is true, we may skip service starting
+ * @param success true if role change on device done ok, false otherwise
+ * @param role new role meant to be set on device
+ * @param initializationPhase if true, then skipp services start
+ */
+ @VisibleForTesting
+ void notifyListenersRoleChangeOnDevice(final NodeId nodeId, final boolean success, final OfpRole role, final boolean initializationPhase){
+ LOG.debug("Notifying registered listeners for role change, no. of listeners {}", listeners.size());
+ for (final RoleChangeListener listener : listeners) {
+ listener.roleChangeOnDevice(nodeId, success, role, initializationPhase);
+ }
+ }
+
}
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicLong;
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
+import org.opendaylight.openflowplugin.api.openflow.device.XidSequencer;
import org.opendaylight.openflowplugin.api.openflow.rpc.RpcContext;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageSpy;
-import org.opendaylight.openflowplugin.impl.util.MdSalRegistrationUtils;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeContext;
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.RpcService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RpcContextImpl implements RpcContext {
private static final Logger LOG = LoggerFactory.getLogger(RpcContextImpl.class);
private final RpcProviderRegistry rpcProviderRegistry;
- private final DeviceContext deviceContext;
private final MessageSpy messageSpy;
private final Semaphore tracker;
+ private final XidSequencer xidSequencer;
// TODO: add private Sal salBroker
private final ConcurrentMap<Class<?>, RoutedRpcRegistration<?>> rpcRegistrations = new ConcurrentHashMap<>();
- private final boolean isStatisticsRpcEnabled;
- private final NotificationPublishService notificationPublishService;
+ private final KeyedInstanceIdentifier<Node, NodeKey> nodeInstanceIdentifier;
- public RpcContextImpl(final MessageSpy messageSpy, final RpcProviderRegistry rpcProviderRegistry, final DeviceContext deviceContext,
- final int maxRequests, final boolean isStatisticsRpcEnabled,
- final NotificationPublishService notificationPublishService) {
- this.deviceContext = Preconditions.checkNotNull(deviceContext);
+ public RpcContextImpl(final RpcProviderRegistry rpcProviderRegistry,
+ final XidSequencer xidSequencer,
+ final MessageSpy messageSpy,
+ final int maxRequests,
+ final KeyedInstanceIdentifier<Node, NodeKey> nodeInstanceIdentifier) {
+ this.xidSequencer = Preconditions.checkNotNull(xidSequencer);
this.messageSpy = Preconditions.checkNotNull(messageSpy);
this.rpcProviderRegistry = Preconditions.checkNotNull(rpcProviderRegistry);
- this.isStatisticsRpcEnabled = isStatisticsRpcEnabled;
- this.notificationPublishService = notificationPublishService;
+ this.nodeInstanceIdentifier = nodeInstanceIdentifier;
+
tracker = new Semaphore(maxRequests, true);
- deviceContext.setRpcContext(RpcContextImpl.this);
}
/**
@Override
public <S extends RpcService> void registerRpcServiceImplementation(final Class<S> serviceClass,
final S serviceInstance) {
- LOG.trace("Try to register service {} for device {}.", serviceClass, deviceContext.getDeviceState().getNodeInstanceIdentifier());
+ LOG.trace("Try to register service {} for device {}.", serviceClass, nodeInstanceIdentifier);
if (! rpcRegistrations.containsKey(serviceClass)) {
final RoutedRpcRegistration<S> routedRpcReg = rpcProviderRegistry.addRoutedRpcImplementation(serviceClass, serviceInstance);
- routedRpcReg.registerPath(NodeContext.class, deviceContext.getDeviceState().getNodeInstanceIdentifier());
+ routedRpcReg.registerPath(NodeContext.class, nodeInstanceIdentifier);
rpcRegistrations.put(serviceClass, routedRpcReg);
- LOG.debug("Registration of service {} for device {}.", serviceClass, deviceContext.getDeviceState().getNodeInstanceIdentifier());
- }
- }
-
- @Override
- public void registerStatCompatibilityServices() {
- if (isStatisticsRpcEnabled) {
- MdSalRegistrationUtils.registerStatCompatibilityServices(RpcContextImpl.this, deviceContext,
- notificationPublishService, new AtomicLong());
+ LOG.debug("Registration of service {} for device {}.", serviceClass, nodeInstanceIdentifier);
}
}
final RpcService rpcService = rpcRegistrations.get(serviceClass).getInstance();
return (S) rpcService;
}
+
/**
* Unregisters all services.
*
for (final Iterator<Entry<Class<?>, RoutedRpcRegistration<?>>> iterator = Iterators
.consumingIterator(rpcRegistrations.entrySet().iterator()); iterator.hasNext();) {
final RoutedRpcRegistration<?> rpcRegistration = iterator.next().getValue();
- rpcRegistration.unregisterPath(NodeContext.class, deviceContext.getDeviceState().getNodeInstanceIdentifier());
+ rpcRegistration.unregisterPath(NodeContext.class, nodeInstanceIdentifier);
rpcRegistration.close();
LOG.debug("Closing RPC Registration of service {} for device {}.", rpcRegistration.getServiceType(),
- deviceContext.getDeviceState().getNodeInstanceIdentifier());
+ nodeInstanceIdentifier);
}
}
LOG.trace("Device queue {} at capacity", this);
return null;
} else {
- LOG.trace("Acquired semaphore for {}, available permits:{} ", deviceContext.getDeviceState().getNodeId(), tracker.availablePermits());
+ LOG.trace("Acquired semaphore for {}, available permits:{} ", nodeInstanceIdentifier.getKey().getId(), tracker.availablePermits());
}
- final Long xid = deviceContext.reservedXidForDeviceMessage();
+ final Long xid = xidSequencer.reserveXidForDeviceMessage();
if (xid == null) {
- LOG.warn("Xid cannot be reserved for new RequestContext, node:{}", deviceContext.getDeviceState().getNodeId());
+ LOG.warn("Xid cannot be reserved for new RequestContext, node:{}", nodeInstanceIdentifier.getKey().getId());
tracker.release();
return null;
}
@Override
public <S extends RpcService> void unregisterRpcServiceImplementation(final Class<S> serviceClass) {
- LOG.trace("Try to unregister serviceClass {} for Node {}", serviceClass, deviceContext.getDeviceState().getNodeId());
+ LOG.trace("Try to unregister serviceClass {} for Node {}", serviceClass, nodeInstanceIdentifier.getKey().getId());
final RoutedRpcRegistration<?> rpcRegistration = rpcRegistrations.remove(serviceClass);
if (rpcRegistration != null) {
- rpcRegistration.unregisterPath(NodeContext.class, deviceContext.getDeviceState().getNodeInstanceIdentifier());
+ rpcRegistration.unregisterPath(NodeContext.class, nodeInstanceIdentifier);
rpcRegistration.close();
- LOG.debug("Unregistration serviceClass {} for Node {}", serviceClass, deviceContext.getDeviceState().getNodeId());
+ LOG.debug("Unregistration serviceClass {} for Node {}", serviceClass, nodeInstanceIdentifier.getKey().getId());
}
}
}
*/
package org.opendaylight.openflowplugin.impl.rpc;
+import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.collect.Iterators;
import java.util.Iterator;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.rpc.RpcContext;
import org.opendaylight.openflowplugin.api.openflow.rpc.RpcManager;
-import org.opendaylight.openflowplugin.impl.util.MdSalRegistrationUtils;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private DeviceTerminationPhaseHandler deviceTerminPhaseHandler;
private final int maxRequestsQuota;
private final ConcurrentMap<NodeId, RpcContext> contexts = new ConcurrentHashMap<>();
- private boolean isStatisticsRpcEnabled;
- private NotificationPublishService notificationPublishService;
+
+ private final LifecycleConductor conductor;
public RpcManagerImpl(final RpcProviderRegistry rpcProviderRegistry,
- final int quotaValue) {
+ final int quotaValue,
+ final LifecycleConductor lifecycleConductor) {
this.rpcProviderRegistry = rpcProviderRegistry;
maxRequestsQuota = quotaValue;
+ this.conductor = lifecycleConductor;
}
@Override
}
@Override
- public void onDeviceContextLevelUp(final DeviceContext deviceContext) throws Exception {
- final NodeId nodeId = deviceContext.getDeviceState().getNodeId();
- final OfpRole ofpRole = deviceContext.getDeviceState().getRole();
+ public void onDeviceContextLevelUp(final NodeId nodeId) throws Exception {
- LOG.debug("Node:{}, deviceContext.getDeviceState().getRole():{}", nodeId, ofpRole);
- final RpcContext rpcContext = new RpcContextImpl(deviceContext.getMessageSpy(), rpcProviderRegistry,
- deviceContext, maxRequestsQuota, isStatisticsRpcEnabled, notificationPublishService);
+ final DeviceContext deviceContext = Preconditions.checkNotNull(conductor.getDeviceContext(nodeId));
- Verify.verify(contexts.putIfAbsent(nodeId, rpcContext) == null, "RpcCtx still not closed for node {}", nodeId);
- deviceContext.addDeviceContextClosedHandler(this);
+ final RpcContext rpcContext = new RpcContextImpl(
+ rpcProviderRegistry,
+ deviceContext,
+ deviceContext.getMessageSpy(),
+ maxRequestsQuota,
+ deviceContext.getDeviceState().getNodeInstanceIdentifier());
- if (OfpRole.BECOMEMASTER.equals(ofpRole)) {
- LOG.info("Registering Openflow Master RPCs for node:{}, role:{}", nodeId, ofpRole);
- MdSalRegistrationUtils.registerMasterServices(rpcContext, deviceContext, ofpRole);
+ deviceContext.setRpcContext(rpcContext);
- } else if(OfpRole.BECOMESLAVE.equals(ofpRole)) {
- // if slave, we need to de-register rpcs if any have been registered, in case of master to slave
- LOG.info("Unregister RPC services (if any) for slave role for node:{}", deviceContext.getDeviceState().getNodeId());
- MdSalRegistrationUtils.registerSlaveServices(rpcContext, ofpRole);
- } else {
- // if we don't know role, we need to unregister rpcs if any have been registered
- LOG.info("Unregister RPC services (if any) for slave role for node:{}", deviceContext.getDeviceState().getNodeId());
- MdSalRegistrationUtils.unregisterServices(rpcContext);
- }
+ Verify.verify(contexts.putIfAbsent(nodeId, rpcContext) == null, "RpcCtx still not closed for node {}", nodeId);
// finish device initialization cycle back to DeviceManager
- deviceInitPhaseHandler.onDeviceContextLevelUp(deviceContext);
+ deviceInitPhaseHandler.onDeviceContextLevelUp(nodeId);
}
@Override
}
}
-
@Override
public void onDeviceContextLevelDown(final DeviceContext deviceContext) {
final RpcContext removedContext = contexts.remove(deviceContext.getDeviceState().getNodeId());
}
deviceTerminPhaseHandler.onDeviceContextLevelDown(deviceContext);
}
- @Override
- public void setStatisticsRpcEnabled(final boolean isStatisticsRpcEnabled) {
- this.isStatisticsRpcEnabled = isStatisticsRpcEnabled;
- }
-
- @Override
- public void setNotificationPublishService(final NotificationPublishService notificationPublishService) {
- this.notificationPublishService = notificationPublishService;
- }
@Override
public void setDeviceTerminationPhaseHandler(final DeviceTerminationPhaseHandler handler) {
import org.opendaylight.yangtools.yang.binding.Identifiable;
import org.opendaylight.yangtools.yang.binding.Identifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* General implementation of {@link ItemLifecycleListener} - keeping of DS/operational reflection up-to-date
*/
public class ItemLifecycleListenerImpl implements ItemLifecycleListener {
+ private static final Logger LOG = LoggerFactory.getLogger(ItemLifecycleListenerImpl.class);
+
private final DeviceContext deviceContext;
public ItemLifecycleListenerImpl(DeviceContext deviceContext) {
@Override
public <I extends Identifiable<K> & DataObject, K extends Identifier<I>> void onAdded(KeyedInstanceIdentifier<I, K> itemPath, I itemBody) {
- deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, itemPath, itemBody);
- deviceContext.submitTransaction();
+ try {
+ deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, itemPath, itemBody);
+ deviceContext.submitTransaction();
+ } catch (Exception e) {
+ LOG.warn("Not able to write to transaction: {}", e.getMessage());
+ }
}
@Override
public <I extends Identifiable<K> & DataObject, K extends Identifier<I>> void onRemoved(KeyedInstanceIdentifier<I, K> itemPath) {
- deviceContext.addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, itemPath);
- deviceContext.submitTransaction();
+ try {
+ deviceContext.addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, itemPath);
+ deviceContext.submitTransaction();
+ } catch (Exception e) {
+ LOG.warn("Not able to write to transaction: {}", e.getMessage());
+ }
}
}
}
context.setResult(builder.build());
- RequestContextUtil.closeRequstContext(context);
+ RequestContextUtil.closeRequestContext(context);
}
}
+    /**
+     * Fails the given request context with an application-level RPC error,
+     * closes the context and returns its future.
+     *
+     * @param requestContext context to fail and close
+     * @param errorMessage   message carried by the generated RPC error
+     * @return the context's future holding the failed {@link RpcResult}
+     */
    public static <T> ListenableFuture<RpcResult<T>> closeRequestContextWithRpcError(final RequestContext<T> requestContext, final String errorMessage) {
        RpcResultBuilder<T> rpcResultBuilder = RpcResultBuilder.<T>failed().withRpcError(RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "", errorMessage));
        requestContext.setResult(rpcResultBuilder.build());
-        closeRequstContext(requestContext);
+        closeRequestContext(requestContext);
        return requestContext.getFuture();
    }
+    /**
+     * Closes the given request context; a failure to close is logged (with
+     * full stack trace) and not propagated.
+     *
+     * @param requestContext context to close
+     */
-    public static void closeRequstContext(final RequestContext<?> requestContext) {
+    public static void closeRequestContext(final RequestContext<?> requestContext) {
        try {
            requestContext.close();
        } catch (Exception e) {
-            LOG.debug("Request context wasn't closed. Exception message: {}", e.getMessage());
+            LOG.error("Request context failed to close", e);
        }
    }
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchPlanStep;
+import org.opendaylight.openflowplugin.impl.services.batch.FlatBatchFlowAdapters;
+import org.opendaylight.openflowplugin.impl.services.batch.FlatBatchGroupAdapters;
+import org.opendaylight.openflowplugin.impl.services.batch.FlatBatchMeterAdapters;
+import org.opendaylight.openflowplugin.impl.util.FlatBatchUtil;
+import org.opendaylight.openflowplugin.impl.util.PathUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.SalFlatBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.SalFlowsBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.SalGroupsBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.SalMetersBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchOutput;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Default implementation of {@link SalFlatBatchService} - translates a flat batch
+ * into an ordered plan and delegates its flow/group/meter steps to
+ * {@link SalFlowsBatchService}, {@link SalGroupsBatchService} and {@link SalMetersBatchService}.
+ */
+public class SalFlatBatchServiceImpl implements SalFlatBatchService {
+    private static final Logger LOG = LoggerFactory.getLogger(SalFlatBatchServiceImpl.class);
+
+    // delegate batch services that execute the individual steps of a flat-batch plan
+    private final SalFlowsBatchService salFlowService;
+    private final SalGroupsBatchService salGroupService;
+    private final SalMetersBatchService salMeterService;
+
+    /**
+     * @param salFlowBatchService   delegate executing flow add/remove/update steps, must not be null
+     * @param salGroupsBatchService delegate executing group add/remove/update steps, must not be null
+     * @param salMetersBatchService delegate executing meter add/remove/update steps, must not be null
+     */
+    public SalFlatBatchServiceImpl(final SalFlowsBatchService salFlowBatchService,
+                                   final SalGroupsBatchService salGroupsBatchService,
+                                   final SalMetersBatchService salMetersBatchService) {
+        this.salFlowService = Preconditions.checkNotNull(salFlowBatchService, "delegate flow service must not be null");
+        this.salGroupService = Preconditions.checkNotNull(salGroupsBatchService, "delegate group service must not be null");
+        this.salMeterService = Preconditions.checkNotNull(salMetersBatchService, "delegate meter service must not be null");
+    }
+
+    /**
+     * Turns the incoming batch into an ordered plan of steps, inserts barriers
+     * where required and runs the steps as a sequential async chain.
+     */
+    @Override
+    public Future<RpcResult<ProcessFlatBatchOutput>> processFlatBatch(final ProcessFlatBatchInput input) {
+        LOG.trace("processing flat batch @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatch().size());
+
+        // create plan
+        final List<BatchPlanStep> batchPlan = FlatBatchUtil.assembleBatchPlan(input.getBatch());
+        // add barriers where needed
+        FlatBatchUtil.markBarriersWhereNeeded(batchPlan);
+        // prepare chain elements
+        final List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> batchChainElements =
+                prepareBatchChain(batchPlan, input.getNode(), input.isExitOnFirstError());
+        // execute plan with barriers and collect outputs chain correspondingly, collect results
+        return executeBatchPlan(batchChainElements);
+    }
+
+    /**
+     * Chains the given step functions sequentially onto an initially successful
+     * empty output; each step receives the cumulative result of all prior steps.
+     */
+    @VisibleForTesting
+    Future<RpcResult<ProcessFlatBatchOutput>> executeBatchPlan(final List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> batchChainElements) {
+        ListenableFuture<RpcResult<ProcessFlatBatchOutput>> chainSummaryResult =
+                RpcResultBuilder.success(new ProcessFlatBatchOutputBuilder().build()).buildFuture();
+
+        for (AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>> chainElement : batchChainElements) {
+            chainSummaryResult = Futures.transform(chainSummaryResult, chainElement);
+        }
+
+        return chainSummaryResult;
+
+    }
+
+    /**
+     * Builds one async function per plan step; each function delegates to the
+     * matching batch service and folds the step outcome into the chain result.
+     * When exitOnFirstError is set, a failed predecessor short-circuits the
+     * remaining steps (the failed result is passed through unchanged).
+     */
+    @VisibleForTesting
+    List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> prepareBatchChain(
+            final List<BatchPlanStep> batchPlan,
+            final NodeRef node,
+            final boolean exitOnFirstError) {
+
+        // create batch API calls based on plan steps
+        final List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> chainJobs = new ArrayList<>();
+        // index of the current step's first task within the whole flat batch,
+        // used by the adapters to report failures with flat-batch positions
+        int stepOffset = 0;
+        for (final BatchPlanStep planStep : batchPlan) {
+            final int currentOffset = stepOffset;
+            chainJobs.add(new AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>() {
+                @Override
+                public ListenableFuture<RpcResult<ProcessFlatBatchOutput>> apply(final RpcResult<ProcessFlatBatchOutput> chainInput) throws Exception {
+                    if (exitOnFirstError && !chainInput.isSuccessful()) {
+                        LOG.debug("error on flat batch chain occurred -> skipping step {}", planStep.getStepType());
+                        return Futures.immediateFuture(chainInput);
+                    }
+
+                    LOG.trace("batch progressing on step type {}", planStep.getStepType());
+                    LOG.trace("batch progressing previous step result: {}", chainInput.isSuccessful());
+
+                    // dispatch the step to the matching delegate batch service
+                    final ListenableFuture<RpcResult<ProcessFlatBatchOutput>> chainOutput;
+                    switch (planStep.getStepType()) {
+                        case FLOW_ADD:
+                            final AddFlowsBatchInput addFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchAddFlow(
+                                    planStep, node);
+                            final Future<RpcResult<AddFlowsBatchOutput>> resultAddFlowFuture = salFlowService.addFlowsBatch(addFlowsBatchInput);
+                            chainOutput = FlatBatchFlowAdapters.adaptFlowBatchFutureForChain(chainInput, resultAddFlowFuture, currentOffset);
+                            break;
+                        case FLOW_REMOVE:
+                            final RemoveFlowsBatchInput removeFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchRemoveFlow(
+                                    planStep, node);
+                            final Future<RpcResult<RemoveFlowsBatchOutput>> resultRemoveFlowFuture = salFlowService.removeFlowsBatch(removeFlowsBatchInput);
+                            chainOutput = FlatBatchFlowAdapters.adaptFlowBatchFutureForChain(chainInput, resultRemoveFlowFuture, currentOffset);
+                            break;
+                        case FLOW_UPDATE:
+                            final UpdateFlowsBatchInput updateFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchUpdateFlow(
+                                    planStep, node);
+                            final Future<RpcResult<UpdateFlowsBatchOutput>> resultUpdateFlowFuture = salFlowService.updateFlowsBatch(updateFlowsBatchInput);
+                            chainOutput = FlatBatchFlowAdapters.adaptFlowBatchFutureForChain(chainInput, resultUpdateFlowFuture, currentOffset);
+                            break;
+                        case GROUP_ADD:
+                            final AddGroupsBatchInput addGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchAddGroup(
+                                    planStep, node);
+                            final Future<RpcResult<AddGroupsBatchOutput>> resultAddGroupFuture = salGroupService.addGroupsBatch(addGroupsBatchInput);
+                            chainOutput = FlatBatchGroupAdapters.adaptGroupBatchFutureForChain(chainInput, resultAddGroupFuture, currentOffset);
+                            break;
+                        case GROUP_REMOVE:
+                            final RemoveGroupsBatchInput removeGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchRemoveGroup(
+                                    planStep, node);
+                            final Future<RpcResult<RemoveGroupsBatchOutput>> resultRemoveGroupFuture = salGroupService.removeGroupsBatch(removeGroupsBatchInput);
+                            chainOutput = FlatBatchGroupAdapters.adaptGroupBatchFutureForChain(chainInput, resultRemoveGroupFuture, currentOffset);
+                            break;
+                        case GROUP_UPDATE:
+                            final UpdateGroupsBatchInput updateGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchUpdateGroup(
+                                    planStep, node);
+                            final Future<RpcResult<UpdateGroupsBatchOutput>> resultUpdateGroupFuture = salGroupService.updateGroupsBatch(updateGroupsBatchInput);
+                            chainOutput = FlatBatchGroupAdapters.adaptGroupBatchFutureForChain(chainInput, resultUpdateGroupFuture, currentOffset);
+                            break;
+                        case METER_ADD:
+                            final AddMetersBatchInput addMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchAddMeter(
+                                    planStep, node);
+                            final Future<RpcResult<AddMetersBatchOutput>> resultAddMeterFuture = salMeterService.addMetersBatch(addMetersBatchInput);
+                            chainOutput = FlatBatchMeterAdapters.adaptMeterBatchFutureForChain(chainInput, resultAddMeterFuture, currentOffset);
+                            break;
+                        case METER_REMOVE:
+                            final RemoveMetersBatchInput removeMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchRemoveMeter(
+                                    planStep, node);
+                            final Future<RpcResult<RemoveMetersBatchOutput>> resultRemoveMeterFuture = salMeterService.removeMetersBatch(removeMetersBatchInput);
+                            chainOutput = FlatBatchMeterAdapters.adaptMeterBatchFutureForChain(chainInput, resultRemoveMeterFuture, currentOffset);
+                            break;
+                        case METER_UPDATE:
+                            final UpdateMetersBatchInput updateMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchUpdateMeter(
+                                    planStep, node);
+                            final Future<RpcResult<UpdateMetersBatchOutput>> resultUpdateMeterFuture = salMeterService.updateMetersBatch(updateMetersBatchInput);
+                            chainOutput = FlatBatchMeterAdapters.adaptMeterBatchFutureForChain(chainInput, resultUpdateMeterFuture, currentOffset);
+                            break;
+                        default:
+                            LOG.warn("Unsupported plan-step type occurred: {} -> OMITTING", planStep.getStepType());
+                            chainOutput = Futures.immediateFuture(chainInput);
+                    }
+                    return chainOutput;
+                }
+            });
+            stepOffset += planStep.getTaskBag().size();
+        }
+
+        return chainJobs;
+    }
+
+}
} else {
flowId = FlowUtil.createAlienFlowId(input.getTableId());
}
-
+ LOG.trace("Calling add flow for flow with ID ={}.", flowId);
final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
final FlowDescriptor flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
deviceContext.getDeviceFlowRegistry().store(flowRegistryKey, flowDescriptor);
- final ListenableFuture<RpcResult<AddFlowOutput>> future = flowAdd.processFlowModInputBuilders(flowAdd.toFlowModInputs(input));
+ final ListenableFuture<RpcResult<AddFlowOutput>> future =
+ flowAdd.processFlowModInputBuilders(flowAdd.toFlowModInputs(input));
Futures.addCallback(future, new FutureCallback<RpcResult<AddFlowOutput>>() {
@Override
public void onSuccess(final RpcResult<AddFlowOutput> rpcResult) {
if (rpcResult.isSuccessful()) {
- LOG.debug("flow add finished without error, id={}", flowId.getValue());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("flow add with id={},finished without error,", flowId.getValue());
+ }
if (itemLifecycleListener != null) {
KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
deviceContext.getDeviceState().getNodeInstanceIdentifier());
itemLifecycleListener.onAdded(flowPath, flowBuilder.build());
}
} else {
- LOG.debug("flow add failed with error, id={}", flowId.getValue());
- }
+ LOG.error("flow add failed for id={}, errors={}", flowId.getValue(), errorsToString(rpcResult.getErrors()));
+ }
}
@Override
public void onFailure(final Throwable throwable) {
deviceContext.getDeviceFlowRegistry().markToBeremoved(flowRegistryKey);
- LOG.trace("Service call for adding flows failed, id={}.", flowId.getValue(), throwable);
+ LOG.error("Service call for adding flow with id={} failed, reason {} .", flowId.getValue(), throwable);
}
});
+    /**
+     * Sends a flow-remove to the device and, on success, marks the flow for
+     * removal in the device flow registry; errors are logged, not rethrown.
+     */
    public Future<RpcResult<RemoveFlowOutput>> removeFlow(final RemoveFlowInput input) {
        LOG.trace("Calling remove flow for flow with ID ={}.", input.getFlowRef());
-        final ListenableFuture<RpcResult<RemoveFlowOutput>> future = flowRemove.processFlowModInputBuilders(flowRemove.toFlowModInputs(input));
+        final ListenableFuture<RpcResult<RemoveFlowOutput>> future =
+                flowRemove.processFlowModInputBuilders(flowRemove.toFlowModInputs(input));
        Futures.addCallback(future, new FutureCallback<RpcResult<RemoveFlowOutput>>() {
            @Override
            public void onSuccess(final RpcResult<RemoveFlowOutput> result) {
                if (result.isSuccessful()) {
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("flow removed finished without error,");
+                    }
                    FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
                    deviceContext.getDeviceFlowRegistry().markToBeremoved(flowRegistryKey);
                    if (itemLifecycleListener != null) {
-                        final FlowDescriptor flowDescriptor = deviceContext.getDeviceFlowRegistry().retrieveIdForFlow(flowRegistryKey);
+                        final FlowDescriptor flowDescriptor =
+                                deviceContext.getDeviceFlowRegistry().retrieveIdForFlow(flowRegistryKey);
                        if (flowDescriptor != null) {
                            KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
                                    deviceContext.getDeviceState().getNodeInstanceIdentifier());
+                            // NOTE(review): flowPath is computed but never used here — verify that a call
+                            // like itemLifecycleListener.onRemoved(flowPath) was not lost in this change
                        }
                    }
                } else {
-                    if (LOG.isTraceEnabled()) {
-                        StringBuilder errors = new StringBuilder();
-                        Collection<RpcError> rpcErrors = result.getErrors();
-                        if (null != rpcErrors && rpcErrors.size() > 0) {
-                            for (RpcError rpcError : rpcErrors) {
-                                errors.append(rpcError.getMessage());
-                            }
-                        }
-                        LOG.trace("Flow modification failed. Errors : {}", errors.toString());
-                    }
+                    LOG.error("Flow remove failed with errors : {}",errorsToString(result.getErrors()));
                }
            }
            @Override
            public void onFailure(final Throwable throwable) {
-                LOG.trace("Flow modification failed..", throwable);
+                // NOTE(review): the second '{}' placeholder consumes the throwable, so SLF4J will
+                // not print its stack trace — consider dropping the placeholder instead
+                LOG.error("Service call for removing flow with id {} failed ,reason {}",input.getFlowRef().getValue(), throwable);
            }
        });
        return future;
    }
+    /**
+     * Concatenates the messages of the given RPC errors into one string.
+     *
+     * @param rpcErrors errors returned by a flow RPC, may be null or empty
+     * @return concatenated error messages, empty string when there are none
+     */
+    private static String errorsToString(final Collection<RpcError> rpcErrors) {
+        final StringBuilder errors = new StringBuilder();
+        if (rpcErrors != null) {
+            // a null message is rendered as "null", matching previous behavior
+            for (final RpcError rpcError : rpcErrors) {
+                errors.append(rpcError.getMessage());
+            }
+        }
+        return errors.toString();
+    }
+
@Override
public Future<RpcResult<UpdateFlowOutput>> updateFlow(final UpdateFlowInput input) {
final UpdateFlowInput in = input;
@Override
public void onFailure(final Throwable throwable) {
- LOG.debug("Flow update failed", throwable);
+ LOG.error("Service call for updating flow failed, reason{}", throwable);
}
});
return future;
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.openflowplugin.impl.util.BarrierUtil;
+import org.opendaylight.openflowplugin.impl.util.FlowUtil;
+import org.opendaylight.openflowplugin.impl.util.PathUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.flow.update.OriginalFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.flow.update.UpdatedFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowInputGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowInputUpdateGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.SalFlowsBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.update.flows.batch.input.BatchUpdateFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * default implementation of {@link SalFlowsBatchService} - delegates work to {@link SalFlowService}
+ */
+public class SalFlowsBatchServiceImpl implements SalFlowsBatchService {
+    private static final Logger LOG = LoggerFactory.getLogger(SalFlowsBatchServiceImpl.class);
+
+    // single-flow service each batch item is fanned out to
+    private final SalFlowService salFlowService;
+    // used to chain an optional barrier after a whole batch
+    private final FlowCapableTransactionService transactionService;
+
+    /**
+     * @param salFlowService     delegate single-flow service, must not be null
+     * @param transactionService barrier service, must not be null
+     */
+    public SalFlowsBatchServiceImpl(final SalFlowService salFlowService,
+                                    final FlowCapableTransactionService transactionService) {
+        this.salFlowService = Preconditions.checkNotNull(salFlowService, "delegate flow service must not be null");
+        this.transactionService = Preconditions.checkNotNull(transactionService, "delegate transaction service must not be null");
+    }
+
+    /**
+     * Fans out each batch item to {@link SalFlowService#removeFlow}, cumulates
+     * per-flow failures and optionally chains a barrier after the batch.
+     */
+    @Override
+    public Future<RpcResult<RemoveFlowsBatchOutput>> removeFlowsBatch(final RemoveFlowsBatchInput input) {
+        LOG.trace("Removing flows @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchRemoveFlows().size());
+        final ArrayList<ListenableFuture<RpcResult<RemoveFlowOutput>>> resultsLot = new ArrayList<>();
+        for (BatchFlowInputGrouping batchFlow : input.getBatchRemoveFlows()) {
+            final RemoveFlowInput removeFlowInput = new RemoveFlowInputBuilder(batchFlow)
+                    .setFlowRef(createFlowRef(input.getNode(), batchFlow))
+                    .setNode(input.getNode())
+                    .build();
+            resultsLot.add(JdkFutureAdapters.listenInPoolThread(salFlowService.removeFlow(removeFlowInput)));
+        }
+
+        // successfulAsList: individual failures do not abort the batch; they are cumulated below
+        final ListenableFuture<RpcResult<List<BatchFailedFlowsOutput>>> commonResult =
+                Futures.transform(Futures.successfulAsList(resultsLot),
+                        FlowUtil.<RemoveFlowOutput>createCumulatingFunction(input.getBatchRemoveFlows()));
+
+        ListenableFuture<RpcResult<RemoveFlowsBatchOutput>> removeFlowsBulkFuture = Futures.transform(commonResult, FlowUtil.FLOW_REMOVE_TRANSFORM);
+
+        if (input.isBarrierAfter()) {
+            removeFlowsBulkFuture = BarrierUtil.chainBarrier(removeFlowsBulkFuture, input.getNode(),
+                    transactionService, FlowUtil.FLOW_REMOVE_COMPOSING_TRANSFORM);
+        }
+
+        return removeFlowsBulkFuture;
+    }
+
+    /**
+     * Fans out each batch item to {@link SalFlowService#addFlow}, cumulates
+     * per-flow failures and optionally chains a barrier after the batch.
+     */
+    @Override
+    public Future<RpcResult<AddFlowsBatchOutput>> addFlowsBatch(final AddFlowsBatchInput input) {
+        LOG.trace("Adding flows @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchAddFlows().size());
+        final ArrayList<ListenableFuture<RpcResult<AddFlowOutput>>> resultsLot = new ArrayList<>();
+        for (BatchFlowInputGrouping batchFlow : input.getBatchAddFlows()) {
+            final AddFlowInput addFlowInput = new AddFlowInputBuilder(batchFlow)
+                    .setFlowRef(createFlowRef(input.getNode(), batchFlow))
+                    .setNode(input.getNode())
+                    .build();
+            resultsLot.add(JdkFutureAdapters.listenInPoolThread(salFlowService.addFlow(addFlowInput)));
+        }
+
+        final ListenableFuture<RpcResult<List<BatchFailedFlowsOutput>>> commonResult =
+                Futures.transform(Futures.successfulAsList(resultsLot),
+                        FlowUtil.<AddFlowOutput>createCumulatingFunction(input.getBatchAddFlows()));
+
+        ListenableFuture<RpcResult<AddFlowsBatchOutput>> addFlowsBulkFuture =
+                Futures.transform(commonResult, FlowUtil.FLOW_ADD_TRANSFORM);
+
+        if (input.isBarrierAfter()) {
+            addFlowsBulkFuture = BarrierUtil.chainBarrier(addFlowsBulkFuture, input.getNode(),
+                    transactionService, FlowUtil.FLOW_ADD_COMPOSING_TRANSFORM);
+        }
+
+        return addFlowsBulkFuture;
+    }
+
+    // builds the FlowRef path for a batched flow under the given node
+    // NOTE(review): unchecked cast — assumes NodeRef always wraps an InstanceIdentifier<Node>; confirm with callers
+    private static FlowRef createFlowRef(final NodeRef nodeRef, final BatchFlowInputGrouping batchFlow) {
+        return FlowUtil.buildFlowPath((InstanceIdentifier<Node>) nodeRef.getValue(),
+                batchFlow.getTableId(), batchFlow.getFlowId());
+    }
+
+    // builds the FlowRef path for an updated flow; table id is taken from the ORIGINAL flow
+    private static FlowRef createFlowRef(final NodeRef nodeRef, final BatchFlowInputUpdateGrouping batchFlow) {
+        return FlowUtil.buildFlowPath((InstanceIdentifier<Node>) nodeRef.getValue(),
+                batchFlow.getOriginalBatchedFlow().getTableId(), batchFlow.getFlowId());
+    }
+
+    /**
+     * Fans out each batch item to {@link SalFlowService#updateFlow}, cumulates
+     * per-flow failures and optionally chains a barrier after the batch.
+     */
+    @Override
+    public Future<RpcResult<UpdateFlowsBatchOutput>> updateFlowsBatch(final UpdateFlowsBatchInput input) {
+        LOG.trace("Updating flows @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchUpdateFlows().size());
+        final ArrayList<ListenableFuture<RpcResult<UpdateFlowOutput>>> resultsLot = new ArrayList<>();
+        for (BatchUpdateFlows batchFlow : input.getBatchUpdateFlows()) {
+            final UpdateFlowInput updateFlowInput = new UpdateFlowInputBuilder(input)
+                    .setOriginalFlow(new OriginalFlowBuilder(batchFlow.getOriginalBatchedFlow()).build())
+                    .setUpdatedFlow(new UpdatedFlowBuilder(batchFlow.getUpdatedBatchedFlow()).build())
+                    .setFlowRef(createFlowRef(input.getNode(), batchFlow))
+                    .setNode(input.getNode())
+                    .build();
+            resultsLot.add(JdkFutureAdapters.listenInPoolThread(salFlowService.updateFlow(updateFlowInput)));
+        }
+
+        final ListenableFuture<RpcResult<List<BatchFailedFlowsOutput>>> commonResult =
+                Futures.transform(Futures.successfulAsList(resultsLot), FlowUtil.<UpdateFlowOutput>createCumulatingFunction(input.getBatchUpdateFlows()));
+
+        ListenableFuture<RpcResult<UpdateFlowsBatchOutput>> updateFlowsBulkFuture = Futures.transform(commonResult, FlowUtil.FLOW_UPDATE_TRANSFORM);
+
+        if (input.isBarrierAfter()) {
+            updateFlowsBulkFuture = BarrierUtil.chainBarrier(updateFlowsBulkFuture, input.getNode(),
+                    transactionService, FlowUtil.FLOW_UPDATE_COMPOSING_TRANSFORM);
+        }
+
+        return updateFlowsBulkFuture;
+    }
+
+}
*/
package org.opendaylight.openflowplugin.impl.services;
+import java.util.Collection;
import java.util.concurrent.Future;
import com.google.common.annotations.VisibleForTesting;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
public void onSuccess(RpcResult<AddGroupOutput> result) {
if (result.isSuccessful()) {
- LOG.debug("group add finished without error, id={}", input.getGroupId().getValue());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("group add with id={} finished without error", input.getGroupId().getValue());
+ }
addIfNecessaryToDS(input.getGroupId(), input);
- }
+ } else {
+ LOG.error("group add with id={} failed, errors={}", input.getGroupId().getValue(),
+ errorsToString(result.getErrors()));
+ }
}
@Override
public void onFailure(Throwable t) {
- LOG.error("group add failed for id={}. Exception: {}", input.getGroupId().getValue(), t);
+ LOG.error("Service call for group add failed for id={}. Exception: {}",
+ input.getGroupId().getValue(), t);
}
});
@Override
public void onSuccess(@Nullable RpcResult<UpdateGroupOutput> result) {
if (result.isSuccessful()) {
- LOG.debug("Group update succeded");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Group update for original id {} succeded", input.getOriginalGroup().getGroupId().getValue());
+ }
removeIfNecessaryFromDS(input.getOriginalGroup().getGroupId());
addIfNecessaryToDS(input.getUpdatedGroup().getGroupId(), input.getUpdatedGroup());
+ }else{
+ LOG.error("group update failed with id={}, errors={}", input.getOriginalGroup().getGroupId(),
+ errorsToString(result.getErrors()));
}
}
@Override
public void onFailure(Throwable t) {
- LOG.debug("Group update failed for id={}. Exception: {}", input.getOriginalGroup().getGroupId(), t);
+ LOG.error("Service call for group update failed for id={}. Exception: {}",
+ input.getOriginalGroup().getGroupId(), t);
}
});
return resultFuture;
@Override
public void onSuccess(@Nullable RpcResult<RemoveGroupOutput> result) {
if (result.isSuccessful()) {
- LOG.debug("Group remove succeded");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Group remove for id {} succeded", input.getGroupId().getValue());
+ }
removeIfNecessaryFromDS(input.getGroupId());
+ }else{
+ LOG.error("group remove failed with id={}, errors={}", input.getGroupId().getValue(),
+ errorsToString(result.getErrors()));
}
}
@Override
public void onFailure(Throwable t) {
- LOG.error("Group remove failed for id={}. Exception: {}", input.getGroupId(), t);
+ LOG.error("Service call for group remove failed for id={}",
+ input.getGroupId().getValue(), t);
}
});
return resultFuture;
}
}
- static KeyedInstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group, GroupKey> createGroupPath(final GroupId groupId, final KeyedInstanceIdentifier<Node, NodeKey> nodePath) {
- return nodePath.augmentation(FlowCapableNode.class).child(org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group.class, new GroupKey(groupId));
+ static KeyedInstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group, GroupKey>
+ createGroupPath(final GroupId groupId, final KeyedInstanceIdentifier<Node, NodeKey> nodePath) {
+ return nodePath.augmentation(FlowCapableNode.class).
+ child(org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group.class, new GroupKey(groupId));
+ }
+
+ private String errorsToString(final Collection<RpcError> rpcErrors) {
+ final StringBuilder errors = new StringBuilder();
+ if (rpcErrors != null &amp;&amp; !rpcErrors.isEmpty()) {
+ for (final RpcError rpcError : rpcErrors) {
+ errors.append(rpcError.getMessage());
+ }
+ }
+ return errors.toString();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.impl.util.BarrierUtil;
+import org.opendaylight.openflowplugin.impl.util.GroupUtil;
+import org.opendaylight.openflowplugin.impl.util.PathUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.group.update.OriginalGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.group.update.UpdatedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.SalGroupsBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.add.groups.batch.input.BatchAddGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.remove.groups.batch.input.BatchRemoveGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.update.groups.batch.input.BatchUpdateGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * default implementation of {@link SalGroupsBatchService} - delegates work to {@link SalGroupService}
+ */
+public class SalGroupsBatchServiceImpl implements SalGroupsBatchService {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SalGroupsBatchServiceImpl.class);
+
+ private final SalGroupService salGroupService;
+ private final FlowCapableTransactionService transactionService;
+
+ public SalGroupsBatchServiceImpl(final SalGroupService salGroupService, final FlowCapableTransactionService transactionService) {
+ this.salGroupService = Preconditions.checkNotNull(salGroupService);
+ this.transactionService = Preconditions.checkNotNull(transactionService);
+ }
+
+ @Override
+ public Future<RpcResult<UpdateGroupsBatchOutput>> updateGroupsBatch(final UpdateGroupsBatchInput input) {
+ final List<BatchUpdateGroups> batchUpdateGroups = input.getBatchUpdateGroups();
+ LOG.trace("Updating groups @ {} : {}", PathUtil.extractNodeId(input.getNode()), batchUpdateGroups.size());
+
+ final ArrayList<ListenableFuture<RpcResult<UpdateGroupOutput>>> resultsLot = new ArrayList<>();
+ for (BatchUpdateGroups batchGroup : batchUpdateGroups) {
+ final UpdateGroupInput updateGroupInput = new UpdateGroupInputBuilder(input)
+ .setOriginalGroup(new OriginalGroupBuilder(batchGroup.getOriginalBatchedGroup()).build())
+ .setUpdatedGroup(new UpdatedGroupBuilder(batchGroup.getUpdatedBatchedGroup()).build())
+ .setGroupRef(createGroupRef(input.getNode(), batchGroup))
+ .setNode(input.getNode())
+ .build();
+ resultsLot.add(JdkFutureAdapters.listenInPoolThread(salGroupService.updateGroup(updateGroupInput)));
+ }
+
+ final Iterable<Group> groups = Iterables.transform(batchUpdateGroups, new Function<BatchUpdateGroups, Group>() {
+ @Nullable
+ @Override
+ public Group apply(@Nullable final BatchUpdateGroups input) {
+ return input.getUpdatedBatchedGroup();
+ }
+ }
+ );
+
+ final ListenableFuture<RpcResult<List<BatchFailedGroupsOutput>>> commonResult =
+ Futures.transform(Futures.allAsList(resultsLot), GroupUtil.<UpdateGroupOutput>createCumulatingFunction(
+ groups, batchUpdateGroups.size()));
+
+ ListenableFuture<RpcResult<UpdateGroupsBatchOutput>> updateGroupsBulkFuture = Futures.transform(
+ commonResult, GroupUtil.GROUP_UPDATE_TRANSFORM);
+
+ if (input.isBarrierAfter()) {
+ updateGroupsBulkFuture = BarrierUtil.chainBarrier(updateGroupsBulkFuture, input.getNode(),
+ transactionService, GroupUtil.GROUP_UPDATE_COMPOSING_TRANSFORM);
+ }
+
+ return updateGroupsBulkFuture;
+ }
+
+ @Override
+ public Future<RpcResult<AddGroupsBatchOutput>> addGroupsBatch(final AddGroupsBatchInput input) {
+ LOG.trace("Adding groups @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchAddGroups().size());
+ final ArrayList<ListenableFuture<RpcResult<AddGroupOutput>>> resultsLot = new ArrayList<>();
+ for (BatchAddGroups addGroup : input.getBatchAddGroups()) {
+ final AddGroupInput addGroupInput = new AddGroupInputBuilder(addGroup)
+ .setGroupRef(createGroupRef(input.getNode(), addGroup))
+ .setNode(input.getNode())
+ .build();
+ resultsLot.add(JdkFutureAdapters.listenInPoolThread(salGroupService.addGroup(addGroupInput)));
+ }
+
+ final ListenableFuture<RpcResult<List<BatchFailedGroupsOutput>>> commonResult =
+ Futures.transform(Futures.allAsList(resultsLot),
+ GroupUtil.<AddGroupOutput>createCumulatingFunction(input.getBatchAddGroups()));
+
+ ListenableFuture<RpcResult<AddGroupsBatchOutput>> addGroupsBulkFuture =
+ Futures.transform(commonResult, GroupUtil.GROUP_ADD_TRANSFORM);
+
+ if (input.isBarrierAfter()) {
+ addGroupsBulkFuture = BarrierUtil.chainBarrier(addGroupsBulkFuture, input.getNode(),
+ transactionService, GroupUtil.GROUP_ADD_COMPOSING_TRANSFORM);
+ }
+
+ return addGroupsBulkFuture;
+ }
+
+ @Override
+ public Future<RpcResult<RemoveGroupsBatchOutput>> removeGroupsBatch(final RemoveGroupsBatchInput input) {
+ LOG.trace("Removing groups @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchRemoveGroups().size());
+ final ArrayList<ListenableFuture<RpcResult<RemoveGroupOutput>>> resultsLot = new ArrayList<>();
+ for (BatchRemoveGroups addGroup : input.getBatchRemoveGroups()) {
+ final RemoveGroupInput removeGroupInput = new RemoveGroupInputBuilder(addGroup)
+ .setGroupRef(createGroupRef(input.getNode(), addGroup))
+ .setNode(input.getNode())
+ .build();
+ resultsLot.add(JdkFutureAdapters.listenInPoolThread(salGroupService.removeGroup(removeGroupInput)));
+ }
+
+ final ListenableFuture<RpcResult<List<BatchFailedGroupsOutput>>> commonResult =
+ Futures.transform(Futures.allAsList(resultsLot),
+ GroupUtil.<RemoveGroupOutput>createCumulatingFunction(input.getBatchRemoveGroups()));
+
+ ListenableFuture<RpcResult<RemoveGroupsBatchOutput>> removeGroupsBulkFuture =
+ Futures.transform(commonResult, GroupUtil.GROUP_REMOVE_TRANSFORM);
+
+ if (input.isBarrierAfter()) {
+ removeGroupsBulkFuture = BarrierUtil.chainBarrier(removeGroupsBulkFuture, input.getNode(),
+ transactionService, GroupUtil.GROUP_REMOVE_COMPOSING_TRANSFORM);
+ }
+
+ return removeGroupsBulkFuture;
+ }
+
+ private static GroupRef createGroupRef(final NodeRef nodeRef, final Group batchGroup) {
+ return GroupUtil.buildGroupPath((InstanceIdentifier<Node>) nodeRef.getValue(), batchGroup.getGroupId());
+ }
+
+ private static GroupRef createGroupRef(final NodeRef nodeRef, final BatchUpdateGroups batchGroup) {
+ return GroupUtil.buildGroupPath((InstanceIdentifier<Node>) nodeRef.getValue(),
+ batchGroup.getUpdatedBatchedGroup().getGroupId());
+ }
+}
*/
package org.opendaylight.openflowplugin.impl.services;
+import java.util.Collection;
import java.util.concurrent.Future;
import com.google.common.util.concurrent.FutureCallback;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
public void onSuccess(@Nullable RpcResult<AddMeterOutput> result) {
if (result.isSuccessful()) {
- LOG.debug("Meter add finished without error, id={}", input.getMeterId());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Meter add finished without error, id={}", input.getMeterId());
+ }
addIfNecessaryToDS(input.getMeterId(),input);
+ } else {
+ LOG.error("Meter add with id {} failed with error {}", input.getMeterId(),
+ errorsToString(result.getErrors()));
}
}
@Override
public void onSuccess(@Nullable RpcResult<UpdateMeterOutput> result) {
if (result.isSuccessful()) {
- LOG.debug("Meter update finished without error, id={}", input.getOriginalMeter().getMeterId());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Meter update finished without error, id={}", input.getOriginalMeter().getMeterId());
+ }
if (itemLifecycleListener != null) {
removeIfNecessaryFromDS(input.getOriginalMeter().getMeterId());
addIfNecessaryToDS(input.getUpdatedMeter().getMeterId(),input.getUpdatedMeter());
}
+ } else {
+ LOG.error("Meter update with id {} failed with error {}", input.getOriginalMeter().getMeterId(),
+ errorsToString(result.getErrors()));
}
}
@Override
public void onFailure(Throwable t) {
- LOG.error("Meter update failed. for id={}. Exception {}.",input.getOriginalMeter().getMeterId(),t);
+ LOG.error("Service call for meter update failed for id={}",
+ input.getOriginalMeter().getMeterId(), t);
}
});
return resultFuture;
@Override
public void onSuccess(@Nullable RpcResult<RemoveMeterOutput> result) {
if (result.isSuccessful()) {
- LOG.debug("Meter remove finished without error, id={}", input.getMeterId());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Meter remove finished without error, id={}", input.getMeterId());
+ }
removeIfNecessaryFromDS(input.getMeterId());
+ } else {
+ LOG.error("Meter remove with id {} failed with error {}", input.getMeterId(),
+ errorsToString(result.getErrors()));
}
}
@Override
public void onFailure(Throwable t) {
- LOG.error("Meter remove failed for id={}. Exception {}",input.getMeterId(),t);
+ LOG.error("Service call for meter remove failed for id={}", input.getMeterId(), t);
}
});
return nodePath.augmentation(FlowCapableNode.class).child(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter.class, new MeterKey(meterId));
}
+ private String errorsToString(final Collection<RpcError> rpcErrors) {
+ final StringBuilder errors = new StringBuilder();
+ if (rpcErrors != null &amp;&amp; !rpcErrors.isEmpty()) {
+ for (final RpcError rpcError : rpcErrors) {
+ errors.append(rpcError.getMessage());
+ }
+ }
+ return errors.toString();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.impl.util.BarrierUtil;
+import org.opendaylight.openflowplugin.impl.util.MeterUtil;
+import org.opendaylight.openflowplugin.impl.util.PathUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.meter.update.OriginalMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.meter.update.UpdatedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.SalMetersBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.add.meters.batch.input.BatchAddMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.remove.meters.batch.input.BatchRemoveMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.update.meters.batch.input.BatchUpdateMeters;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * default implementation of {@link SalMetersBatchService} - delegates work to {@link SalMeterService}
+ */
+public class SalMetersBatchServiceImpl implements SalMetersBatchService {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SalMetersBatchServiceImpl.class);
+
+ private final SalMeterService salMeterService;
+ private final FlowCapableTransactionService transactionService;
+
+ public SalMetersBatchServiceImpl(final SalMeterService salMeterService, final FlowCapableTransactionService transactionService) {
+ this.salMeterService = Preconditions.checkNotNull(salMeterService);
+ this.transactionService = Preconditions.checkNotNull(transactionService);
+ }
+
+ @Override
+ public Future<RpcResult<UpdateMetersBatchOutput>> updateMetersBatch(final UpdateMetersBatchInput input) {
+ final List<BatchUpdateMeters> batchUpdateMeters = input.getBatchUpdateMeters();
+ LOG.trace("Updating meters @ {} : {}", PathUtil.extractNodeId(input.getNode()), batchUpdateMeters.size());
+
+ final ArrayList<ListenableFuture<RpcResult<UpdateMeterOutput>>> resultsLot = new ArrayList<>();
+ for (BatchUpdateMeters batchMeter : batchUpdateMeters) {
+ final UpdateMeterInput updateMeterInput = new UpdateMeterInputBuilder(input)
+ .setOriginalMeter(new OriginalMeterBuilder(batchMeter.getOriginalBatchedMeter()).build())
+ .setUpdatedMeter(new UpdatedMeterBuilder(batchMeter.getUpdatedBatchedMeter()).build())
+ .setMeterRef(createMeterRef(input.getNode(), batchMeter))
+ .setNode(input.getNode())
+ .build();
+ resultsLot.add(JdkFutureAdapters.listenInPoolThread(salMeterService.updateMeter(updateMeterInput)));
+ }
+
+ final Iterable<Meter> meters = Iterables.transform(batchUpdateMeters, new Function<BatchUpdateMeters, Meter>() {
+ @Nullable
+ @Override
+ public Meter apply(@Nullable final BatchUpdateMeters input) {
+ return input.getUpdatedBatchedMeter();
+ }
+ }
+ );
+
+ final ListenableFuture<RpcResult<List<BatchFailedMetersOutput>>> commonResult =
+ Futures.transform(Futures.allAsList(resultsLot), MeterUtil.<UpdateMeterOutput>createCumulativeFunction(
+ meters, batchUpdateMeters.size()));
+
+ ListenableFuture<RpcResult<UpdateMetersBatchOutput>> updateMetersBulkFuture =
+ Futures.transform(commonResult, MeterUtil.METER_UPDATE_TRANSFORM);
+
+ if (input.isBarrierAfter()) {
+ updateMetersBulkFuture = BarrierUtil.chainBarrier(updateMetersBulkFuture, input.getNode(),
+ transactionService, MeterUtil.METER_UPDATE_COMPOSING_TRANSFORM);
+ }
+
+ return updateMetersBulkFuture;
+ }
+
+ @Override
+ public Future<RpcResult<AddMetersBatchOutput>> addMetersBatch(final AddMetersBatchInput input) {
+ LOG.trace("Adding meters @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchAddMeters().size());
+ final ArrayList<ListenableFuture<RpcResult<AddMeterOutput>>> resultsLot = new ArrayList<>();
+ for (BatchAddMeters addMeter : input.getBatchAddMeters()) {
+ final AddMeterInput addMeterInput = new AddMeterInputBuilder(addMeter)
+ .setMeterRef(createMeterRef(input.getNode(), addMeter))
+ .setNode(input.getNode())
+ .build();
+ resultsLot.add(JdkFutureAdapters.listenInPoolThread(salMeterService.addMeter(addMeterInput)));
+ }
+
+ final ListenableFuture<RpcResult<List<BatchFailedMetersOutput>>> commonResult =
+ Futures.transform(Futures.allAsList(resultsLot),
+ MeterUtil.<AddMeterOutput>createCumulativeFunction(input.getBatchAddMeters()));
+
+ ListenableFuture<RpcResult<AddMetersBatchOutput>> addMetersBulkFuture =
+ Futures.transform(commonResult, MeterUtil.METER_ADD_TRANSFORM);
+
+ if (input.isBarrierAfter()) {
+ addMetersBulkFuture = BarrierUtil.chainBarrier(addMetersBulkFuture, input.getNode(),
+ transactionService, MeterUtil.METER_ADD_COMPOSING_TRANSFORM);
+ }
+
+ return addMetersBulkFuture;
+ }
+
+ @Override
+ public Future<RpcResult<RemoveMetersBatchOutput>> removeMetersBatch(final RemoveMetersBatchInput input) {
+ LOG.trace("Removing meters @ {} : {}", PathUtil.extractNodeId(input.getNode()), input.getBatchRemoveMeters().size());
+ final ArrayList<ListenableFuture<RpcResult<RemoveMeterOutput>>> resultsLot = new ArrayList<>();
+ for (BatchRemoveMeters addMeter : input.getBatchRemoveMeters()) {
+ final RemoveMeterInput removeMeterInput = new RemoveMeterInputBuilder(addMeter)
+ .setMeterRef(createMeterRef(input.getNode(), addMeter))
+ .setNode(input.getNode())
+ .build();
+ resultsLot.add(JdkFutureAdapters.listenInPoolThread(salMeterService.removeMeter(removeMeterInput)));
+ }
+
+ final ListenableFuture<RpcResult<List<BatchFailedMetersOutput>>> commonResult =
+ Futures.transform(Futures.allAsList(resultsLot),
+ MeterUtil.<RemoveMeterOutput>createCumulativeFunction(input.getBatchRemoveMeters()));
+
+ ListenableFuture<RpcResult<RemoveMetersBatchOutput>> removeMetersBulkFuture =
+ Futures.transform(commonResult, MeterUtil.METER_REMOVE_TRANSFORM);
+
+ if (input.isBarrierAfter()) {
+ removeMetersBulkFuture = BarrierUtil.chainBarrier(removeMetersBulkFuture, input.getNode(),
+ transactionService, MeterUtil.METER_REMOVE_COMPOSING_TRANSFORM);
+ }
+
+ return removeMetersBulkFuture;
+ }
+
+ private static MeterRef createMeterRef(final NodeRef nodeRef, final Meter batchMeter) {
+ return MeterUtil.buildMeterPath((InstanceIdentifier<Node>) nodeRef.getValue(), batchMeter.getMeterId());
+ }
+
+ private static MeterRef createMeterRef(final NodeRef nodeRef, final BatchUpdateMeters batchMeter) {
+ return MeterUtil.buildMeterPath((InstanceIdentifier<Node>) nodeRef.getValue(),
+ batchMeter.getUpdatedBatchedMeter().getMeterId());
+ }
+}
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.TableFeaturesConvertor;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.TableFeaturesReplyConvertor;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.TransactionId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
final UpdateTableOutputBuilder updateTableOutputBuilder = new UpdateTableOutputBuilder();
updateTableOutputBuilder.setTransactionId(new TransactionId(BigInteger.valueOf(xid)));
finalFuture.set(RpcResultBuilder.success(updateTableOutputBuilder.build()).build());
- writeResponseToOperationalDatastore(multipartReplies);
+ try {
+ writeResponseToOperationalDatastore(multipartReplies);
+ } catch (Exception e) {
+ LOG.warn("Not able to write to operational datastore", e);
+ }
}
} else {
LOG.debug("OnSuccess, rpc result unsuccessful, multipart response for rpc update-table was unsuccessful.");
/**
* @param multipartReplies
*/
- private void writeResponseToOperationalDatastore(final List<MultipartReply> multipartReplies) {
+ private void writeResponseToOperationalDatastore(final List<MultipartReply> multipartReplies) throws Exception {
final List<org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures> salTableFeatures = convertToSalTableFeatures(multipartReplies);
for (final org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures tableFeatureData : salTableFeatures) {
final Short tableId = tableFeatureData.getTableId();
final KeyedInstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures, TableFeaturesKey> tableFeaturesII = flowCapableNodeII
- .child(Table.class, new TableKey(tableId))
.child(org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures.class,
new TableFeaturesKey(tableId));
deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableFeaturesII,
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.service.batch.common.rev160322.BatchOrderGrouping;
+
+/**
+ * Container of CRUD actions of one type (add, remove or update) for one kind of object (flow, group, meter, ...)
+ */
+public class BatchPlanStep {
+ private final List<? extends BatchOrderGrouping> taskBag;
+ private final BatchStepType stepType;
+ private boolean barrierAfter = false;
+
+ public BatchPlanStep(final BatchStepType stepType) {
+ this.stepType = stepType;
+ taskBag = new ArrayList<>();
+ }
+
+ public <T extends BatchOrderGrouping> List<T> getTaskBag() {
+ return (List<T>) taskBag;
+ }
+
+ public BatchStepType getStepType() {
+ return stepType;
+ }
+
+ public boolean isEmpty() {
+ return taskBag.isEmpty();
+ }
+
+ public void setBarrierAfter(final boolean barrier) {
+ this.barrierAfter = barrier;
+ }
+
+ public boolean isBarrierAfter() {
+ return barrierAfter;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+/**
+ * batch step types - holding combinations of target object type and action type (e.g.: flow + update)
+ */
+public enum BatchStepType {
+
+ /** flow -> add operation */FLOW_ADD,
+ /** flow -> remove operation */FLOW_REMOVE,
+ /** flow -> update operation */FLOW_UPDATE,
+
+ /** group -> add operation */GROUP_ADD,
+ /** group -> remove operation */GROUP_REMOVE,
+ /** group -> update operation */GROUP_UPDATE,
+
+ /** meter -> add operation */METER_ADD,
+ /** meter -> remove operation */METER_REMOVE,
+ /** meter -> update operation */METER_UPDATE
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.impl.util.FlatBatchUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.flow._case.FlatBatchAddFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.flow._case.FlatBatchRemoveFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.flow._case.FlatBatchUpdateFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureFlowIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.add.flows.batch.input.BatchAddFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.add.flows.batch.input.BatchAddFlowsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.remove.flows.batch.input.BatchRemoveFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.remove.flows.batch.input.BatchRemoveFlowsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.update.flows.batch.input.BatchUpdateFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.update.flows.batch.input.BatchUpdateFlowsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Transforms between the flat-batch API and the flow batch API.
+ *
+ * <p>Stateless static helpers: they convert a {@link BatchPlanStep} holding flow
+ * tasks into the dedicated flow batch RPC inputs, and fold a flow batch RPC
+ * result back into the cumulative flat-batch result chain.
+ */
+public class FlatBatchFlowAdapters {
+
+    private FlatBatchFlowAdapters() {
+        // utility class - no instances
+        throw new IllegalStateException("This class should not be instantiated.");
+    }
+
+    /**
+     * Converts a plan step of add-flow tasks into an add-flows batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.SalFlowsBatchService#addFlowsBatch(AddFlowsBatchInput)}
+     */
+    public static AddFlowsBatchInput adaptFlatBatchAddFlow(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchAddFlows> batchFlows = new ArrayList<>();
+        for (FlatBatchAddFlow batchAddFlows : planStep.<FlatBatchAddFlow>getTaskBag()) {
+            // copy all flow fields via the Flow-based copy-constructor builder, then
+            // carry the explicit flow-id over; the cast presumes the flat-batch item
+            // exposes the Flow grouping -- TODO confirm against the generated binding
+            final BatchAddFlows addFlows = new BatchAddFlowsBuilder((Flow) batchAddFlows)
+                    .setFlowId(batchAddFlows.getFlowId())
+                    .build();
+            batchFlows.add(addFlows);
+        }
+
+        return new AddFlowsBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchAddFlows(batchFlows)
+                .build();
+    }
+
+    /**
+     * Converts a plan step of remove-flow tasks into a remove-flows batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.SalFlowsBatchService#removeFlowsBatch(RemoveFlowsBatchInput)}
+     */
+    public static RemoveFlowsBatchInput adaptFlatBatchRemoveFlow(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchRemoveFlows> batchFlows = new ArrayList<>();
+        for (FlatBatchRemoveFlow batchRemoveFlow : planStep.<FlatBatchRemoveFlow>getTaskBag()) {
+            // same copy-then-set-id pattern as the add case
+            final BatchRemoveFlows removeFlows = new BatchRemoveFlowsBuilder((Flow) batchRemoveFlow)
+                    .setFlowId(batchRemoveFlow.getFlowId())
+                    .build();
+            batchFlows.add(removeFlows);
+        }
+
+        return new RemoveFlowsBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchRemoveFlows(batchFlows)
+                .build();
+    }
+
+    /**
+     * Converts a plan step of update-flow tasks into an update-flows batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.SalFlowsBatchService#updateFlowsBatch(UpdateFlowsBatchInput)}
+     */
+    public static UpdateFlowsBatchInput adaptFlatBatchUpdateFlow(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchUpdateFlows> batchFlows = new ArrayList<>();
+        for (FlatBatchUpdateFlow batchUpdateFlow : planStep.<FlatBatchUpdateFlow>getTaskBag()) {
+            // update items are copied wholesale; no extra id wiring is done here
+            final BatchUpdateFlows updateFlows = new BatchUpdateFlowsBuilder(batchUpdateFlow)
+                    .build();
+            batchFlows.add(updateFlows);
+        }
+
+        return new UpdateFlowsBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchUpdateFlows(batchFlows)
+                .build();
+    }
+
+    /**
+     * Builds a chaining function that merges one flow-batch step result into the
+     * accumulated flat-batch result.
+     *
+     * @param chainInput here all partial results are collected (values + errors)
+     * @param stepOffset offset of current batch plan step
+     * @return next chained result incorporating results of this step's batch
+     */
+    @VisibleForTesting
+    static <T extends BatchFlowOutputListGrouping> Function<RpcResult<T>, RpcResult<ProcessFlatBatchOutput>>
+    createBatchFlowChainingFunction(final RpcResult<ProcessFlatBatchOutput> chainInput,
+                                    final int stepOffset) {
+        return new Function<RpcResult<T>, RpcResult<ProcessFlatBatchOutput>>() {
+            @Nullable
+            @Override
+            public RpcResult<ProcessFlatBatchOutput> apply(@Nullable final RpcResult<T> input) {
+                // create rpcResult builder honoring both success/failure of current input and chained input + join errors
+                final RpcResultBuilder<ProcessFlatBatchOutput> output = FlatBatchUtil.mergeRpcResults(chainInput, input);
+                // convert values and add to chain values; start from the previously accumulated output
+                final ProcessFlatBatchOutputBuilder outputBuilder = new ProcessFlatBatchOutputBuilder(chainInput.getResult());
+                final List<BatchFailure> batchFailures = wrapBatchFlowFailuresForFlat(input, stepOffset);
+                // join values: lazily create the failure list on first use
+                if (outputBuilder.getBatchFailure() == null) {
+                    outputBuilder.setBatchFailure(new ArrayList<BatchFailure>(batchFailures.size()));
+                }
+                outputBuilder.getBatchFailure().addAll(batchFailures);
+
+                return output.withResult(outputBuilder.build()).build();
+            }
+        };
+    }
+
+    // Re-keys this step's per-flow failures into flat-batch failures, shifting each
+    // batch-order by the step offset so ordering stays global across the whole plan.
+    // NOTE(review): assumes input and input.getResult() are non-null even for failed
+    // RPCs -- verify that guarantee in the callers.
+    private static <T extends BatchFlowOutputListGrouping> List<BatchFailure> wrapBatchFlowFailuresForFlat(
+            final RpcResult<T> input, final int stepOffset) {
+        final List<BatchFailure> batchFailures = new ArrayList<>();
+        if (input.getResult().getBatchFailedFlowsOutput() != null) {
+            for (BatchFailedFlowsOutput stepOutput : input.getResult().getBatchFailedFlowsOutput()) {
+                final BatchFailure batchFailure = new BatchFailureBuilder()
+                        .setBatchOrder(stepOffset + stepOutput.getBatchOrder())
+                        .setBatchItemIdChoice(new FlatBatchFailureFlowIdCaseBuilder()
+                                .setFlowId(stepOutput.getFlowId())
+                                .build())
+                        .build();
+                batchFailures.add(batchFailure);
+            }
+        }
+        return batchFailures;
+    }
+
+    /**
+     * Shortcut for {@link #createBatchFlowChainingFunction(RpcResult, int)} applied via a
+     * {@link ListenableFuture} transformation.
+     *
+     * @param <T> exact type of batch flow output
+     * @param chainInput here all partial results are collected (values + errors)
+     * @param resultUpdateFlowFuture batch flow rpc-result (add/remove/update)
+     * @param currentOffset offset of current batch plan step with respect to entire chain of steps
+     * @return next chained result incorporating results of this step's batch
+     */
+    public static <T extends BatchFlowOutputListGrouping> ListenableFuture<RpcResult<ProcessFlatBatchOutput>>
+    adaptFlowBatchFutureForChain(final RpcResult<ProcessFlatBatchOutput> chainInput,
+                                 final Future<RpcResult<T>> resultUpdateFlowFuture,
+                                 final int currentOffset) {
+        return Futures.transform(JdkFutureAdapters.listenInPoolThread(resultUpdateFlowFuture),
+                FlatBatchFlowAdapters.<T>createBatchFlowChainingFunction(chainInput, currentOffset));
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.impl.util.FlatBatchUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.group._case.FlatBatchAddGroup;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.group._case.FlatBatchRemoveGroup;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.group._case.FlatBatchUpdateGroup;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureGroupIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.BatchGroupOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.add.groups.batch.input.BatchAddGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.add.groups.batch.input.BatchAddGroupsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.remove.groups.batch.input.BatchRemoveGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.remove.groups.batch.input.BatchRemoveGroupsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.update.groups.batch.input.BatchUpdateGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.update.groups.batch.input.BatchUpdateGroupsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Transforms between the flat-batch API and the group batch API.
+ *
+ * <p>Stateless static helpers: they convert a {@link BatchPlanStep} holding group
+ * tasks into the dedicated group batch RPC inputs, and fold a group batch RPC
+ * result back into the cumulative flat-batch result chain.
+ */
+public class FlatBatchGroupAdapters {
+
+    private FlatBatchGroupAdapters() {
+        // utility class - no instances
+        throw new IllegalStateException("This class should not be instantiated.");
+    }
+
+    /**
+     * Converts a plan step of add-group tasks into an add-groups batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.SalGroupsBatchService#addGroupsBatch(AddGroupsBatchInput)}
+     */
+    public static AddGroupsBatchInput adaptFlatBatchAddGroup(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchAddGroups> batchGroups = new ArrayList<>();
+        for (FlatBatchAddGroup batchAddGroup : planStep.<FlatBatchAddGroup>getTaskBag()) {
+            // copy all group fields via the copy-constructor builder, then carry the
+            // explicit group-id over
+            final BatchAddGroups addGroups = new BatchAddGroupsBuilder(batchAddGroup)
+                    .setGroupId(batchAddGroup.getGroupId())
+                    .build();
+            batchGroups.add(addGroups);
+        }
+
+        return new AddGroupsBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchAddGroups(batchGroups)
+                .build();
+    }
+
+    /**
+     * Converts a plan step of remove-group tasks into a remove-groups batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.SalGroupsBatchService#removeGroupsBatch(RemoveGroupsBatchInput)}
+     */
+    public static RemoveGroupsBatchInput adaptFlatBatchRemoveGroup(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchRemoveGroups> batchGroups = new ArrayList<>();
+        for (FlatBatchRemoveGroup batchRemoveGroup : planStep.<FlatBatchRemoveGroup>getTaskBag()) {
+            // same copy-then-set-id pattern as the add case
+            final BatchRemoveGroups removeGroups = new BatchRemoveGroupsBuilder(batchRemoveGroup)
+                    .setGroupId(batchRemoveGroup.getGroupId())
+                    .build();
+            batchGroups.add(removeGroups);
+        }
+
+        return new RemoveGroupsBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchRemoveGroups(batchGroups)
+                .build();
+    }
+
+    /**
+     * Converts a plan step of update-group tasks into an update-groups batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.SalGroupsBatchService#updateGroupsBatch(UpdateGroupsBatchInput)}
+     */
+    public static UpdateGroupsBatchInput adaptFlatBatchUpdateGroup(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchUpdateGroups> batchGroups = new ArrayList<>();
+        for (FlatBatchUpdateGroup batchUpdateGroup : planStep.<FlatBatchUpdateGroup>getTaskBag()) {
+            // update items are copied wholesale; no extra id wiring is done here
+            final BatchUpdateGroups updateGroups = new BatchUpdateGroupsBuilder(batchUpdateGroup)
+                    .build();
+            batchGroups.add(updateGroups);
+        }
+
+        return new UpdateGroupsBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchUpdateGroups(batchGroups)
+                .build();
+    }
+
+    /**
+     * Builds a chaining function that merges one group-batch step result into the
+     * accumulated flat-batch result.
+     *
+     * @param chainInput here all partial results are collected (values + errors)
+     * @param stepOffset offset of current batch plan step
+     * @return next chained result incorporating results of this step's batch
+     */
+    @VisibleForTesting
+    static <T extends BatchGroupOutputListGrouping> Function<RpcResult<T>, RpcResult<ProcessFlatBatchOutput>>
+    createBatchGroupChainingFunction(final RpcResult<ProcessFlatBatchOutput> chainInput,
+                                     final int stepOffset) {
+        return new Function<RpcResult<T>, RpcResult<ProcessFlatBatchOutput>>() {
+            @Nullable
+            @Override
+            public RpcResult<ProcessFlatBatchOutput> apply(@Nullable final RpcResult<T> input) {
+                // create rpcResult builder honoring both success/failure of current input and chained input + join errors
+                final RpcResultBuilder<ProcessFlatBatchOutput> output = FlatBatchUtil.mergeRpcResults(chainInput, input);
+                // convert values and add to chain values; start from the previously accumulated output
+                final ProcessFlatBatchOutputBuilder outputBuilder = new ProcessFlatBatchOutputBuilder(chainInput.getResult());
+                final List<BatchFailure> batchFailures = wrapBatchGroupFailuresForFlat(input, stepOffset);
+                // join values: lazily create the failure list on first use
+                if (outputBuilder.getBatchFailure() == null) {
+                    outputBuilder.setBatchFailure(new ArrayList<BatchFailure>(batchFailures.size()));
+                }
+                outputBuilder.getBatchFailure().addAll(batchFailures);
+
+                return output.withResult(outputBuilder.build()).build();
+            }
+        };
+    }
+
+    // Re-keys this step's per-group failures into flat-batch failures, shifting each
+    // batch-order by the step offset so ordering stays global across the whole plan.
+    // NOTE(review): assumes input and input.getResult() are non-null even for failed
+    // RPCs -- verify that guarantee in the callers.
+    private static <T extends BatchGroupOutputListGrouping> List<BatchFailure> wrapBatchGroupFailuresForFlat(
+            final RpcResult<T> input, final int stepOffset) {
+        final List<BatchFailure> batchFailures = new ArrayList<>();
+        if (input.getResult().getBatchFailedGroupsOutput() != null) {
+            for (BatchFailedGroupsOutput stepOutput : input.getResult().getBatchFailedGroupsOutput()) {
+                final BatchFailure batchFailure = new BatchFailureBuilder()
+                        .setBatchOrder(stepOffset + stepOutput.getBatchOrder())
+                        .setBatchItemIdChoice(new FlatBatchFailureGroupIdCaseBuilder()
+                                .setGroupId(stepOutput.getGroupId())
+                                .build())
+                        .build();
+                batchFailures.add(batchFailure);
+            }
+        }
+        return batchFailures;
+    }
+
+    /**
+     * Shortcut for {@link #createBatchGroupChainingFunction(RpcResult, int)} applied via a
+     * {@link ListenableFuture} transformation.
+     *
+     * @param <T> exact type of batch group output
+     * @param chainInput here all partial results are collected (values + errors)
+     * @param resultUpdateGroupFuture batch group rpc-result (add/remove/update)
+     * @param currentOffset offset of current batch plan step with respect to entire chain of steps
+     * @return next chained result incorporating results of this step's batch
+     */
+    public static <T extends BatchGroupOutputListGrouping> ListenableFuture<RpcResult<ProcessFlatBatchOutput>>
+    adaptGroupBatchFutureForChain(final RpcResult<ProcessFlatBatchOutput> chainInput,
+                                  final Future<RpcResult<T>> resultUpdateGroupFuture,
+                                  final int currentOffset) {
+        return Futures.transform(JdkFutureAdapters.listenInPoolThread(resultUpdateGroupFuture),
+                FlatBatchGroupAdapters.<T>createBatchGroupChainingFunction(chainInput, currentOffset));
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.impl.util.FlatBatchUtil;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.meter._case.FlatBatchAddMeter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.meter._case.FlatBatchRemoveMeter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.meter._case.FlatBatchUpdateMeter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureMeterIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.BatchMeterOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.add.meters.batch.input.BatchAddMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.add.meters.batch.input.BatchAddMetersBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.remove.meters.batch.input.BatchRemoveMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.remove.meters.batch.input.BatchRemoveMetersBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.update.meters.batch.input.BatchUpdateMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.update.meters.batch.input.BatchUpdateMetersBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Transforms between the flat-batch API and the meter batch API.
+ *
+ * <p>Stateless static helpers: they convert a {@link BatchPlanStep} holding meter
+ * tasks into the dedicated meter batch RPC inputs, and fold a meter batch RPC
+ * result back into the cumulative flat-batch result chain.
+ */
+public class FlatBatchMeterAdapters {
+
+    private FlatBatchMeterAdapters() {
+        // utility class - no instances
+        throw new IllegalStateException("This class should not be instantiated.");
+    }
+
+    /**
+     * Converts a plan step of add-meter tasks into an add-meters batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.SalMetersBatchService#addMetersBatch(AddMetersBatchInput)}
+     */
+    public static AddMetersBatchInput adaptFlatBatchAddMeter(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchAddMeters> batchMeters = new ArrayList<>();
+        for (FlatBatchAddMeter batchAddMeter : planStep.<FlatBatchAddMeter>getTaskBag()) {
+            // copy all meter fields via the copy-constructor builder, then carry the
+            // explicit meter-id over
+            final BatchAddMeters addMeters = new BatchAddMetersBuilder(batchAddMeter)
+                    .setMeterId(batchAddMeter.getMeterId())
+                    .build();
+            batchMeters.add(addMeters);
+        }
+
+        return new AddMetersBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchAddMeters(batchMeters)
+                .build();
+    }
+
+    /**
+     * Converts a plan step of remove-meter tasks into a remove-meters batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.SalMetersBatchService#removeMetersBatch(RemoveMetersBatchInput)}
+     */
+    public static RemoveMetersBatchInput adaptFlatBatchRemoveMeter(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchRemoveMeters> batchMeters = new ArrayList<>();
+        for (FlatBatchRemoveMeter batchRemoveMeter : planStep.<FlatBatchRemoveMeter>getTaskBag()) {
+            // same copy-then-set-id pattern as the add case
+            final BatchRemoveMeters removeMeters = new BatchRemoveMetersBuilder(batchRemoveMeter)
+                    .setMeterId(batchRemoveMeter.getMeterId())
+                    .build();
+            batchMeters.add(removeMeters);
+        }
+
+        return new RemoveMetersBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchRemoveMeters(batchMeters)
+                .build();
+    }
+
+    /**
+     * Converts a plan step of update-meter tasks into an update-meters batch RPC input.
+     *
+     * @param planStep batch step containing changes of the same type
+     * @param node pointer for RPC routing
+     * @return input suitable for {@link org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.SalMetersBatchService#updateMetersBatch(UpdateMetersBatchInput)}
+     */
+    public static UpdateMetersBatchInput adaptFlatBatchUpdateMeter(final BatchPlanStep planStep, final NodeRef node) {
+        final List<BatchUpdateMeters> batchMeters = new ArrayList<>();
+        for (FlatBatchUpdateMeter batchUpdateMeter : planStep.<FlatBatchUpdateMeter>getTaskBag()) {
+            // update items are copied wholesale; no extra id wiring is done here
+            final BatchUpdateMeters updateMeters = new BatchUpdateMetersBuilder(batchUpdateMeter)
+                    .build();
+            batchMeters.add(updateMeters);
+        }
+
+        return new UpdateMetersBatchInputBuilder()
+                .setBarrierAfter(planStep.isBarrierAfter())
+                .setNode(node)
+                .setBatchUpdateMeters(batchMeters)
+                .build();
+    }
+
+    /**
+     * Builds a chaining function that merges one meter-batch step result into the
+     * accumulated flat-batch result.
+     *
+     * @param chainInput here all partial results are collected (values + errors)
+     * @param stepOffset offset of current batch plan step
+     * @return next chained result incorporating results of this step's batch
+     */
+    @VisibleForTesting
+    static <T extends BatchMeterOutputListGrouping> Function<RpcResult<T>, RpcResult<ProcessFlatBatchOutput>>
+    createBatchMeterChainingFunction(final RpcResult<ProcessFlatBatchOutput> chainInput,
+                                     final int stepOffset) {
+        return new Function<RpcResult<T>, RpcResult<ProcessFlatBatchOutput>>() {
+            @Nullable
+            @Override
+            public RpcResult<ProcessFlatBatchOutput> apply(@Nullable final RpcResult<T> input) {
+                // create rpcResult builder honoring both success/failure of current input and chained input + join errors
+                final RpcResultBuilder<ProcessFlatBatchOutput> output = FlatBatchUtil.mergeRpcResults(chainInput, input);
+                // convert values and add to chain values; start from the previously accumulated output
+                final ProcessFlatBatchOutputBuilder outputBuilder = new ProcessFlatBatchOutputBuilder(chainInput.getResult());
+                final List<BatchFailure> batchFailures = wrapBatchMeterFailuresForFlat(input, stepOffset);
+                // join values: lazily create the failure list on first use
+                if (outputBuilder.getBatchFailure() == null) {
+                    outputBuilder.setBatchFailure(new ArrayList<BatchFailure>(batchFailures.size()));
+                }
+                outputBuilder.getBatchFailure().addAll(batchFailures);
+
+                return output.withResult(outputBuilder.build()).build();
+            }
+        };
+    }
+
+    // Re-keys this step's per-meter failures into flat-batch failures, shifting each
+    // batch-order by the step offset so ordering stays global across the whole plan.
+    // NOTE(review): assumes input and input.getResult() are non-null even for failed
+    // RPCs -- verify that guarantee in the callers.
+    private static <T extends BatchMeterOutputListGrouping> List<BatchFailure> wrapBatchMeterFailuresForFlat(
+            final RpcResult<T> input, final int stepOffset) {
+        final List<BatchFailure> batchFailures = new ArrayList<>();
+        if (input.getResult().getBatchFailedMetersOutput() != null) {
+            for (BatchFailedMetersOutput stepOutput : input.getResult().getBatchFailedMetersOutput()) {
+                final BatchFailure batchFailure = new BatchFailureBuilder()
+                        .setBatchOrder(stepOffset + stepOutput.getBatchOrder())
+                        .setBatchItemIdChoice(new FlatBatchFailureMeterIdCaseBuilder()
+                                .setMeterId(stepOutput.getMeterId())
+                                .build())
+                        .build();
+                batchFailures.add(batchFailure);
+            }
+        }
+        return batchFailures;
+    }
+
+    /**
+     * Shortcut for {@link #createBatchMeterChainingFunction(RpcResult, int)} applied via a
+     * {@link ListenableFuture} transformation.
+     *
+     * @param <T> exact type of batch meter output
+     * @param chainInput here all partial results are collected (values + errors)
+     * @param resultUpdateMeterFuture batch meter rpc-result (add/remove/update)
+     * @param currentOffset offset of current batch plan step with respect to entire chain of steps
+     * @return next chained result incorporating results of this step's batch
+     */
+    public static <T extends BatchMeterOutputListGrouping> ListenableFuture<RpcResult<ProcessFlatBatchOutput>>
+    adaptMeterBatchFutureForChain(final RpcResult<ProcessFlatBatchOutput> chainInput,
+                                  final Future<RpcResult<T>> resultUpdateMeterFuture,
+                                  final int currentOffset) {
+        return Futures.transform(JdkFutureAdapters.listenInPoolThread(resultUpdateMeterFuture),
+                FlatBatchMeterAdapters.<T>createBatchMeterChainingFunction(chainInput, currentOffset));
+    }
+}
import java.util.Iterator;
import java.util.List;
import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.rpc.listener.ItemLifecycleListener;
import org.opendaylight.openflowplugin.api.openflow.statistics.StatisticsContext;
import org.opendaylight.openflowplugin.impl.rpc.AbstractRequestContext;
import org.opendaylight.openflowplugin.impl.services.RequestContextUtil;
import org.opendaylight.openflowplugin.impl.statistics.services.dedicated.StatisticsGatheringOnTheFlyService;
import org.opendaylight.openflowplugin.impl.statistics.services.dedicated.StatisticsGatheringService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 1.4.2015.
- */
public class StatisticsContextImpl implements StatisticsContext {
private static final Logger LOG = LoggerFactory.getLogger(StatisticsContextImpl.class);
private StatisticsGatheringOnTheFlyService statisticsGatheringOnTheFlyService;
private Timeout pollTimeout;
- public StatisticsContextImpl(@CheckForNull final DeviceContext deviceContext,
- final boolean shuttingDownStatisticsPolling) {
- this.deviceContext = Preconditions.checkNotNull(deviceContext);
+ private final LifecycleConductor conductor;
+
+ public StatisticsContextImpl(@CheckForNull final NodeId nodeId, final boolean shuttingDownStatisticsPolling, final LifecycleConductor lifecycleConductor) {
+ this.conductor = lifecycleConductor;
+ this.deviceContext = Preconditions.checkNotNull(conductor.getDeviceContext(nodeId));
this.devState = Preconditions.checkNotNull(deviceContext.getDeviceState());
this.shuttingDownStatisticsPolling = shuttingDownStatisticsPolling;
- emptyFuture = Futures.immediateFuture(new Boolean(false));
+ emptyFuture = Futures.immediateFuture(false);
statisticsGatheringService = new StatisticsGatheringService(this, deviceContext);
statisticsGatheringOnTheFlyService = new StatisticsGatheringOnTheFlyService(this, deviceContext);
itemLifeCycleListener = new ItemLifecycleListenerImpl(deviceContext);
}
}
- @Override
- public ListenableFuture<Boolean> gatherDynamicData() {
- if (shuttingDownStatisticsPolling) {
- LOG.debug("Statistics for device {} is not enabled.", deviceContext.getDeviceState().getNodeId());
- return Futures.immediateFuture(Boolean.TRUE);
- }
- final ListenableFuture<Boolean> errorResultFuture = deviceConnectionCheck();
- if (errorResultFuture != null) {
- return errorResultFuture;
- }
- synchronized (COLLECTION_STAT_TYPE_LOCK) {
- final Iterator<MultipartType> statIterator = collectingStatType.iterator();
- final SettableFuture<Boolean> settableStatResultFuture = SettableFuture.create();
- statChainFuture(statIterator, settableStatResultFuture);
- return settableStatResultFuture;
- }
+ @Override
+ public ListenableFuture<Boolean> gatherDynamicData() {
+ if (shuttingDownStatisticsPolling) {
+ LOG.debug("Statistics for device {} is not enabled.", deviceContext.getDeviceState().getNodeId());
+ return Futures.immediateFuture(Boolean.TRUE);
}
+ final ListenableFuture<Boolean> errorResultFuture = deviceConnectionCheck();
+ if (errorResultFuture != null) {
+ return errorResultFuture;
+ }
+ synchronized (COLLECTION_STAT_TYPE_LOCK) {
+ final Iterator<MultipartType> statIterator = collectingStatType.iterator();
+ final SettableFuture<Boolean> settableStatResultFuture = SettableFuture.create();
- private ListenableFuture<Boolean> chooseStat(final MultipartType multipartType){
- switch (multipartType) {
- case OFPMPFLOW:
- return collectFlowStatistics(multipartType);
- case OFPMPTABLE:
- return collectTableStatistics(multipartType);
- case OFPMPPORTSTATS:
- return collectPortStatistics(multipartType);
- case OFPMPQUEUE:
- return collectQueueStatistics(multipartType);
- case OFPMPGROUPDESC:
- return collectGroupDescStatistics(multipartType);
- case OFPMPGROUP:
- return collectGroupStatistics(multipartType);
- case OFPMPMETERCONFIG:
- return collectMeterConfigStatistics(multipartType);
- case OFPMPMETER:
- return collectMeterStatistics(multipartType);
- default:
- LOG.warn("Unsuported Statistics type {}", multipartType);
- return Futures.immediateCheckedFuture(Boolean.TRUE);
- }
+ // write start timestamp to state snapshot container
+ StatisticsGatheringUtils.markDeviceStateSnapshotStart(deviceContext);
+
+ statChainFuture(statIterator, settableStatResultFuture);
+
+ // write end timestamp to state snapshot container
+ Futures.addCallback(settableStatResultFuture, new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(@Nullable final Boolean result) {
+ StatisticsGatheringUtils.markDeviceStateSnapshotEnd(deviceContext, true);
+ }
+ @Override
+ public void onFailure(final Throwable t) {
+ StatisticsGatheringUtils.markDeviceStateSnapshotEnd(deviceContext, false);
+ }
+ });
+ return settableStatResultFuture;
+ }
+ }
+
+ private ListenableFuture<Boolean> chooseStat(final MultipartType multipartType){
+ switch (multipartType) {
+ case OFPMPFLOW:
+ return collectFlowStatistics(multipartType);
+ case OFPMPTABLE:
+ return collectTableStatistics(multipartType);
+ case OFPMPPORTSTATS:
+ return collectPortStatistics(multipartType);
+ case OFPMPQUEUE:
+ return collectQueueStatistics(multipartType);
+ case OFPMPGROUPDESC:
+ return collectGroupDescStatistics(multipartType);
+ case OFPMPGROUP:
+ return collectGroupStatistics(multipartType);
+ case OFPMPMETERCONFIG:
+ return collectMeterConfigStatistics(multipartType);
+ case OFPMPMETER:
+ return collectMeterStatistics(multipartType);
+ default:
+ LOG.warn("Unsuported Statistics type {}", multipartType);
+ return Futures.immediateCheckedFuture(Boolean.TRUE);
}
+ }
@Override
public <T> RequestContext<T> createRequestContext() {
- final AbstractRequestContext<T> ret = new AbstractRequestContext<T>(deviceContext.reservedXidForDeviceMessage()) {
+ final AbstractRequestContext<T> ret = new AbstractRequestContext<T>(deviceContext.reserveXidForDeviceMessage()) {
@Override
public void close() {
requestContexts.remove(this);
}
}
/** Stores the handle of the currently scheduled statistics poll so it can be cancelled later. */
@Override
public void setPollTimeout(final Timeout pollTimeout) {
    this.pollTimeout = pollTimeout;
}
/** @return the currently scheduled poll timeout, absent when no poll is scheduled yet */
@Override
public Optional<Timeout> getPollTimeout() {
    // Guava Optional: pollTimeout is legitimately null before the first scheduling.
    return Optional.fromNullable(pollTimeout);
}
/**
 * Recursively walks the multipart-type iterator, collecting one statistics
 * type at a time. Completes {@code resultFuture} with TRUE once the iterator
 * is exhausted, or with the failure of the first unsuccessful collection.
 */
private void statChainFuture(final Iterator<MultipartType> iterator, final SettableFuture<Boolean> resultFuture) {
    // Bail out early when the device connection is already gone (RIP state).
    if (ConnectionContext.CONNECTION_STATE.RIP.equals(deviceContext.getPrimaryConnectionContext().getConnectionState())) {
        final String errMsg = String.format("Device connection is closed for Node : %s.",
                deviceContext.getDeviceState().getNodeId());
        LOG.debug(errMsg);
        resultFuture.setException(new IllegalStateException(errMsg));
        return;
    }
    if ( ! iterator.hasNext()) {
        // All statistics types were processed successfully.
        resultFuture.set(Boolean.TRUE);
        LOG.debug("Stats collection successfully finished for node {}", deviceContext.getDeviceState().getNodeId());
        return;
    }

    final MultipartType nextType = iterator.next();
    LOG.debug("Stats iterating to next type for node {} of type {}", deviceContext.getDeviceState().getNodeId(), nextType);

    final ListenableFuture<Boolean> deviceStatisticsCollectionFuture = chooseStat(nextType);
    Futures.addCallback(deviceStatisticsCollectionFuture, new FutureCallback<Boolean>() {
        @Override
        public void onSuccess(final Boolean result) {
            // Chain to the next multipart type.
            statChainFuture(iterator, resultFuture);
        }
        @Override
        public void onFailure(@Nonnull final Throwable t) {
            // The first failure aborts the whole chain.
            resultFuture.setException(t);
        }
    });
}
-
- /**
- * Method checks a device state. It returns null for be able continue. Otherwise it returns immediateFuture
- * which has to be returned from caller too
- *
- * @return
- */
- @VisibleForTesting
- ListenableFuture<Boolean> deviceConnectionCheck () {
- if (!ConnectionContext.CONNECTION_STATE.WORKING.equals(deviceContext.getPrimaryConnectionContext().getConnectionState())) {
- ListenableFuture<Boolean> resultingFuture = SettableFuture.create();
- switch (deviceContext.getPrimaryConnectionContext().getConnectionState()) {
- case RIP:
- final String errMsg = String.format("Device connection doesn't exist anymore. Primary connection status : %s",
- deviceContext.getPrimaryConnectionContext().getConnectionState());
- resultingFuture = Futures.immediateFailedFuture(new Throwable(errMsg));
- break;
- default:
- resultingFuture = Futures.immediateCheckedFuture(Boolean.TRUE);
- break;
- }
- return resultingFuture;
+ /**
+ * Method checks a device state. It returns null for be able continue. Otherwise it returns immediateFuture
+ * which has to be returned from caller too
+ *
+ * @return
+ */
+ @VisibleForTesting
+ ListenableFuture<Boolean> deviceConnectionCheck() {
+ if (!ConnectionContext.CONNECTION_STATE.WORKING.equals(deviceContext.getPrimaryConnectionContext().getConnectionState())) {
+ ListenableFuture<Boolean> resultingFuture = SettableFuture.create();
+ switch (deviceContext.getPrimaryConnectionContext().getConnectionState()) {
+ case RIP:
+ final String errMsg = String.format("Device connection doesn't exist anymore. Primary connection status : %s",
+ deviceContext.getPrimaryConnectionContext().getConnectionState());
+ resultingFuture = Futures.immediateFailedFuture(new Throwable(errMsg));
+ break;
+ default:
+ resultingFuture = Futures.immediateCheckedFuture(Boolean.TRUE);
+ break;
}
- return null;
+ return resultingFuture;
}
+ return null;
+ }
- private ListenableFuture<Boolean> collectFlowStatistics ( final MultipartType multipartType){
- return devState.isFlowStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringOnTheFlyService, deviceContext, /*MultipartType.OFPMPFLOW*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectFlowStatistics(final MultipartType multipartType) {
+ return devState.isFlowStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringOnTheFlyService, deviceContext, /*MultipartType.OFPMPFLOW*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectTableStatistics ( final MultipartType multipartType){
- return devState.isTableStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPTABLE*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectTableStatistics(final MultipartType multipartType) {
+ return devState.isTableStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPTABLE*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectPortStatistics ( final MultipartType multipartType){
- return devState.isPortStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPPORTSTATS*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectPortStatistics(final MultipartType multipartType) {
+ return devState.isPortStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPPORTSTATS*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectQueueStatistics ( final MultipartType multipartType){
- return devState.isQueueStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPQUEUE*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectQueueStatistics(final MultipartType multipartType) {
+ return devState.isQueueStatisticsAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPQUEUE*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectGroupDescStatistics ( final MultipartType multipartType){
- return devState.isGroupAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPGROUPDESC*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectGroupDescStatistics(final MultipartType multipartType) {
+ return devState.isGroupAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPGROUPDESC*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectGroupStatistics ( final MultipartType multipartType){
- return devState.isGroupAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPGROUP*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectGroupStatistics(final MultipartType multipartType) {
+ return devState.isGroupAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPGROUP*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectMeterConfigStatistics ( final MultipartType multipartType){
- return devState.isMetersAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPMETERCONFIG*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectMeterConfigStatistics(final MultipartType multipartType) {
+ return devState.isMetersAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPMETERCONFIG*/ multipartType) : emptyFuture;
+ }
- private ListenableFuture<Boolean> collectMeterStatistics ( final MultipartType multipartType){
- return devState.isMetersAvailable() ? StatisticsGatheringUtils.gatherStatistics(
- statisticsGatheringService, deviceContext, /*MultipartType.OFPMPMETER*/ multipartType) : emptyFuture;
- }
+ private ListenableFuture<Boolean> collectMeterStatistics(final MultipartType multipartType) {
+ return devState.isMetersAvailable() ? StatisticsGatheringUtils.gatherStatistics(
+ statisticsGatheringService, deviceContext, /*MultipartType.OFPMPMETER*/ multipartType) : emptyFuture;
+ }
/** Test hook: injects the statistics gathering service. */
@VisibleForTesting
void setStatisticsGatheringService(final StatisticsGatheringService statisticsGatheringService) {
    this.statisticsGatheringService = statisticsGatheringService;
}
/** Test hook: injects the on-the-fly statistics gathering service. */
@VisibleForTesting
void setStatisticsGatheringOnTheFlyService(final StatisticsGatheringOnTheFlyService
                                                   statisticsGatheringOnTheFlyService) {
    this.statisticsGatheringOnTheFlyService = statisticsGatheringOnTheFlyService;
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import com.google.common.util.concurrent.ListenableFuture;
+import java.text.SimpleDateFormat;
import java.util.Collections;
+import java.util.Date;
import java.util.List;
import javax.annotation.Nullable;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.StatisticsGatherer;
import org.opendaylight.openflowplugin.impl.registry.flow.FlowRegistryKeyFactory;
import org.opendaylight.openflowplugin.impl.statistics.ofpspecific.EventsTimeCounter;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.DateAndTime;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableStatisticsGatheringStatus;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableStatisticsGatheringStatusBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.snapshot.gathering.status.grouping.SnapshotGatheringStatusEnd;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.snapshot.gathering.status.grouping.SnapshotGatheringStatusEndBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.snapshot.gathering.status.grouping.SnapshotGatheringStatusStartBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
*/
public final class StatisticsGatheringUtils {
+ public static String DATE_AND_TIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSSXXX";
+
private static final Logger LOG = LoggerFactory.getLogger(StatisticsGatheringUtils.class);
private static final SinglePurposeMultipartReplyTranslator MULTIPART_REPLY_TRANSLATOR = new SinglePurposeMultipartReplyTranslator();
public static final String QUEUE2_REQCTX = "QUEUE2REQCTX-";
});
}
- private static void processMeterConfigStatsUpdated(final Iterable<MeterConfigStatsUpdated> data, final DeviceContext deviceContext) {
+ private static void processMeterConfigStatsUpdated(final Iterable<MeterConfigStatsUpdated> data, final DeviceContext deviceContext) throws Exception {
final InstanceIdentifier<FlowCapableNode> fNodeIdent = assembleFlowCapableNodeInstanceIdentifier(deviceContext);
deleteAllKnownMeters(deviceContext, fNodeIdent);
for (final MeterConfigStatsUpdated meterConfigStatsUpdated : data) {
}
private static ListenableFuture<Boolean> processFlowStatistics(final Iterable<FlowsStatisticsUpdate> data,
- final DeviceContext deviceContext, final EventIdentifier eventIdentifier) {
+ final DeviceContext deviceContext, final EventIdentifier eventIdentifier) {
final ListenableFuture<Void> deleFuture = deleteAllKnownFlows(deviceContext);
return Futures.transform(deleFuture, new Function<Void, Boolean>() {
public static void writeFlowStatistics(final Iterable<FlowsStatisticsUpdate> data, final DeviceContext deviceContext) {
final InstanceIdentifier<FlowCapableNode> fNodeIdent = assembleFlowCapableNodeInstanceIdentifier(deviceContext);
- for (final FlowsStatisticsUpdate flowsStatistics : data) {
- for (final FlowAndStatisticsMapList flowStat : flowsStatistics.getFlowAndStatisticsMapList()) {
- final FlowBuilder flowBuilder = new FlowBuilder(flowStat);
- flowBuilder.addAugmentation(FlowStatisticsData.class, refineFlowStatisticsAugmentation(flowStat).build());
-
- final short tableId = flowStat.getTableId();
- final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(flowBuilder.build());
- final FlowId flowId = deviceContext.getDeviceFlowRegistry().storeIfNecessary(flowRegistryKey, tableId);
-
- final FlowKey flowKey = new FlowKey(flowId);
- flowBuilder.setKey(flowKey);
- final TableKey tableKey = new TableKey(tableId);
- final InstanceIdentifier<Flow> flowIdent = fNodeIdent.child(Table.class, tableKey).child(Flow.class, flowKey);
- deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, flowIdent, flowBuilder.build());
+ try {
+ for (final FlowsStatisticsUpdate flowsStatistics : data) {
+ for (final FlowAndStatisticsMapList flowStat : flowsStatistics.getFlowAndStatisticsMapList()) {
+ final FlowBuilder flowBuilder = new FlowBuilder(flowStat);
+ flowBuilder.addAugmentation(FlowStatisticsData.class, refineFlowStatisticsAugmentation(flowStat).build());
+
+ final short tableId = flowStat.getTableId();
+ final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(flowBuilder.build());
+ final FlowId flowId = deviceContext.getDeviceFlowRegistry().storeIfNecessary(flowRegistryKey, tableId);
+
+ final FlowKey flowKey = new FlowKey(flowId);
+ flowBuilder.setKey(flowKey);
+ final TableKey tableKey = new TableKey(tableId);
+ final InstanceIdentifier<Flow> flowIdent = fNodeIdent.child(Table.class, tableKey).child(Flow.class, flowKey);
+ deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, flowIdent, flowBuilder.build());
+ }
}
+ } catch (Exception e) {
+ LOG.warn("Not able to write to transaction: {}", e.getMessage());
}
}
public ListenableFuture<Void> apply(final Optional<FlowCapableNode> flowCapNodeOpt) throws Exception {
if (flowCapNodeOpt.isPresent()) {
for (final Table tableData : flowCapNodeOpt.get().getTable()) {
- final Table table = new TableBuilder(tableData).setFlow(Collections.<Flow> emptyList()).build();
+ final Table table = new TableBuilder(tableData).setFlow(Collections.<Flow>emptyList()).build();
final InstanceIdentifier<Table> iiToTable = flowCapableNodePath.child(Table.class, tableData.getKey());
deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, iiToTable, table);
}
return Futures.immediateFuture(null);
}
- private static void processQueueStatistics(final Iterable<QueueStatisticsUpdate> data, final DeviceContext deviceContext) {
+ private static void processQueueStatistics(final Iterable<QueueStatisticsUpdate> data, final DeviceContext deviceContext) throws Exception {
// TODO: clean all queues of all node-connectors before writing up-to-date stats
final InstanceIdentifier<Node> nodeIdent = deviceContext.getDeviceState().getNodeInstanceIdentifier();
for (final QueueStatisticsUpdate queueStatisticsUpdate : data) {
deviceContext.submitTransaction();
}
- private static void processFlowTableStatistics(final Iterable<FlowTableStatisticsUpdate> data, final DeviceContext deviceContext) {
+ private static void processFlowTableStatistics(final Iterable<FlowTableStatisticsUpdate> data, final DeviceContext deviceContext) throws Exception {
final InstanceIdentifier<FlowCapableNode> fNodeIdent = assembleFlowCapableNodeInstanceIdentifier(deviceContext);
for (final FlowTableStatisticsUpdate flowTableStatisticsUpdate : data) {
deviceContext.submitTransaction();
}
- private static void processNodeConnectorStatistics(final Iterable<NodeConnectorStatisticsUpdate> data, final DeviceContext deviceContext) {
+ private static void processNodeConnectorStatistics(final Iterable<NodeConnectorStatisticsUpdate> data, final DeviceContext deviceContext) throws Exception {
final InstanceIdentifier<Node> nodeIdent = deviceContext.getDeviceState().getNodeInstanceIdentifier();
for (final NodeConnectorStatisticsUpdate nodeConnectorStatisticsUpdate : data) {
for (final NodeConnectorStatisticsAndPortNumberMap nConnectPort : nodeConnectorStatisticsUpdate.getNodeConnectorStatisticsAndPortNumberMap()) {
}
private static void processMetersStatistics(final Iterable<MeterStatisticsUpdated> data,
- final DeviceContext deviceContext) {
+ final DeviceContext deviceContext) throws Exception {
final InstanceIdentifier<FlowCapableNode> fNodeIdent = assembleFlowCapableNodeInstanceIdentifier(deviceContext);
for (final MeterStatisticsUpdated meterStatisticsUpdated : data) {
for (final MeterStats mStat : meterStatisticsUpdated.getMeterStats()) {
deviceContext.submitTransaction();
}
/**
 * Schedules deletion of every meter known to the device meter registry from the
 * operational datastore, then clears the marked entries from the registry.
 */
private static void deleteAllKnownMeters(final DeviceContext deviceContext, final InstanceIdentifier<FlowCapableNode> fNodeIdent) throws Exception {
    for (final MeterId meterId : deviceContext.getDeviceMeterRegistry().getAllMeterIds()) {
        final InstanceIdentifier<Meter> meterIdent = fNodeIdent.child(Meter.class, new MeterKey(meterId));
        deviceContext.addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, meterIdent);
    }
    deviceContext.getDeviceMeterRegistry().removeMarked();
}
- private static void processGroupDescStats(final Iterable<GroupDescStatsUpdated> data, final DeviceContext deviceContext) {
+ private static void processGroupDescStats(final Iterable<GroupDescStatsUpdated> data, final DeviceContext deviceContext) throws Exception {
final InstanceIdentifier<FlowCapableNode> fNodeIdent =
deviceContext.getDeviceState().getNodeInstanceIdentifier().augmentation(FlowCapableNode.class);
deleteAllKnownGroups(deviceContext, fNodeIdent);
deviceContext.submitTransaction();
}
/**
 * Schedules deletion of every group known to the device group registry from the
 * operational datastore, then clears the marked entries from the registry.
 */
private static void deleteAllKnownGroups(final DeviceContext deviceContext, final InstanceIdentifier<FlowCapableNode> fNodeIdent) throws Exception {
    for (final GroupId groupId : deviceContext.getDeviceGroupRegistry().getAllGroupIds()) {
        final InstanceIdentifier<Group> groupIdent = fNodeIdent.child(Group.class, new GroupKey(groupId));
        deviceContext.addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, groupIdent);
    }
    deviceContext.getDeviceGroupRegistry().removeMarked();
}
- private static void processGroupStatistics(final Iterable<GroupStatisticsUpdated> data, final DeviceContext deviceContext) {
+ private static void processGroupStatistics(final Iterable<GroupStatisticsUpdated> data, final DeviceContext deviceContext) throws Exception {
final InstanceIdentifier<FlowCapableNode> fNodeIdent = assembleFlowCapableNodeInstanceIdentifier(deviceContext);
for (final GroupStatisticsUpdated groupStatistics : data) {
for (final GroupStats groupStats : groupStatistics.getGroupStats()) {
/** @return datastore path to the FlowCapableNode augmentation of the device's node */
private static InstanceIdentifier<FlowCapableNode> assembleFlowCapableNodeInstanceIdentifier(final DeviceContext deviceContext) {
    return deviceContext.getDeviceState().getNodeInstanceIdentifier().augmentation(FlowCapableNode.class);
}
+
+ /**
+ * Writes snapshot gathering start timestamp + cleans end mark
+ *
+ * @param deviceContext txManager + node path keeper
+ */
+ static void markDeviceStateSnapshotStart(final DeviceContext deviceContext) {
+ final InstanceIdentifier<FlowCapableStatisticsGatheringStatus> statusPath = deviceContext.getDeviceState()
+ .getNodeInstanceIdentifier().augmentation(FlowCapableStatisticsGatheringStatus.class);
+
+ final SimpleDateFormat simpleDateFormat = new SimpleDateFormat(DATE_AND_TIME_FORMAT);
+ final FlowCapableStatisticsGatheringStatus gatheringStatus = new FlowCapableStatisticsGatheringStatusBuilder()
+ .setSnapshotGatheringStatusStart(new SnapshotGatheringStatusStartBuilder()
+ .setBegin(new DateAndTime(simpleDateFormat.format(new Date())))
+ .build())
+ .setSnapshotGatheringStatusEnd(null) // TODO: reconsider if really need to clean end mark here
+ .build();
+ try {
+ deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, statusPath, gatheringStatus);
+ } catch (final Exception e) {
+ LOG.warn("Can't write to transaction: {}", e);
+ }
+
+ deviceContext.submitTransaction();
+ }
+
+ /**
+ * Writes snapshot gathering end timestamp + outcome
+ *
+ * @param deviceContext txManager + node path keeper
+ * @param succeeded outcome of currently finished gathering
+ */
+ static void markDeviceStateSnapshotEnd(final DeviceContext deviceContext, final boolean succeeded) {
+ final InstanceIdentifier<SnapshotGatheringStatusEnd> statusEndPath = deviceContext.getDeviceState()
+ .getNodeInstanceIdentifier().augmentation(FlowCapableStatisticsGatheringStatus.class)
+ .child(SnapshotGatheringStatusEnd.class);
+
+ final SimpleDateFormat simpleDateFormat = new SimpleDateFormat(DATE_AND_TIME_FORMAT);
+ final SnapshotGatheringStatusEnd gatheringStatus = new SnapshotGatheringStatusEndBuilder()
+ .setEnd(new DateAndTime(simpleDateFormat.format(new Date())))
+ .setSucceeded(succeeded)
+ .build();
+ try {
+ deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, statusEndPath, gatheringStatus);
+ } catch (Exception e) {
+ LOG.warn("Can't write to transaction: {}", e);
+ }
+
+ deviceContext.submitTransaction();
+ }
}
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
import java.util.Iterator;
+import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.rpc.ItemLifeCycleSource;
import org.opendaylight.openflowplugin.api.openflow.statistics.StatisticsContext;
import org.opendaylight.openflowplugin.api.openflow.statistics.StatisticsManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 1.4.2015.
- */
public class StatisticsManagerImpl implements StatisticsManager, StatisticsManagerControlService {
private static final Logger LOG = LoggerFactory.getLogger(StatisticsManagerImpl.class);

// Next handler in the device initialization phase chain.
private DeviceInitializationPhaseHandler deviceInitPhaseHandler;
// Next handler in the device termination phase chain.
private DeviceTerminationPhaseHandler deviceTerminPhaseHandler;
// Per-node statistics contexts, keyed by node id.
private final ConcurrentMap<NodeId, StatisticsContext> contexts = new ConcurrentHashMap<>();
// Initial statistics polling delay in milliseconds.
private static final long basicTimerDelay = 3000;
// When true, periodic statistics polling is disabled entirely.
private boolean shuttingDownStatisticsPolling;
private BindingAwareBroker.RpcRegistration<StatisticsManagerControlService> controlServiceRegistration;
// Central lifecycle coordinator: supplies device contexts and timer services.
private final LifecycleConductor conductor;
/** Registers the handler notified once a device's statistics context is up. */
@Override
public void setDeviceInitializationPhaseHandler(final DeviceInitializationPhaseHandler handler) {
    deviceInitPhaseHandler = handler;
}
/**
 * @param rpcProviderRegistry registry used to publish the statistics control RPC; must not be null
 * @param shuttingDownStatisticsPolling when true, periodic statistics polling is disabled
 * @param lifecycleConductor central conductor supplying device contexts and timers
 */
public StatisticsManagerImpl(@CheckForNull final RpcProviderRegistry rpcProviderRegistry,
                             final boolean shuttingDownStatisticsPolling,
                             final LifecycleConductor lifecycleConductor) {
    Preconditions.checkArgument(rpcProviderRegistry != null);
    this.controlServiceRegistration = Preconditions.checkNotNull(rpcProviderRegistry.addRpcImplementation(
            StatisticsManagerControlService.class, this));
    this.shuttingDownStatisticsPolling = shuttingDownStatisticsPolling;
    this.conductor = lifecycleConductor;
}
@Override
- public void onDeviceContextLevelUp(final DeviceContext deviceContext) throws Exception {
- final NodeId nodeId = deviceContext.getDeviceState().getNodeId();
- final OfpRole ofpRole = deviceContext.getDeviceState().getRole();
- LOG.debug("Node:{}, deviceContext.getDeviceState().getRole():{}", nodeId, ofpRole);
-
- if (null == hashedWheelTimer) {
- LOG.trace("This is first device that delivered timer. Starting statistics polling immediately.");
- hashedWheelTimer = deviceContext.getTimer();
- }
- final StatisticsContext statisticsContext = new StatisticsContextImpl(deviceContext, shuttingDownStatisticsPolling);
+ public void onDeviceContextLevelUp(final NodeId nodeId) throws Exception {
+
+ final DeviceContext deviceContext = Preconditions.checkNotNull(conductor.getDeviceContext(nodeId));
+ final StatisticsContext statisticsContext = new StatisticsContextImpl(nodeId, shuttingDownStatisticsPolling, conductor);
Verify.verify(contexts.putIfAbsent(nodeId, statisticsContext) == null, "StatisticsCtx still not closed for Node {}", nodeId);
- deviceContext.addDeviceContextClosedHandler(this);
if (shuttingDownStatisticsPolling) {
- LOG.info("Statistics is shutdown for node:{}", deviceContext.getDeviceState().getNodeId());
+ LOG.info("Statistics is shutdown for node:{}", nodeId);
} else {
- LOG.info("Schedule Statistics poll for node:{}", deviceContext.getDeviceState().getNodeId());
- if (OfpRole.BECOMEMASTER.equals(ofpRole)) {
- initialStatPollForMaster(statisticsContext, deviceContext);
- /* we want to wait for initial statCollecting response */
- return;
- }
+ LOG.info("Schedule Statistics poll for node:{}", nodeId);
scheduleNextPolling(deviceContext, statisticsContext, new TimeCounter());
}
- deviceContext.getDeviceState().setDeviceSynchronized(true);
- deviceInitPhaseHandler.onDeviceContextLevelUp(deviceContext);
- }
-
- private void initialStatPollForMaster(final StatisticsContext statisticsContext, final DeviceContext deviceContext) {
- final ListenableFuture<Boolean> weHaveDynamicData = statisticsContext.gatherDynamicData();
- Futures.addCallback(weHaveDynamicData, new FutureCallback<Boolean>() {
- @Override
- public void onSuccess(final Boolean statisticsGathered) {
- if (statisticsGathered) {
- //there are some statistics on device worth gathering
- final TimeCounter timeCounter = new TimeCounter();
- deviceContext.getDeviceState().setStatisticsPollingEnabledProp(true);
- scheduleNextPolling(deviceContext, statisticsContext, timeCounter);
- LOG.trace("Device dynamic info collecting done. Going to announce raise to next level.");
- try {
- deviceInitPhaseHandler.onDeviceContextLevelUp(deviceContext);
- } catch (final Exception e) {
- LOG.info("failed to complete levelUp on next handler for device {}", deviceContext.getDeviceState().getNodeId());
- deviceContext.shutdownConnection();
- return;
- }
- deviceContext.getDeviceState().setDeviceSynchronized(true);
- } else {
- final String deviceAddress = deviceContext.getPrimaryConnectionContext().getConnectionAdapter().getRemoteAddress().toString();
- LOG.info("Statistics for device {} could not be gathered. Closing its device context.", deviceAddress);
- deviceContext.shutdownConnection();
- }
- }
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.warn("Statistics manager was not able to collect dynamic info for device.", deviceContext.getDeviceState().getNodeId(), throwable);
- deviceContext.shutdownConnection();
- }
- });
+ deviceContext.getDeviceState().setDeviceSynchronized(true);
+ deviceInitPhaseHandler.onDeviceContextLevelUp(nodeId);
}
private void pollStatistics(final DeviceContext deviceContext,
scheduleNextPolling(deviceContext, statisticsContext, timeCounter);
return;
}
+
if (!OfpRole.BECOMEMASTER.equals(deviceContext.getDeviceState().getRole())) {
LOG.debug("Role is not Master so we don't want to poll any stat for device: {}", deviceContext.getDeviceState().getNodeId());
scheduleNextPolling(deviceContext, statisticsContext, timeCounter);
return;
}
-
LOG.debug("POLLING ALL STATS for device: {}", deviceContext.getDeviceState().getNodeId().getValue());
timeCounter.markStart();
final ListenableFuture<Boolean> deviceStatisticsCollectionFuture = statisticsContext.gatherDynamicData();
}
@Override
- public void onFailure(final Throwable throwable) {
+ public void onFailure(@Nonnull final Throwable throwable) {
timeCounter.addTimeMark();
- LOG.info("Statistics gathering for single node was not successful: {}", throwable.getMessage());
- LOG.debug("Statistics gathering for single node was not successful.. ", throwable);
+ LOG.warn("Statistics gathering for single node was not successful: {}", throwable.getMessage());
+ LOG.trace("Statistics gathering for single node was not successful.. ", throwable);
calculateTimerDelay(timeCounter);
- scheduleNextPolling(deviceContext, statisticsContext, timeCounter);
+ if (throwable instanceof CancellationException) {
+ /** This often happens when something wrong with akka or DS, so closing connection will help to restart device **/
+ conductor.closeConnection(deviceContext.getDeviceState().getNodeId());
+ } else {
+ scheduleNextPolling(deviceContext, statisticsContext, timeCounter);
+ }
}
});
- final long averangeTime = TimeUnit.MILLISECONDS.toSeconds(timeCounter.getAverageTimeBetweenMarks());
- final long STATS_TIMEOUT_SEC = averangeTime > 0 ? 3 * averangeTime : DEFAULT_STATS_TIMEOUT_SEC;
+ final long averageTime = TimeUnit.MILLISECONDS.toSeconds(timeCounter.getAverageTimeBetweenMarks());
+ final long STATS_TIMEOUT_SEC = averageTime > 0 ? 3 * averageTime : DEFAULT_STATS_TIMEOUT_SEC;
final TimerTask timerTask = new TimerTask() {
@Override
}
}
};
- deviceContext.getTimer().newTimeout(timerTask, STATS_TIMEOUT_SEC, TimeUnit.SECONDS);
+ conductor.newTimeout(timerTask, STATS_TIMEOUT_SEC, TimeUnit.SECONDS);
}
private void scheduleNextPolling(final DeviceContext deviceContext,
final StatisticsContext statisticsContext,
final TimeCounter timeCounter) {
- if (null != hashedWheelTimer) {
- LOG.debug("SCHEDULING NEXT STATS POLLING for device: {}", deviceContext.getDeviceState().getNodeId().getValue());
- if (!shuttingDownStatisticsPolling) {
- final Timeout pollTimeout = hashedWheelTimer.newTimeout(new TimerTask() {
- @Override
- public void run(final Timeout timeout) throws Exception {
- pollStatistics(deviceContext, statisticsContext, timeCounter);
- }
- }, currentTimerDelay, TimeUnit.MILLISECONDS);
- statisticsContext.setPollTimeout(pollTimeout);
- }
- } else {
- LOG.debug("#!NOT SCHEDULING NEXT STATS POLLING for device: {}", deviceContext.getDeviceState().getNodeId().getValue());
+ LOG.debug("SCHEDULING NEXT STATS POLLING for device: {}", deviceContext.getDeviceState().getNodeId().getValue());
+ // Timer handling is now delegated to the conductor, so the former local
+ // hashedWheelTimer null-check (and its "NOT SCHEDULING" branch) is no longer needed.
+ // Do not reschedule once statistics polling is shutting down.
+ if (!shuttingDownStatisticsPolling) {
+ final Timeout pollTimeout = conductor.newTimeout(new TimerTask() {
+ @Override
+ public void run(final Timeout timeout) throws Exception {
+ pollStatistics(deviceContext, statisticsContext, timeCounter);
+ }
+ }, currentTimerDelay, TimeUnit.MILLISECONDS);
+ statisticsContext.setPollTimeout(pollTimeout);
+ }
}
}
@VisibleForTesting
- protected void calculateTimerDelay(final TimeCounter timeCounter) {
+ void calculateTimerDelay(final TimeCounter timeCounter) {
final long averageStatisticsGatheringTime = timeCounter.getAverageTimeBetweenMarks();
if (averageStatisticsGatheringTime > currentTimerDelay) {
currentTimerDelay *= 2;
}
@VisibleForTesting
- protected static long getCurrentTimerDelay() {
+ static long getCurrentTimerDelay() {
return currentTimerDelay;
}
packetReceivedBuilder.setFlowCookie(new FlowCookie(input.getCookie()));
}
- NodeConnectorRef nodeConnectorRef = NodeConnectorRefToPortTranslator.toNodeConnectorRef(deviceContext.getDeviceState());
+ // Try to create the NodeConnectorRef
+ BigInteger dataPathId = deviceContext.getDeviceState().getFeatures().getDatapathId();
+ NodeConnectorRef nodeConnectorRef = NodeConnectorRefToPortTranslator.toNodeConnectorRef(input, dataPathId);
+ // If we were able to create the NodeConnectorRef, use it
if (nodeConnectorRef != null) {
packetReceivedBuilder.setIngress(nodeConnectorRef);
}
}
return matchBuilder.build();
}
-
- @VisibleForTesting
- static Long getPortNumberFromMatch(final List<MatchEntry> entries) {
- Long port = null;
- for (MatchEntry entry : entries) {
- if (InPortCase.class.equals(entry.getMatchEntryValue().getImplementedInterface())) {
- InPortCase inPortCase = ((InPortCase) entry.getMatchEntryValue());
- org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entry.value.grouping.match.entry.value.in.port._case.InPort inPort = inPortCase.getInPort();
- if (inPort != null) {
- port = inPort.getPortNumber().getValue();
- break;
- }
- }
- }
- return port;
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import javax.annotation.Nullable;
+import org.apache.commons.lang3.tuple.MutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.SendBarrierInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.SendBarrierInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Provides barrier message chaining and factory methods.
+ */
+public final class BarrierUtil {
+
+    // NOTE(review): LOG is declared but not referenced anywhere in this class — confirm before removing.
+    private static final Logger LOG = LoggerFactory.getLogger(BarrierUtil.class);
+
+
+    private BarrierUtil() {
+        throw new IllegalStateException("This class should not be instantiated.");
+    }
+
+
+    /**
+     * chain a barrier message - regardless of previous result and use given {@link Function} to combine
+     * original result and barrier result
+     *
+     * @param <T> type of input future
+     * @param input future to chain barrier to
+     * @param nodeRef target device
+     * @param transactionService barrier service
+     * @param compositeTransform function combining the stored input result and the barrier result
+     *                           into the final result returned to the caller
+     * @return future holding both results (input and of the barrier)
+     */
+    public static <T> ListenableFuture<RpcResult<T>> chainBarrier(
+            final ListenableFuture<RpcResult<T>> input, final NodeRef nodeRef,
+            final FlowCapableTransactionService transactionService,
+            final Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>> compositeTransform) {
+        // mutable holder collecting the input result (left) and the barrier result (right)
+        final MutablePair<RpcResult<T>, RpcResult<Void>> resultPair = new MutablePair<>();
+
+        // store input result and append barrier
+        final ListenableFuture<RpcResult<Void>> barrierResult = Futures.transform(input,
+                new AsyncFunction<RpcResult<T>, RpcResult<Void>>() {
+                    @Override
+                    public ListenableFuture<RpcResult<Void>> apply(@Nullable final RpcResult<T> interInput) throws Exception {
+                        resultPair.setLeft(interInput);
+                        final SendBarrierInput barrierInput = createSendBarrierInput(nodeRef);
+                        return JdkFutureAdapters.listenInPoolThread(transactionService.sendBarrier(barrierInput));
+                    }
+                });
+        // store barrier result and return initiated pair
+        final ListenableFuture<Pair<RpcResult<T>, RpcResult<Void>>> compositeResult = Futures.transform(
+                barrierResult, new Function<RpcResult<Void>, Pair<RpcResult<T>, RpcResult<Void>>>() {
+                    @Nullable
+                    @Override
+                    public Pair<RpcResult<T>, RpcResult<Void>> apply(@Nullable final RpcResult<Void> input) {
+                        resultPair.setRight(input);
+                        return resultPair;
+                    }
+                });
+        // append assembling transform to barrier result
+        return Futures.transform(compositeResult, compositeTransform);
+    }
+
+    /**
+     * Creates a barrier request input bound to the given node.
+     *
+     * @param nodeRef rpc routing context
+     * @return input for {@link FlowCapableTransactionService#sendBarrier(SendBarrierInput)}
+     */
+    public static SendBarrierInput createSendBarrierInput(final NodeRef nodeRef) {
+        return new SendBarrierInputBuilder()
+                .setNode(nodeRef)
+                .build();
+    }
+}
package org.opendaylight.openflowplugin.impl.util;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
import java.math.BigInteger;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-import java.util.concurrent.Future;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
import org.opendaylight.openflowplugin.api.ConnectionException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsData;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsDataBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
final ConnectionContext connectionContext = Preconditions.checkNotNull(deviceContext.getPrimaryConnectionContext());
final short version = deviceState.getVersion();
LOG.trace("initalizeNodeInformation for node {}", deviceState.getNodeId());
- final SettableFuture<Void> returnFuture = SettableFuture.<Void> create();
+ final SettableFuture<Void> returnFuture = SettableFuture.<Void>create();
addNodeToOperDS(deviceContext, returnFuture);
final ListenableFuture<List<RpcResult<List<MultipartReply>>>> deviceFeaturesFuture;
if (OFConstants.OFP_VERSION_1_0 == version) {
Preconditions.checkArgument(deviceContext != null);
final DeviceState deviceState = deviceContext.getDeviceState();
final NodeBuilder nodeBuilder = new NodeBuilder().setId(deviceState.getNodeId()).setNodeConnector(
- Collections.<NodeConnector> emptyList());
+ Collections.<NodeConnector>emptyList());
try {
deviceContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, deviceState.getNodeInstanceIdentifier(),
nodeBuilder.build());
}
static void translateAndWriteReply(final MultipartType type, final DeviceContext dContext,
- final InstanceIdentifier<Node> nodeII, final Collection<MultipartReply> result) {
+ final InstanceIdentifier<Node> nodeII, final Collection<MultipartReply> result) {
try {
for (final MultipartReply reply : result) {
final MultipartReplyBody body = reply.getMultipartReplyBody();
switch (type) {
- case OFPMPDESC:
- Preconditions.checkArgument(body instanceof MultipartReplyDescCase);
- final MultipartReplyDesc replyDesc = ((MultipartReplyDescCase) body).getMultipartReplyDesc();
- final FlowCapableNode fcNode = NodeStaticReplyTranslatorUtil.nodeDescTranslator(replyDesc,
- getIpAddressOf(dContext));
- final InstanceIdentifier<FlowCapableNode> fNodeII = nodeII.augmentation(FlowCapableNode.class);
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, fNodeII, fcNode);
- break;
-
- case OFPMPTABLEFEATURES:
- Preconditions.checkArgument(body instanceof MultipartReplyTableFeaturesCase);
- final MultipartReplyTableFeatures tableFeatures = ((MultipartReplyTableFeaturesCase) body)
- .getMultipartReplyTableFeatures();
- final List<TableFeatures> tables = NodeStaticReplyTranslatorUtil
- .nodeTableFeatureTranslator(tableFeatures);
- for (final TableFeatures table : tables) {
- final Short tableId = table.getTableId();
- final InstanceIdentifier<Table> tableII = nodeII.augmentation(FlowCapableNode.class).child(
- Table.class, new TableKey(tableId));
- final TableBuilder tableBuilder = new TableBuilder().setId(tableId).setTableFeatures(
- Collections.singletonList(table));
- tableBuilder.addAugmentation(FlowTableStatisticsData.class,
- new FlowTableStatisticsDataBuilder().build());
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableII, tableBuilder.build());
- }
- break;
-
- case OFPMPMETERFEATURES:
- Preconditions.checkArgument(body instanceof MultipartReplyMeterFeaturesCase);
- final MultipartReplyMeterFeatures meterFeatures = ((MultipartReplyMeterFeaturesCase) body)
- .getMultipartReplyMeterFeatures();
- final NodeMeterFeatures mFeature = NodeStaticReplyTranslatorUtil
- .nodeMeterFeatureTranslator(meterFeatures);
- final InstanceIdentifier<NodeMeterFeatures> mFeatureII = nodeII
- .augmentation(NodeMeterFeatures.class);
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, mFeatureII, mFeature);
- if (0L < mFeature.getMeterFeatures().getMaxMeter().getValue()) {
- dContext.getDeviceState().setMeterAvailable(true);
- }
- break;
-
- case OFPMPGROUPFEATURES:
- Preconditions.checkArgument(body instanceof MultipartReplyGroupFeaturesCase);
- final MultipartReplyGroupFeatures groupFeatures = ((MultipartReplyGroupFeaturesCase) body)
- .getMultipartReplyGroupFeatures();
- final NodeGroupFeatures gFeature = NodeStaticReplyTranslatorUtil
- .nodeGroupFeatureTranslator(groupFeatures);
- final InstanceIdentifier<NodeGroupFeatures> gFeatureII = nodeII
- .augmentation(NodeGroupFeatures.class);
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, gFeatureII, gFeature);
- break;
-
- case OFPMPPORTDESC:
- Preconditions.checkArgument(body instanceof MultipartReplyPortDescCase);
- final MultipartReplyPortDesc portDesc = ((MultipartReplyPortDescCase) body)
- .getMultipartReplyPortDesc();
- for (final PortGrouping port : portDesc.getPorts()) {
- final short ofVersion = dContext.getDeviceState().getVersion();
- final TranslatorKey translatorKey = new TranslatorKey(ofVersion, PortGrouping.class.getName());
- final MessageTranslator<PortGrouping, FlowCapableNodeConnector> translator = dContext.oook()
- .lookupTranslator(translatorKey);
- final FlowCapableNodeConnector fcNodeConnector = translator.translate(port, dContext, null);
-
- final BigInteger dataPathId = dContext.getPrimaryConnectionContext().getFeatures()
- .getDatapathId();
- final NodeConnectorId nodeConnectorId = NodeStaticReplyTranslatorUtil.nodeConnectorId(
- dataPathId.toString(), port.getPortNo(), ofVersion);
- final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder().setId(nodeConnectorId);
- ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcNodeConnector);
-
- ncBuilder.addAugmentation(FlowCapableNodeConnectorStatisticsData.class,
- new FlowCapableNodeConnectorStatisticsDataBuilder().build());
- final NodeConnector connector = ncBuilder.build();
-
- final InstanceIdentifier<NodeConnector> connectorII = nodeII.child(NodeConnector.class,
- connector.getKey());
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, connectorII, connector);
- }
+ case OFPMPDESC:
+ Preconditions.checkArgument(body instanceof MultipartReplyDescCase);
+ final MultipartReplyDesc replyDesc = ((MultipartReplyDescCase) body).getMultipartReplyDesc();
+ final FlowCapableNode fcNode = NodeStaticReplyTranslatorUtil.nodeDescTranslator(replyDesc,
+ getIpAddressOf(dContext));
+ final InstanceIdentifier<FlowCapableNode> fNodeII = nodeII.augmentation(FlowCapableNode.class);
+ dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, fNodeII, fcNode);
+ break;
+
+ case OFPMPTABLEFEATURES:
+ Preconditions.checkArgument(body instanceof MultipartReplyTableFeaturesCase);
+ final MultipartReplyTableFeatures tableFeaturesMP = ((MultipartReplyTableFeaturesCase) body)
+ .getMultipartReplyTableFeatures();
+ final List<TableFeatures> tableFeatures = NodeStaticReplyTranslatorUtil
+ .nodeTableFeatureTranslator(tableFeaturesMP);
+ for (final TableFeatures tableFeature : tableFeatures) {
+ final Short tableId = tableFeature.getTableId();
+ final KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII =
+ nodeII.augmentation(FlowCapableNode.class)
+ .child(TableFeatures.class, new TableFeaturesKey(tableId));
+ dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableFeaturesII, tableFeature);
+
+ // write parent for table statistics
+ final KeyedInstanceIdentifier<Table, TableKey> tableII =
+ nodeII.augmentation(FlowCapableNode.class)
+ .child(Table.class, new TableKey(tableId));
+ final TableBuilder tableBld = new TableBuilder().setId(tableId)
+ .addAugmentation(FlowTableStatisticsData.class,
+ new FlowTableStatisticsDataBuilder().build());
+
+ dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableII, tableBld.build());
+ }
+ break;
+
+ case OFPMPMETERFEATURES:
+ Preconditions.checkArgument(body instanceof MultipartReplyMeterFeaturesCase);
+ final MultipartReplyMeterFeatures meterFeatures = ((MultipartReplyMeterFeaturesCase) body)
+ .getMultipartReplyMeterFeatures();
+ final NodeMeterFeatures mFeature = NodeStaticReplyTranslatorUtil
+ .nodeMeterFeatureTranslator(meterFeatures);
+ final InstanceIdentifier<NodeMeterFeatures> mFeatureII = nodeII
+ .augmentation(NodeMeterFeatures.class);
+ dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, mFeatureII, mFeature);
+ if (0L < mFeature.getMeterFeatures().getMaxMeter().getValue()) {
+ dContext.getDeviceState().setMeterAvailable(true);
+ }
+ break;
+
+ case OFPMPGROUPFEATURES:
+ Preconditions.checkArgument(body instanceof MultipartReplyGroupFeaturesCase);
+ final MultipartReplyGroupFeatures groupFeatures = ((MultipartReplyGroupFeaturesCase) body)
+ .getMultipartReplyGroupFeatures();
+ final NodeGroupFeatures gFeature = NodeStaticReplyTranslatorUtil
+ .nodeGroupFeatureTranslator(groupFeatures);
+ final InstanceIdentifier<NodeGroupFeatures> gFeatureII = nodeII
+ .augmentation(NodeGroupFeatures.class);
+ dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, gFeatureII, gFeature);
+ break;
+
+ case OFPMPPORTDESC:
+ Preconditions.checkArgument(body instanceof MultipartReplyPortDescCase);
+ final MultipartReplyPortDesc portDesc = ((MultipartReplyPortDescCase) body)
+ .getMultipartReplyPortDesc();
+ for (final PortGrouping port : portDesc.getPorts()) {
+ final short ofVersion = dContext.getDeviceState().getVersion();
+ final TranslatorKey translatorKey = new TranslatorKey(ofVersion, PortGrouping.class.getName());
+ final MessageTranslator<PortGrouping, FlowCapableNodeConnector> translator = dContext.oook()
+ .lookupTranslator(translatorKey);
+ final FlowCapableNodeConnector fcNodeConnector = translator.translate(port, dContext, null);
+
+ final BigInteger dataPathId = dContext.getPrimaryConnectionContext().getFeatures()
+ .getDatapathId();
+ final NodeConnectorId nodeConnectorId = NodeStaticReplyTranslatorUtil.nodeConnectorId(
+ dataPathId.toString(), port.getPortNo(), ofVersion);
+ final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder().setId(nodeConnectorId);
+ ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcNodeConnector);
+
+ ncBuilder.addAugmentation(FlowCapableNodeConnectorStatisticsData.class,
+ new FlowCapableNodeConnectorStatisticsDataBuilder().build());
+ final NodeConnector connector = ncBuilder.build();
+
+ final InstanceIdentifier<NodeConnector> connectorII = nodeII.child(NodeConnector.class,
+ connector.getKey());
+ dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, connectorII, connector);
+ }
- break;
+ break;
- default:
- throw new IllegalArgumentException("Unnexpected MultipartType " + type);
+ default:
+ throw new IllegalArgumentException("Unnexpected MultipartType " + type);
}
}
} catch (final Exception e) {
// FIXME : remove after ovs tableFeatures fix
private static void makeEmptyTables(final DeviceContext dContext, final InstanceIdentifier<Node> nodeII,
- final Short nrOfTables) {
+ final Short nrOfTables) {
LOG.debug("About to create {} empty tables.", nrOfTables);
for (int i = 0; i < nrOfTables; i++) {
final short tId = (short) i;
}
static void createSuccessProcessingCallback(final MultipartType type, final DeviceContext deviceContext,
- final InstanceIdentifier<Node> nodeII,
- final ListenableFuture<RpcResult<List<MultipartReply>>> requestContextFuture) {
+ final InstanceIdentifier<Node> nodeII,
+ final ListenableFuture<RpcResult<List<MultipartReply>>> requestContextFuture) {
Futures.addCallback(requestContextFuture, new FutureCallback<RpcResult<List<MultipartReply>>>() {
@Override
public void onSuccess(final RpcResult<List<MultipartReply>> rpcResult) {
}
private static ListenableFuture<RpcResult<List<MultipartReply>>> getNodeStaticInfo(final MultipartType type,
- final DeviceContext deviceContext, final InstanceIdentifier<Node> nodeII, final short version) {
+ final DeviceContext deviceContext, final InstanceIdentifier<Node> nodeII, final short version) {
final OutboundQueue queue = deviceContext.getPrimaryConnectionContext().getOutboundQueueProvider();
- final Long reserved = deviceContext.reservedXidForDeviceMessage();
+ final Long reserved = deviceContext.reserveXidForDeviceMessage();
final RequestContext<List<MultipartReply>> requestContext = new AbstractRequestContext<List<MultipartReply>>(
reserved) {
@Override
public void onFailure(final Throwable t) {
LOG.info("Fail response from OutboundQueue for multipart type {}.", type);
final RpcResult<List<MultipartReply>> rpcResult = RpcResultBuilder
- .<List<MultipartReply>> failed().build();
+ .<List<MultipartReply>>failed().build();
requestContext.setResult(rpcResult);
if (MultipartType.OFPMPTABLEFEATURES.equals(type)) {
makeEmptyTables(deviceContext, nodeII, deviceContext.getPrimaryConnectionContext()
}
static void chainTableTrunkWriteOF10(final DeviceContext deviceContext,
- final ListenableFuture<List<RpcResult<List<MultipartReply>>>> deviceFeaturesFuture) {
+ final ListenableFuture<List<RpcResult<List<MultipartReply>>>> deviceFeaturesFuture) {
Futures.addCallback(deviceFeaturesFuture, new FutureCallback<List<RpcResult<List<MultipartReply>>>>() {
@Override
public void onSuccess(final List<RpcResult<List<MultipartReply>>> results) {
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchPlanStep;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchStepType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.Batch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.BatchChoice;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddFlowCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddGroupCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddMeterCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveFlowCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveGroupCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveMeterCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateFlowCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateGroupCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateMeterCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.service.batch.common.rev160322.BatchOrderGrouping;
+import org.opendaylight.yangtools.yang.binding.DataContainer;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility methods for flat batch processing: plan assembly, barrier placement
+ * and rpc-result merging.
+ */
+public final class FlatBatchUtil {
+
+    // NOTE(review): LOG is declared but not referenced anywhere in this class — confirm before removing.
+    private static final Logger LOG = LoggerFactory.getLogger(FlatBatchUtil.class);
+
+    private FlatBatchUtil() {
+        throw new IllegalStateException("This class should not be instantiated.");
+    }
+
+    /**
+     * Walks the batch plan in order and sets the barrier-after flag on a step
+     * whenever the next step's type requires preceding steps to be completed
+     * first (as decided by {@link #decideBarrier(EnumSet, BatchStepType)}).
+     *
+     * @param batchPlan ordered list of batch plan steps, annotated in place
+     */
+    public static void markBarriersWhereNeeded(final List<BatchPlanStep> batchPlan) {
+        // accumulates the step types seen since the last barrier was placed
+        final EnumSet<BatchStepType> previousTypes = EnumSet.noneOf(BatchStepType.class);
+
+        BatchPlanStep previousPlanStep = null;
+        for (BatchPlanStep planStep : batchPlan) {
+            final BatchStepType type = planStep.getStepType();
+            // previousTypes is empty on the first iteration, so previousPlanStep is never null here
+            if (!previousTypes.isEmpty() && decideBarrier(previousTypes, type)) {
+                previousPlanStep.setBarrierAfter(true);
+                previousTypes.clear();
+            }
+            previousTypes.add(type);
+            previousPlanStep = planStep;
+        }
+    }
+
+    /**
+     * Decides whether a barrier is required between the previously seen step
+     * types and the next step type (e.g. a flow add must wait for preceding
+     * group/meter adds to finish).
+     *
+     * @param previousTypes step types accumulated since the last barrier
+     * @param type type of the next step
+     * @return true if a barrier should be emitted before the next step
+     */
+    @VisibleForTesting
+    static boolean decideBarrier(final EnumSet<BatchStepType> previousTypes, final BatchStepType type) {
+        final boolean needBarrier;
+        switch (type) {
+            case FLOW_ADD:
+            case FLOW_UPDATE:
+                needBarrier = previousTypes.contains(BatchStepType.GROUP_ADD)
+                        || previousTypes.contains(BatchStepType.METER_ADD);
+                break;
+            case GROUP_ADD:
+                needBarrier = previousTypes.contains(BatchStepType.GROUP_ADD)
+                        || previousTypes.contains(BatchStepType.GROUP_UPDATE);
+                break;
+            case GROUP_REMOVE:
+                needBarrier = previousTypes.contains(BatchStepType.FLOW_REMOVE)
+                        || previousTypes.contains(BatchStepType.FLOW_UPDATE)
+                        || previousTypes.contains(BatchStepType.GROUP_REMOVE)
+                        || previousTypes.contains(BatchStepType.GROUP_UPDATE);
+                break;
+            case METER_REMOVE:
+                needBarrier = previousTypes.contains(BatchStepType.FLOW_REMOVE)
+                        || previousTypes.contains(BatchStepType.FLOW_UPDATE);
+                break;
+            default:
+                needBarrier = false;
+        }
+        return needBarrier;
+    }
+
+    /**
+     * Converts the input batches into an ordered plan of non-empty steps.
+     *
+     * @param batches input batch list
+     * @return list of batch plan steps (steps with no tasks are skipped)
+     */
+    public static List<BatchPlanStep> assembleBatchPlan(List<Batch> batches) {
+        final List<BatchPlanStep> plan = new ArrayList<>();
+
+        BatchPlanStep planStep;
+        for (Batch batch : batches) {
+            final BatchStepType nextStepType = detectBatchStepType(batch.getBatchChoice());
+
+            planStep = new BatchPlanStep(nextStepType);
+            planStep.getTaskBag().addAll(extractBatchData(planStep.getStepType(), batch.getBatchChoice()));
+            if (!planStep.isEmpty()) {
+                plan.add(planStep);
+            }
+        }
+
+        return plan;
+    }
+
+    /**
+     * Extracts the list of batch items from the given choice case according to
+     * the already-detected step type.
+     *
+     * @param batchStepType detected step type of the choice
+     * @param batchChoice batch case holding the item list
+     * @return list of batch order items
+     */
+    private static List<? extends BatchOrderGrouping> extractBatchData(final BatchStepType batchStepType,
+                                                                       final BatchChoice batchChoice) {
+        final List<? extends BatchOrderGrouping> batchData;
+        switch (batchStepType) {
+            case FLOW_ADD:
+                batchData = ((FlatBatchAddFlowCase) batchChoice).getFlatBatchAddFlow();
+                break;
+            case FLOW_REMOVE:
+                batchData = ((FlatBatchRemoveFlowCase) batchChoice).getFlatBatchRemoveFlow();
+                break;
+            case FLOW_UPDATE:
+                batchData = ((FlatBatchUpdateFlowCase) batchChoice).getFlatBatchUpdateFlow();
+                break;
+            case GROUP_ADD:
+                batchData = ((FlatBatchAddGroupCase) batchChoice).getFlatBatchAddGroup();
+                break;
+            case GROUP_REMOVE:
+                batchData = ((FlatBatchRemoveGroupCase) batchChoice).getFlatBatchRemoveGroup();
+                break;
+            case GROUP_UPDATE:
+                batchData = ((FlatBatchUpdateGroupCase) batchChoice).getFlatBatchUpdateGroup();
+                break;
+            case METER_ADD:
+                batchData = ((FlatBatchAddMeterCase) batchChoice).getFlatBatchAddMeter();
+                break;
+            case METER_REMOVE:
+                batchData = ((FlatBatchRemoveMeterCase) batchChoice).getFlatBatchRemoveMeter();
+                break;
+            case METER_UPDATE:
+                batchData = ((FlatBatchUpdateMeterCase) batchChoice).getFlatBatchUpdateMeter();
+                break;
+            default:
+                throw new IllegalArgumentException("Unsupported batch step type obtained: " + batchStepType);
+        }
+        return batchData;
+    }
+
+    /**
+     * Maps the concrete batch choice case to its {@link BatchStepType}.
+     *
+     * @param batchCase batch case to inspect
+     * @param <T> concrete type of the batch case
+     * @return corresponding step type
+     * @throws IllegalArgumentException when the case is not supported
+     */
+    @VisibleForTesting
+    static <T extends BatchChoice> BatchStepType detectBatchStepType(final T batchCase) {
+        final BatchStepType type;
+        final Class<? extends DataContainer> implementedInterface = batchCase.getImplementedInterface();
+
+        if (FlatBatchAddFlowCase.class.equals(implementedInterface)) {
+            type = BatchStepType.FLOW_ADD;
+        } else if (FlatBatchRemoveFlowCase.class.equals(implementedInterface)) {
+            type = BatchStepType.FLOW_REMOVE;
+        } else if (FlatBatchUpdateFlowCase.class.equals(implementedInterface)) {
+            type = BatchStepType.FLOW_UPDATE;
+        } else if (FlatBatchAddGroupCase.class.equals(implementedInterface)) {
+            type = BatchStepType.GROUP_ADD;
+        } else if (FlatBatchRemoveGroupCase.class.equals(implementedInterface)) {
+            type = BatchStepType.GROUP_REMOVE;
+        } else if (FlatBatchUpdateGroupCase.class.equals(implementedInterface)) {
+            type = BatchStepType.GROUP_UPDATE;
+        } else if (FlatBatchAddMeterCase.class.equals(implementedInterface)) {
+            type = BatchStepType.METER_ADD;
+        } else if (FlatBatchRemoveMeterCase.class.equals(implementedInterface)) {
+            type = BatchStepType.METER_REMOVE;
+        } else if (FlatBatchUpdateMeterCase.class.equals(implementedInterface)) {
+            type = BatchStepType.METER_UPDATE;
+        } else {
+            throw new IllegalArgumentException("Unsupported batch obtained: " + implementedInterface);
+        }
+        return type;
+    }
+
+    /**
+     * join errors of left and right rpc result into output
+     *
+     * @param output target result
+     * @param chainInput left part (chained rpc result)
+     * @param input right part (result of current operation)
+     * @param <L> chain type
+     * @param <R> current operation type
+     */
+    private static <L, R> void joinErrors(final RpcResultBuilder<L> output, final RpcResult<L> chainInput, final RpcResult<R> input) {
+        final Collection<RpcError> rpcErrors = new ArrayList<>(chainInput.getErrors());
+        rpcErrors.addAll(input.getErrors());
+        if (!rpcErrors.isEmpty()) {
+            output.withRpcErrors(rpcErrors);
+        }
+    }
+
+    /**
+     * create rpc result honoring success/fail outcomes of arguments
+     *
+     * @param chainInput left part (chained rpc result)
+     * @param input right part (results of current operation)
+     * @param <L> chain type
+     * @param <R> current operation type
+     * @return rpc result with combined status
+     */
+    private static <L, R> RpcResultBuilder<L> createNextRpcResultBuilder(final RpcResult<L> chainInput, final RpcResult<R> input) {
+        return RpcResultBuilder.<L>status(input.isSuccessful() && chainInput.isSuccessful());
+    }
+
+    /**
+     * Create rpc result builder with combined status and sum of all errors.
+     * <br>
+     * Shortcut for {@link #createNextRpcResultBuilder(RpcResult, RpcResult)} and
+     * {@link #joinErrors(RpcResultBuilder, RpcResult, RpcResult)}.
+     *
+     * @param chainInput left part (chained rpc result)
+     * @param input right part (results of current operation)
+     * @param <L> chain type
+     * @param <R> current operation type
+     * @return rpc result with combined status and all errors
+     */
+    public static <L, R> RpcResultBuilder<L> mergeRpcResults(final RpcResult<L> chainInput, final RpcResult<R> input) {
+        // create rpcResult builder honoring both success/failure of current input and chained input
+        final RpcResultBuilder<L> output = FlatBatchUtil.createNextRpcResultBuilder(chainInput, input);
+        // join errors
+        FlatBatchUtil.joinErrors(output, chainInput, input);
+        return output;
+    }
+}
package org.opendaylight.openflowplugin.impl.util;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
+import javax.annotation.Nullable;
+import org.apache.commons.lang3.tuple.Pair;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowIdGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
private static final AtomicInteger unaccountedFlowsCounter = new AtomicInteger(0);
private static final Logger LOG = LoggerFactory.getLogger(FlowUtil.class);
+ private static final RpcResultBuilder<List<BatchFailedFlowsOutput>> SUCCESSFUL_FLOW_OUTPUT_RPC_RESULT =
+ RpcResultBuilder.success(Collections.<BatchFailedFlowsOutput>emptyList());
+ /** Attach barrier response to given {@link RpcResult}&lt;RemoveFlowsBatchOutput&gt;. */
+ public static final Function<Pair<RpcResult<RemoveFlowsBatchOutput>, RpcResult<Void>>, RpcResult<RemoveFlowsBatchOutput>>
+ FLOW_REMOVE_COMPOSING_TRANSFORM = createComposingFunction();
+
+ /** Attach barrier response to given {@link RpcResult}&lt;AddFlowsBatchOutput&gt;. */
+ public static final Function<Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>>, RpcResult<AddFlowsBatchOutput>>
+ FLOW_ADD_COMPOSING_TRANSFORM = createComposingFunction();
+
+ /** Attach barrier response to given {@link RpcResult}&lt;UpdateFlowsBatchOutput&gt;. */
+ public static final Function<Pair<RpcResult<UpdateFlowsBatchOutput>, RpcResult<Void>>, RpcResult<UpdateFlowsBatchOutput>>
+ FLOW_UPDATE_COMPOSING_TRANSFORM = createComposingFunction();
+
+ /**
+ * Gather errors into collection and wrap it into {@link RpcResult} and propagate all {@link RpcError}
+ */
+ public static final Function<RpcResult<List<BatchFailedFlowsOutput>>, RpcResult<RemoveFlowsBatchOutput>> FLOW_REMOVE_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedFlowsOutput>>, RpcResult<RemoveFlowsBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<RemoveFlowsBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedFlowsOutput>> batchFlowsCumulativeResult) {
+ final RemoveFlowsBatchOutput batchOutput = new RemoveFlowsBatchOutputBuilder()
+ .setBatchFailedFlowsOutput(batchFlowsCumulativeResult.getResult()).build();
+
+ final RpcResultBuilder<RemoveFlowsBatchOutput> resultBld =
+ createCumulativeRpcResult(batchFlowsCumulativeResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+
+ /**
+ * Gather errors into collection and wrap it into {@link RpcResult} and propagate all {@link RpcError}
+ */
+ public static final Function<RpcResult<List<BatchFailedFlowsOutput>>, RpcResult<AddFlowsBatchOutput>> FLOW_ADD_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedFlowsOutput>>, RpcResult<AddFlowsBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<AddFlowsBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedFlowsOutput>> batchFlowsCumulativeResult) {
+ final AddFlowsBatchOutput batchOutput = new AddFlowsBatchOutputBuilder()
+ .setBatchFailedFlowsOutput(batchFlowsCumulativeResult.getResult()).build();
+
+ final RpcResultBuilder<AddFlowsBatchOutput> resultBld =
+ createCumulativeRpcResult(batchFlowsCumulativeResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+
+ /**
+ * Gather errors into collection and wrap it into {@link RpcResult} and propagate all {@link RpcError}
+ */
+ public static final Function<RpcResult<List<BatchFailedFlowsOutput>>, RpcResult<UpdateFlowsBatchOutput>> FLOW_UPDATE_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedFlowsOutput>>, RpcResult<UpdateFlowsBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<UpdateFlowsBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedFlowsOutput>> batchFlowsCumulativeResult) {
+ final UpdateFlowsBatchOutput batchOutput = new UpdateFlowsBatchOutputBuilder()
+ .setBatchFailedFlowsOutput(batchFlowsCumulativeResult.getResult()).build();
+
+ final RpcResultBuilder<UpdateFlowsBatchOutput> resultBld =
+ createCumulativeRpcResult(batchFlowsCumulativeResult, batchOutput);
+ return resultBld.build();
+ }
+ };
private FlowUtil() {
throw new IllegalStateException("This class should not be instantiated.");
}
+ /**
+ * Wrap the given cumulative result (list of problematic flow-ids) into an {@link RpcResult} of given type.
+ *
+ * @param batchFlowsCumulativeResult cumulative result holding ids of failed flows and their errors
+ * @param batchOutput batch flow output to carry the failed-flows list in the final result
+ * @param <T> flow operation output type
+ * @return rpc result builder of given type; on failure it carries both the output and all rpc errors
+ */
+ private static <T extends BatchFlowOutputListGrouping>
+ RpcResultBuilder<T> createCumulativeRpcResult(final @Nullable RpcResult<List<BatchFailedFlowsOutput>> batchFlowsCumulativeResult,
+ final T batchOutput) {
+ final RpcResultBuilder<T> resultBld;
+ if (batchFlowsCumulativeResult.isSuccessful()) {
+ resultBld = RpcResultBuilder.success(batchOutput);
+ } else {
+ // failed result still transports the (partial) output plus the collected errors
+ resultBld = RpcResultBuilder.failed();
+ resultBld.withResult(batchOutput)
+ .withRpcErrors(batchFlowsCumulativeResult.getErrors());
+ }
+ return resultBld;
+ }
+
public static FlowId createAlienFlowId(final short tableId) {
final StringBuilder sBuilder = new StringBuilder(ALIEN_SYSTEM_FLOW_ID)
.append(tableId).append('-').append(unaccountedFlowsCounter.incrementAndGet());
- String alienId = sBuilder.toString();
+ String alienId = sBuilder.toString();
return new FlowId(alienId);
}
+
+ /**
+ * Factory method: create {@link Function} which attaches barrier response to given {@link RpcResult}&lt;T&gt;
+ * and changes success flag if needed.
+ * <br>
+ * Original rpcResult is the {@link Pair#getLeft()} and barrier result is the {@link Pair#getRight()}.
+ *
+ * @param <T> type of rpcResult value
+ * @return reusable static function
+ */
+ @VisibleForTesting
+ static <T extends BatchFlowOutputListGrouping>
+ Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>> createComposingFunction() {
+ return new Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>>() {
+ @Nullable
+ @Override
+ public RpcResult<T> apply(@Nullable final Pair<RpcResult<T>, RpcResult<Void>> input) {
+ final RpcResultBuilder<T> resultBld;
+ // overall success requires both the batch operation and the barrier to succeed
+ if (input.getLeft().isSuccessful() && input.getRight().isSuccessful()) {
+ resultBld = RpcResultBuilder.success();
+ } else {
+ resultBld = RpcResultBuilder.failed();
+ }
+
+ // merge errors of the operation (left) and the barrier (right)
+ final ArrayList<RpcError> rpcErrors = new ArrayList<>(input.getLeft().getErrors());
+ rpcErrors.addAll(input.getRight().getErrors());
+ resultBld.withRpcErrors(rpcErrors);
+
+ // payload always comes from the original operation; the barrier carries none
+ resultBld.withResult(input.getLeft().getResult());
+
+ return resultBld.build();
+ }
+ };
+ }
+
+ /**
+ * Assemble a flow reference for the given node, table and flow.
+ *
+ * @param nodePath path to the parent {@link Node}
+ * @param tableId id of the {@link Table} under the node
+ * @param flowId id of the {@link Flow} under the table
+ * @return flow reference wrapping the assembled instance identifier
+ */
+ public static FlowRef buildFlowPath(final InstanceIdentifier<Node> nodePath,
+ final short tableId, final FlowId flowId) {
+ final KeyedInstanceIdentifier<Flow, FlowKey> flowPath = nodePath
+ .augmentation(FlowCapableNode.class)
+ // NOTE(review): new FlowId(flowId) looks like a redundant copy of an already-typed id - confirm
+ .child(Flow.class, new FlowKey(new FlowId(flowId)));
+
+ return new FlowRef(flowPath);
+ }
+
+ /**
+ * Factory method: creates {@link Function} which keeps info of original inputs (passed to flow-rpc) and processes
+ * list of all flow-rpc results.
+ *
+ * @param inputBatchFlows collection of batch flow inputs; must be in the same order as the rpc results
+ * @param <O> result container type
+ * @return static reusable function gathering ids of failed flows and all rpc errors
+ */
+ public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedFlowsOutput>>> createCumulatingFunction(
+ final List<? extends BatchFlowIdGrouping> inputBatchFlows) {
+ return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedFlowsOutput>>>() {
+ @Nullable
+ @Override
+ public RpcResult<List<BatchFailedFlowsOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
+ final int sizeOfFutures = innerInput.size();
+ final int sizeOfInputBatch = inputBatchFlows.size();
+ // Guava Preconditions substitutes %s placeholders; "{}" is slf4j syntax and would be printed literally
+ Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
+ "wrong amount of returned futures: %s <> %s", sizeOfFutures, sizeOfInputBatch);
+
+ final ArrayList<BatchFailedFlowsOutput> batchFlows = new ArrayList<>(sizeOfFutures);
+ final Iterator<? extends BatchFlowIdGrouping> batchFlowIterator = inputBatchFlows.iterator();
+
+ Collection<RpcError> flowErrors = new ArrayList<>(sizeOfFutures);
+
+ int batchOrder = 0;
+ for (RpcResult<O> flowModOutput : innerInput) {
+ final FlowId flowId = batchFlowIterator.next().getFlowId();
+
+ if (!flowModOutput.isSuccessful()) {
+ // record the failed flow-id together with its position in the batch
+ batchFlows.add(new BatchFailedFlowsOutputBuilder()
+ .setFlowId(flowId)
+ .setBatchOrder(batchOrder)
+ .build());
+ flowErrors.addAll(flowModOutput.getErrors());
+ }
+ batchOrder++;
+ }
+
+ final RpcResultBuilder<List<BatchFailedFlowsOutput>> resultBuilder;
+ if (!flowErrors.isEmpty()) {
+ resultBuilder = RpcResultBuilder.<List<BatchFailedFlowsOutput>>failed()
+ .withRpcErrors(flowErrors).withResult(batchFlows);
+ } else {
+ // reuse the shared success builder with an empty failure list
+ resultBuilder = SUCCESSFUL_FLOW_OUTPUT_RPC_RESULT;
+ }
+ return resultBuilder.build();
+ }
+ };
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import javax.annotation.Nullable;
+import org.apache.commons.lang3.tuple.Pair;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.BatchGroupOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * provides group util methods
+ */
+public final class GroupUtil {
+
+ private static final RpcResultBuilder<List<BatchFailedGroupsOutput>> SUCCESSFUL_GROUP_OUTPUT_RPC_RESULT =
+ RpcResultBuilder.success(Collections.<BatchFailedGroupsOutput>emptyList());
+
+ public static final Function<RpcResult<List<BatchFailedGroupsOutput>>, RpcResult<AddGroupsBatchOutput>> GROUP_ADD_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedGroupsOutput>>, RpcResult<AddGroupsBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<AddGroupsBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedGroupsOutput>> batchGroupsCumulatedResult) {
+ final AddGroupsBatchOutput batchOutput = new AddGroupsBatchOutputBuilder()
+ .setBatchFailedGroupsOutput(batchGroupsCumulatedResult.getResult()).build();
+
+ final RpcResultBuilder<AddGroupsBatchOutput> resultBld =
+ createCumulativeRpcResult(batchGroupsCumulatedResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+ public static final Function<Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>>, RpcResult<AddGroupsBatchOutput>>
+ GROUP_ADD_COMPOSING_TRANSFORM = createComposingFunction();
+
+ public static final Function<RpcResult<List<BatchFailedGroupsOutput>>, RpcResult<RemoveGroupsBatchOutput>> GROUP_REMOVE_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedGroupsOutput>>, RpcResult<RemoveGroupsBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<RemoveGroupsBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedGroupsOutput>> batchGroupsCumulatedResult) {
+ final RemoveGroupsBatchOutput batchOutput = new RemoveGroupsBatchOutputBuilder()
+ .setBatchFailedGroupsOutput(batchGroupsCumulatedResult.getResult()).build();
+
+ final RpcResultBuilder<RemoveGroupsBatchOutput> resultBld =
+ createCumulativeRpcResult(batchGroupsCumulatedResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+ public static final Function<Pair<RpcResult<RemoveGroupsBatchOutput>, RpcResult<Void>>, RpcResult<RemoveGroupsBatchOutput>>
+ GROUP_REMOVE_COMPOSING_TRANSFORM = createComposingFunction();
+
+ public static final Function<RpcResult<List<BatchFailedGroupsOutput>>, RpcResult<UpdateGroupsBatchOutput>> GROUP_UPDATE_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedGroupsOutput>>, RpcResult<UpdateGroupsBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<UpdateGroupsBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedGroupsOutput>> batchGroupsCumulatedResult) {
+ final UpdateGroupsBatchOutput batchOutput = new UpdateGroupsBatchOutputBuilder()
+ .setBatchFailedGroupsOutput(batchGroupsCumulatedResult.getResult()).build();
+
+ final RpcResultBuilder<UpdateGroupsBatchOutput> resultBld =
+ createCumulativeRpcResult(batchGroupsCumulatedResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+ public static final Function<Pair<RpcResult<UpdateGroupsBatchOutput>, RpcResult<Void>>, RpcResult<UpdateGroupsBatchOutput>>
+ GROUP_UPDATE_COMPOSING_TRANSFORM = createComposingFunction();
+
+ private GroupUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
+ /**
+ * Assemble a group reference for the given node and group.
+ *
+ * @param nodePath path to the parent {@link Node}
+ * @param groupId id of the {@link Group} under the node
+ * @return group reference wrapping the assembled instance identifier
+ */
+ public static GroupRef buildGroupPath(final InstanceIdentifier<Node> nodePath, final GroupId groupId) {
+ final KeyedInstanceIdentifier<Group, GroupKey> groupPath = nodePath
+ .augmentation(FlowCapableNode.class)
+ .child(Group.class, new GroupKey(groupId));
+
+ return new GroupRef(groupPath);
+ }
+
+ /**
+ * Shortcut for {@link #createCumulatingFunction(Iterable, int)} deriving the expected batch size
+ * from the given iterable.
+ *
+ * @param inputBatchGroups collection of batch group inputs; must be in the same order as the rpc results
+ * @param <O> result container type
+ * @return static reusable function gathering ids of failed groups and all rpc errors
+ */
+ public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>> createCumulatingFunction(
+ final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group> inputBatchGroups) {
+ return createCumulatingFunction(inputBatchGroups, Iterables.size(inputBatchGroups));
+ }
+
+ /**
+ * Factory method: creates {@link Function} which keeps info of original inputs (passed to group-rpc) and
+ * processes the list of all group-rpc results.
+ *
+ * @param inputBatchGroups collection of batch group inputs; must be in the same order as the rpc results
+ * @param sizeOfInputBatch expected amount of rpc results (size of {@code inputBatchGroups})
+ * @param <O> result container type
+ * @return static reusable function gathering ids of failed groups and all rpc errors
+ */
+ public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>> createCumulatingFunction(
+ final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group> inputBatchGroups,
+ final int sizeOfInputBatch) {
+ return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>>() {
+ @Nullable
+ @Override
+ public RpcResult<List<BatchFailedGroupsOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
+ final int sizeOfFutures = innerInput.size();
+ // Guava Preconditions substitutes %s placeholders; "{}" is slf4j syntax and would be printed literally
+ Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
+ "wrong amount of returned futures: %s <> %s", sizeOfFutures, sizeOfInputBatch);
+
+ // presized like the flow counterpart to avoid resizing in the worst case
+ final List<BatchFailedGroupsOutput> batchGroups = new ArrayList<>(sizeOfFutures);
+ final Iterator<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group>
+ batchGroupIterator = inputBatchGroups.iterator();
+
+ Collection<RpcError> groupErrors = new ArrayList<>(sizeOfFutures);
+
+ int batchOrder = 0;
+ for (RpcResult<O> groupModOutput : innerInput) {
+ final GroupId groupId = batchGroupIterator.next().getGroupId();
+
+ if (!groupModOutput.isSuccessful()) {
+ // record the failed group-id together with its position in the batch
+ batchGroups.add(new BatchFailedGroupsOutputBuilder()
+ .setGroupId(groupId)
+ .setBatchOrder(batchOrder)
+ .build());
+ groupErrors.addAll(groupModOutput.getErrors());
+ }
+ batchOrder++;
+ }
+
+ final RpcResultBuilder<List<BatchFailedGroupsOutput>> resultBuilder;
+ if (!groupErrors.isEmpty()) {
+ resultBuilder = RpcResultBuilder.<List<BatchFailedGroupsOutput>>failed()
+ .withRpcErrors(groupErrors).withResult(batchGroups);
+ } else {
+ // reuse the shared success builder with an empty failure list
+ resultBuilder = SUCCESSFUL_GROUP_OUTPUT_RPC_RESULT;
+ }
+ return resultBuilder.build();
+ }
+ };
+ }
+
+ /**
+ * Factory method: create {@link Function} which attaches barrier response to given {@link RpcResult}&lt;T&gt;
+ * and changes success flag if needed.
+ * <br>
+ * Original rpcResult is the {@link Pair#getLeft()} and barrier result is the {@link Pair#getRight()}.
+ *
+ * @param <T> type of rpcResult value
+ * @return reusable static function
+ */
+ @VisibleForTesting
+ static <T extends BatchGroupOutputListGrouping>
+ Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>> createComposingFunction() {
+ return new Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>>() {
+ @Nullable
+ @Override
+ public RpcResult<T> apply(@Nullable final Pair<RpcResult<T>, RpcResult<Void>> input) {
+ final RpcResultBuilder<T> resultBld;
+ // overall success requires both the batch operation and the barrier to succeed
+ if (input.getLeft().isSuccessful() && input.getRight().isSuccessful()) {
+ resultBld = RpcResultBuilder.success();
+ } else {
+ resultBld = RpcResultBuilder.failed();
+ }
+
+ // merge errors of the operation (left) and the barrier (right)
+ final ArrayList<RpcError> rpcErrors = new ArrayList<>(input.getLeft().getErrors());
+ rpcErrors.addAll(input.getRight().getErrors());
+ resultBld.withRpcErrors(rpcErrors);
+
+ // payload always comes from the original operation; the barrier carries none
+ resultBld.withResult(input.getLeft().getResult());
+
+ return resultBld.build();
+ }
+ };
+ }
+
+ /**
+ * Wrap the given cumulative result (list of problematic group-ids) into an {@link RpcResult} of given type.
+ *
+ * @param batchGroupsCumulativeResult cumulative result holding ids of failed groups and their errors
+ * @param batchOutput batch group output to carry the failed-groups list in the final result
+ * @param <T> group operation output type
+ * @return rpc result builder of given type; on failure it carries both the output and all rpc errors
+ */
+ private static <T extends BatchGroupOutputListGrouping>
+ RpcResultBuilder<T> createCumulativeRpcResult(final @Nullable RpcResult<List<BatchFailedGroupsOutput>> batchGroupsCumulativeResult,
+ final T batchOutput) {
+ final RpcResultBuilder<T> resultBld;
+ if (batchGroupsCumulativeResult.isSuccessful()) {
+ resultBld = RpcResultBuilder.success(batchOutput);
+ } else {
+ // failed result still transports the (partial) output plus the collected errors
+ resultBld = RpcResultBuilder.failed();
+ resultBld.withResult(batchOutput)
+ .withRpcErrors(batchGroupsCumulativeResult.getErrors());
+ }
+ return resultBld;
+ }
+}
import org.opendaylight.openflowplugin.impl.services.PacketProcessingServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalEchoServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalExperimenterMessageServiceImpl;
+import org.opendaylight.openflowplugin.impl.services.SalFlatBatchServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalFlowServiceImpl;
+import org.opendaylight.openflowplugin.impl.services.SalFlowsBatchServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalGroupServiceImpl;
+import org.opendaylight.openflowplugin.impl.services.SalGroupsBatchServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalMeterServiceImpl;
+import org.opendaylight.openflowplugin.impl.services.SalMetersBatchServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalPortServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalTableServiceImpl;
import org.opendaylight.openflowplugin.impl.statistics.services.OpendaylightFlowStatisticsServiceImpl;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.OpendaylightFlowStatisticsServiceDelegateImpl;
import org.opendaylight.yang.gen.v1.urn.opendaylight.echo.service.rev150305.SalEchoService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.experimenter.message.service.rev151020.SalExperimenterMessageService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.SalFlatBatchService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
/**
* Method registers all OF services for role {@link OfpRole#BECOMEMASTER}
- * @param rpcContext - registration processing is implemented in {@link RpcContext}
+ *
+ * @param rpcContext - registration processing is implemented in {@link RpcContext}
* @param deviceContext - every service needs {@link DeviceContext} as input parameter
- * @param newRole - role validation for {@link OfpRole#BECOMEMASTER}
+ * @param newRole - role validation for {@link OfpRole#BECOMEMASTER}
*/
public static void registerMasterServices(@CheckForNull final RpcContext rpcContext,
- @CheckForNull final DeviceContext deviceContext, @CheckForNull final OfpRole newRole) {
+ @CheckForNull final DeviceContext deviceContext, @CheckForNull final OfpRole newRole) {
Preconditions.checkArgument(rpcContext != null);
Preconditions.checkArgument(deviceContext != null);
Preconditions.checkArgument(newRole != null);
Verify.verify(OfpRole.BECOMEMASTER.equals(newRole), "Service call with bad Role {} we expect role BECOMEMASTER", newRole);
- rpcContext.registerRpcServiceImplementation(SalEchoService.class, new SalEchoServiceImpl(rpcContext, deviceContext));
+ // create service instances
+ final SalFlowServiceImpl salFlowService = new SalFlowServiceImpl(rpcContext, deviceContext);
+ final FlowCapableTransactionServiceImpl flowCapableTransactionService = new FlowCapableTransactionServiceImpl(rpcContext, deviceContext);
+ final SalGroupServiceImpl salGroupService = new SalGroupServiceImpl(rpcContext, deviceContext);
+ final SalMeterServiceImpl salMeterService = new SalMeterServiceImpl(rpcContext, deviceContext);
- rpcContext.registerRpcServiceImplementation(SalFlowService.class, new SalFlowServiceImpl(rpcContext, deviceContext));
+ // register routed service instances
+ rpcContext.registerRpcServiceImplementation(SalEchoService.class, new SalEchoServiceImpl(rpcContext, deviceContext));
+ rpcContext.registerRpcServiceImplementation(SalFlowService.class, salFlowService);
//TODO: add constructors with rcpContext and deviceContext to meter, group, table constructors
- rpcContext.registerRpcServiceImplementation(FlowCapableTransactionService.class, new FlowCapableTransactionServiceImpl(rpcContext, deviceContext));
- rpcContext.registerRpcServiceImplementation(SalMeterService.class, new SalMeterServiceImpl(rpcContext, deviceContext));
- rpcContext.registerRpcServiceImplementation(SalGroupService.class, new SalGroupServiceImpl(rpcContext, deviceContext));
+ rpcContext.registerRpcServiceImplementation(FlowCapableTransactionService.class, flowCapableTransactionService);
+ rpcContext.registerRpcServiceImplementation(SalMeterService.class, salMeterService);
+ rpcContext.registerRpcServiceImplementation(SalGroupService.class, salGroupService);
rpcContext.registerRpcServiceImplementation(SalTableService.class, new SalTableServiceImpl(rpcContext, deviceContext));
rpcContext.registerRpcServiceImplementation(SalPortService.class, new SalPortServiceImpl(rpcContext, deviceContext));
rpcContext.registerRpcServiceImplementation(PacketProcessingService.class, new PacketProcessingServiceImpl(rpcContext, deviceContext));
rpcContext.registerRpcServiceImplementation(NodeConfigService.class, new NodeConfigServiceImpl(rpcContext, deviceContext));
rpcContext.registerRpcServiceImplementation(OpendaylightFlowStatisticsService.class, new OpendaylightFlowStatisticsServiceImpl(rpcContext, deviceContext));
+
+ final SalFlatBatchServiceImpl salFlatBatchService = new SalFlatBatchServiceImpl(
+ new SalFlowsBatchServiceImpl(salFlowService, flowCapableTransactionService),
+ new SalGroupsBatchServiceImpl(salGroupService, flowCapableTransactionService),
+ new SalMetersBatchServiceImpl(salMeterService, flowCapableTransactionService)
+ );
+ rpcContext.registerRpcServiceImplementation(SalFlatBatchService.class, salFlatBatchService);
+
// TODO: experimenter symmetric and multipart message services
rpcContext.registerRpcServiceImplementation(SalExperimenterMessageService.class, new SalExperimenterMessageServiceImpl(rpcContext, deviceContext));
}
* directly before by change role from {@link OfpRole#BECOMEMASTER} to {@link OfpRole#BECOMESLAVE}.
* Method registers {@link SalEchoService} in next step only because we would like to have SalEchoService as local service for all apps
* to be able actively check connection status for slave connection too.
+ *
* @param rpcContext - registration/unregistration processing is implemented in {@link RpcContext}
- * @param newRole - role validation for {@link OfpRole#BECOMESLAVE}
+ * @param newRole - role validation for {@link OfpRole#BECOMESLAVE}
*/
public static void registerSlaveServices(@CheckForNull final RpcContext rpcContext, @CheckForNull final OfpRole newRole) {
Preconditions.checkArgument(rpcContext != null);
/**
* Method unregisters all OF services.
+ *
* @param rpcContext - unregistration processing is implemented in {@link RpcContext}
*/
public static void unregisterServices(@CheckForNull final RpcContext rpcContext) {
rpcContext.unregisterRpcServiceImplementation(PacketProcessingService.class);
rpcContext.unregisterRpcServiceImplementation(NodeConfigService.class);
rpcContext.unregisterRpcServiceImplementation(OpendaylightFlowStatisticsService.class);
+ rpcContext.unregisterRpcServiceImplementation(SalFlatBatchService.class);
// TODO: experimenter symmetric and multipart message services
rpcContext.unregisterRpcServiceImplementation(SalExperimenterMessageService.class);
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import javax.annotation.Nullable;
+import org.apache.commons.lang3.tuple.Pair;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.BatchMeterOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutputBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Provides meter utility methods for batch meter services (path building and RPC result aggregation).
+ */
+public final class MeterUtil {
+
+ private static final RpcResultBuilder<List<BatchFailedMetersOutput>> SUCCESSFUL_METER_OUTPUT_RPC_RESULT =
+ RpcResultBuilder.success(Collections.<BatchFailedMetersOutput>emptyList());
+
+ public static final Function<RpcResult<List<BatchFailedMetersOutput>>, RpcResult<AddMetersBatchOutput>> METER_ADD_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedMetersOutput>>, RpcResult<AddMetersBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<AddMetersBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedMetersOutput>> batchMetersCumulatedResult) {
+ final AddMetersBatchOutput batchOutput = new AddMetersBatchOutputBuilder()
+ .setBatchFailedMetersOutput(batchMetersCumulatedResult.getResult()).build();
+
+ final RpcResultBuilder<AddMetersBatchOutput> resultBld =
+ createCumulativeRpcResult(batchMetersCumulatedResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+ public static final Function<Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>>, RpcResult<AddMetersBatchOutput>>
+ METER_ADD_COMPOSING_TRANSFORM = createComposingFunction();
+
+ public static final Function<RpcResult<List<BatchFailedMetersOutput>>, RpcResult<RemoveMetersBatchOutput>> METER_REMOVE_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedMetersOutput>>, RpcResult<RemoveMetersBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<RemoveMetersBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedMetersOutput>> batchMetersCumulatedResult) {
+ final RemoveMetersBatchOutput batchOutput = new RemoveMetersBatchOutputBuilder()
+ .setBatchFailedMetersOutput(batchMetersCumulatedResult.getResult()).build();
+
+ final RpcResultBuilder<RemoveMetersBatchOutput> resultBld =
+ createCumulativeRpcResult(batchMetersCumulatedResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+ public static final Function<Pair<RpcResult<RemoveMetersBatchOutput>, RpcResult<Void>>, RpcResult<RemoveMetersBatchOutput>>
+ METER_REMOVE_COMPOSING_TRANSFORM = createComposingFunction();
+
+ public static final Function<RpcResult<List<BatchFailedMetersOutput>>, RpcResult<UpdateMetersBatchOutput>> METER_UPDATE_TRANSFORM =
+ new Function<RpcResult<List<BatchFailedMetersOutput>>, RpcResult<UpdateMetersBatchOutput>>() {
+ @Nullable
+ @Override
+ public RpcResult<UpdateMetersBatchOutput> apply(@Nullable final RpcResult<List<BatchFailedMetersOutput>> batchMetersCumulatedResult) {
+ final UpdateMetersBatchOutput batchOutput = new UpdateMetersBatchOutputBuilder()
+ .setBatchFailedMetersOutput(batchMetersCumulatedResult.getResult()).build();
+
+ final RpcResultBuilder<UpdateMetersBatchOutput> resultBld =
+ createCumulativeRpcResult(batchMetersCumulatedResult, batchOutput);
+ return resultBld.build();
+ }
+ };
+ public static final Function<Pair<RpcResult<UpdateMetersBatchOutput>, RpcResult<Void>>, RpcResult<UpdateMetersBatchOutput>>
+ METER_UPDATE_COMPOSING_TRANSFORM = createComposingFunction();
+
+ private MeterUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
+ /**
+     * @param nodePath path to the parent {@link Node}
+     * @param meterId id of the meter under the given node
+ * @return instance identifier assembled for given node and meter
+ */
+ public static MeterRef buildMeterPath(final InstanceIdentifier<Node> nodePath, final MeterId meterId) {
+ final KeyedInstanceIdentifier<Meter, MeterKey> meterPath = nodePath
+ .augmentation(FlowCapableNode.class)
+ .child(Meter.class, new MeterKey(meterId));
+
+ return new MeterRef(meterPath);
+ }
+
+ public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>> createCumulativeFunction(
+ final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter> inputBatchMeters) {
+ return createCumulativeFunction(inputBatchMeters, Iterables.size(inputBatchMeters));
+ }
+
+    public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>> createCumulativeFunction(
+            final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter> inputBatchMeters,
+            final int sizeOfInputBatch) {
+        return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>>() {
+            @Nullable
+            @Override
+            public RpcResult<List<BatchFailedMetersOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
+                final int sizeOfFutures = innerInput.size();
+                Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
+                        "wrong amount of returned futures: %s <> %s", sizeOfFutures, sizeOfInputBatch);
+
+                final List<BatchFailedMetersOutput> batchMeters = new ArrayList<>();
+                final Iterator<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter>
+                        batchMeterIterator = inputBatchMeters.iterator();
+
+                Collection<RpcError> meterErrors = new ArrayList<>(sizeOfFutures);
+
+                int batchOrder = 0;
+                for (RpcResult<O> meterModOutput : innerInput) {
+                    final MeterId meterId = batchMeterIterator.next().getMeterId();
+
+                    if (!meterModOutput.isSuccessful()) {
+                        batchMeters.add(new BatchFailedMetersOutputBuilder()
+                                .setBatchOrder(batchOrder)
+                                .setMeterId(meterId)
+                                .build());
+                        meterErrors.addAll(meterModOutput.getErrors());
+                    }
+                    batchOrder++;
+                }
+
+                final RpcResultBuilder<List<BatchFailedMetersOutput>> resultBuilder;
+                if (!meterErrors.isEmpty()) {
+                    resultBuilder = RpcResultBuilder.<List<BatchFailedMetersOutput>>failed()
+                            .withRpcErrors(meterErrors).withResult(batchMeters);
+                } else {
+                    resultBuilder = SUCCESSFUL_METER_OUTPUT_RPC_RESULT;
+                }
+                return resultBuilder.build();
+            }
+        };
+    }
+
+ /**
+     * Factory method: create {@link Function} which attaches barrier response to given {@code RpcResult<T>}
+ * and changes success flag if needed.
+ * <br>
+ * Original rpcResult is the {@link Pair#getLeft()} and barrier result is the {@link Pair#getRight()}.
+ *
+ * @param <T> type of rpcResult value
+ * @return reusable static function
+ */
+ @VisibleForTesting
+ static <T extends BatchMeterOutputListGrouping>
+ Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>> createComposingFunction() {
+ return new Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>>() {
+ @Nullable
+ @Override
+ public RpcResult<T> apply(@Nullable final Pair<RpcResult<T>, RpcResult<Void>> input) {
+ final RpcResultBuilder<T> resultBld;
+ if (input.getLeft().isSuccessful() && input.getRight().isSuccessful()) {
+ resultBld = RpcResultBuilder.success();
+ } else {
+ resultBld = RpcResultBuilder.failed();
+ }
+
+ final ArrayList<RpcError> rpcErrors = new ArrayList<>(input.getLeft().getErrors());
+ rpcErrors.addAll(input.getRight().getErrors());
+ resultBld.withRpcErrors(rpcErrors);
+
+ resultBld.withResult(input.getLeft().getResult());
+
+ return resultBld.build();
+ }
+ };
+ }
+
+ /**
+     * Wrap given list of problematic meter-ids into {@link RpcResult} of given type.
+     *
+     * @param batchMetersCumulativeResult list of ids of failed meters
+     * @param batchOutput batch output to be wrapped into the resulting {@link RpcResult}
+     * @param <T> meter operation type
+     * @return batch meter operation output of given type containing list of meter-ids and corresponding success flag
+ */
+ private static <T extends BatchMeterOutputListGrouping>
+ RpcResultBuilder<T> createCumulativeRpcResult(final @Nullable RpcResult<List<BatchFailedMetersOutput>> batchMetersCumulativeResult,
+ final T batchOutput) {
+ final RpcResultBuilder<T> resultBld;
+ if (batchMetersCumulativeResult.isSuccessful()) {
+ resultBld = RpcResultBuilder.success(batchOutput);
+ } else {
+ resultBld = RpcResultBuilder.failed();
+ resultBld.withResult(batchOutput)
+ .withRpcErrors(batchMetersCumulativeResult.getErrors());
+ }
+ return resultBld;
+ }
+}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
import org.opendaylight.openflowplugin.openflow.md.util.InventoryDataServiceUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entries.grouping.MatchEntry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entry.value.grouping.match.entry.value.in.port._case.InPort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entry.value.grouping.match.entry.value.InPortCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.PacketIn;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.features.reply.PhyPort;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
*/
public class NodeConnectorRefToPortTranslator {
/**
- * Converts {@link DeviceState} to {@link NodeConnectorRef}
- * @param deviceState Device state to be converted
- * @return Device state converted to node connector reference
+ * Converts {@link PacketIn} to {@link NodeConnectorRef}
+ * @param packetIn Packet input
+ * @param dataPathId Data path id
+ * @return packet input converted to node connector reference
*/
@Nullable
- public static NodeConnectorRef toNodeConnectorRef(@Nonnull DeviceState deviceState) {
- Preconditions.checkNotNull(deviceState);
+ public static NodeConnectorRef toNodeConnectorRef(@Nonnull PacketIn packetIn, BigInteger dataPathId) {
+ Preconditions.checkNotNull(packetIn);
- Long port = getPortNoFromDeviceState(deviceState);
- OpenflowVersion version = OpenflowVersion.get(deviceState.getVersion());
- BigInteger dataPathId = deviceState.getFeatures().getDatapathId();
+ NodeConnectorRef ref = null;
+ Long port = getPortNoFromPacketIn(packetIn);
- return InventoryDataServiceUtil.nodeConnectorRefFromDatapathIdPortno(dataPathId, port, version);
+ if (port != null) {
+ OpenflowVersion version = OpenflowVersion.get(packetIn.getVersion());
+
+ ref = InventoryDataServiceUtil.nodeConnectorRefFromDatapathIdPortno(dataPathId, port, version);
+ }
+
+ return ref;
}
/**
- * Gets port number from {@link NodeConnectorRef}. If it is null, it will try to get the port from
- * {@link DeviceState}
- * @param deviceState Device state fallback if there is any problem with node connector reference
+ * Gets port number from {@link NodeConnectorRef}.
* @param nodeConnectorRef Node connector reference
+ * @param version Openflow version
* @return port number
*/
@SuppressWarnings("unchecked")
@Nullable
- public static Long fromNodeConnectorRef(@Nonnull DeviceState deviceState, NodeConnectorRef nodeConnectorRef) {
- Preconditions.checkNotNull(deviceState);
+ public static Long fromNodeConnectorRef(@Nonnull NodeConnectorRef nodeConnectorRef, short version) {
+ Preconditions.checkNotNull(nodeConnectorRef);
- if (nodeConnectorRef != null && nodeConnectorRef.getValue() instanceof KeyedInstanceIdentifier) {
+ Long port = null;
+
+ if (nodeConnectorRef.getValue() instanceof KeyedInstanceIdentifier) {
KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> identifier =
(KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey>) nodeConnectorRef.getValue();
- OpenflowVersion version = OpenflowVersion.get(deviceState.getVersion());
+ OpenflowVersion ofVersion = OpenflowVersion.get(version);
String nodeConnectorId = identifier.getKey().getId().getValue();
- return InventoryDataServiceUtil.portNumberfromNodeConnectorId(version, nodeConnectorId);
- } else {
- return getPortNoFromDeviceState(deviceState);
+ port = InventoryDataServiceUtil.portNumberfromNodeConnectorId(ofVersion, nodeConnectorId);
}
+
+ return port;
}
@VisibleForTesting
@Nullable
- static Long getPortNoFromDeviceState(@Nonnull DeviceState deviceState) {
- Preconditions.checkNotNull(deviceState);
+ static Long getPortNoFromPacketIn(@Nonnull PacketIn packetIn) {
+ Preconditions.checkNotNull(packetIn);
- List<PhyPort> ports = deviceState.getFeatures().getPhyPort();
+ Long port = null;
+
+ if (packetIn.getVersion() == OFConstants.OFP_VERSION_1_0 && packetIn.getInPort() != null) {
+ port = packetIn.getInPort().longValue();
+ } else if (packetIn.getVersion() == OFConstants.OFP_VERSION_1_3) {
+ if (packetIn.getMatch() != null && packetIn.getMatch().getMatchEntry() != null) {
+ List<MatchEntry> entries = packetIn.getMatch().getMatchEntry();
+
+ for (MatchEntry entry : entries) {
+ if (entry.getMatchEntryValue() instanceof InPortCase) {
+ InPortCase inPortCase = (InPortCase) entry.getMatchEntryValue();
+
+ InPort inPort = inPortCase.getInPort();
+
+ if (inPort != null) {
+ port = inPort.getPortNumber().getValue();
+ break;
+ }
+ }
+ }
+ }
+ }
- return ports != null ?
- ports.stream().filter(Objects::nonNull).map(PhyPort::getPortNo).filter(Objects::nonNull).findFirst().orElse(null) :
- null;
+ return port;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Purpose: utility class providing path and {@link InstanceIdentifier} tools
+ */
+public class PathUtil {
+
+ private PathUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
+
+ /**
+ * @param input reference to {@link Node}
+ * @return node-id from given reference
+ */
+ public static NodeId extractNodeId(final NodeRef input) {
+ return input.getValue().firstKeyOf(Node.class).getId();
+ }
+}
import io.netty.util.Timeout;
import java.math.BigInteger;
import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.junit.Assert;
import org.junit.Before;
import org.opendaylight.openflowplugin.api.openflow.device.TranslatorLibrary;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.md.core.TranslatorKey;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
MessageTranslator messageTranslatorFlowCapableNodeConnector;
@Mock
private MessageTranslator<Object, Object> messageTranslatorFlowRemoved;
+ @Mock
+ private LifecycleConductor lifecycleConductor;
private InOrder inOrderDevState;
org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.FlowRemoved.class.getName()))))
.thenReturn(messageTranslatorFlowRemoved);
- deviceContext = new DeviceContextImpl(connectionContext, deviceState, dataBroker, timer, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false);
+ deviceContext = new DeviceContextImpl(connectionContext, deviceState, dataBroker, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false);
xid = new Xid(atomicLong.incrementAndGet());
xidMulti = new Xid(atomicLong.incrementAndGet());
@Test(expected = NullPointerException.class)
public void testDeviceContextImplConstructorNullDataBroker() throws Exception {
- new DeviceContextImpl(connectionContext, deviceState, null, timer, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false).close();
+ new DeviceContextImpl(connectionContext, deviceState, null, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false).close();
}
@Test(expected = NullPointerException.class)
public void testDeviceContextImplConstructorNullDeviceState() throws Exception {
- new DeviceContextImpl(connectionContext, null, dataBroker, timer, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false).close();
+ new DeviceContextImpl(connectionContext, null, dataBroker, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false).close();
}
@Test(expected = NullPointerException.class)
public void testDeviceContextImplConstructorNullTimer() throws Exception {
- new DeviceContextImpl(null, deviceState, dataBroker, null, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false).close();
+ new DeviceContextImpl(null, deviceState, dataBroker, messageIntelligenceAgency, outboundQueueProvider, translatorLibrary, false).close();
}
@Test
@Test
public void testGetReservedXid() {
- deviceContext.reservedXidForDeviceMessage();
+ deviceContext.reserveXidForDeviceMessage();
verify(outboundQueueProvider).reserveEntry();
}
assertEquals(translatorLibrary, pickedTranslatorLibrary);
}
- @Test
- public void testGetTimer() {
- final HashedWheelTimer pickedTimer = deviceContext.getTimer();
- assertEquals(timer, pickedTimer);
- }
-
@Test
public void testShutdownConnection() {
final ConnectionAdapter mockedConnectionAdapter = mock(ConnectionAdapter.class);
final ConnectionContext mockedAuxiliaryConnectionContext = prepareConnectionContext();
deviceContext.addAuxiliaryConnectionContext(mockedAuxiliaryConnectionContext);
final DeviceTerminationPhaseHandler mockedDeviceContextClosedHandler = mock(DeviceTerminationPhaseHandler.class);
- deviceContext.addDeviceContextClosedHandler(mockedDeviceContextClosedHandler);
when(deviceState.isValid()).thenReturn(true);
deviceContext.shutdownConnection();
verify(connectionContext).closeConnection(true);
@Test
public void testOnDeviceDisconnected() throws Exception {
final DeviceTerminationPhaseHandler deviceContextClosedHandler = mock(DeviceTerminationPhaseHandler.class);
- deviceContext.addDeviceContextClosedHandler(deviceContextClosedHandler);
// Mockito.verify(deviceState).setValid(false);
// Mockito.verify(deviceContextClosedHandler).onDeviceContextClosed(deviceContext);
import org.opendaylight.openflowplugin.api.openflow.device.TranslatorLibrary;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.md.core.TranslatorKey;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageIntelligenceAgency;
+import org.opendaylight.openflowplugin.impl.LifecycleConductorImpl;
import org.opendaylight.openflowplugin.openflow.md.util.OpenflowPortsUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
when(mockedWriteTransaction.submit()).thenReturn(mockedFuture);
final MessageIntelligenceAgency mockedMessageIntelligenceAgency = mock(MessageIntelligenceAgency.class);
- final DeviceManagerImpl deviceManager = new DeviceManagerImpl(mockedDataBroker, mockedMessageIntelligenceAgency,
- TEST_VALUE_GLOBAL_NOTIFICATION_QUOTA, false, barrierIntervalNanos, barrierCountLimit);
+ final LifecycleConductor lifecycleConductor = new LifecycleConductorImpl(mockedMessageIntelligenceAgency);
+ final DeviceManagerImpl deviceManager = new DeviceManagerImpl(mockedDataBroker,
+ TEST_VALUE_GLOBAL_NOTIFICATION_QUOTA, false, barrierIntervalNanos, barrierCountLimit, lifecycleConductor);
deviceManager.setDeviceInitializationPhaseHandler(deviceInitPhaseHandler);
deviceManager.setDeviceTerminationPhaseHandler(deviceTerminationPhaseHandler);
final DeviceState mockedDeviceState = mock(DeviceState.class);
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
when(mockedDeviceState.getRole()).thenReturn(OfpRole.BECOMEMASTER);
+ when(mockedDeviceState.getNodeId()).thenReturn(mockedNodeId);
if (withException) {
doThrow(new IllegalStateException("dummy")).when(mockedDeviceContext).initialSubmitTransaction();
}
-
- deviceManager.onDeviceContextLevelUp(mockedDeviceContext);
+ deviceManager.addDeviceContextToMap(mockedNodeId, mockedDeviceContext);
+ deviceManager.onDeviceContextLevelUp(mockedDeviceContext.getDeviceState().getNodeId());
if (withException) {
verify(mockedDeviceContext).close();
} else {
order.verify(mockConnectionContext).setOutboundQueueHandleRegistration(
Mockito.<OutboundQueueHandlerRegistration<OutboundQueueProvider>>any());
order.verify(mockConnectionContext).getNodeId();
- Mockito.verify(deviceInitPhaseHandler).onDeviceContextLevelUp(Matchers.<DeviceContext>any());
+ Mockito.verify(deviceInitPhaseHandler).onDeviceContextLevelUp(Matchers.<NodeId>any());
}
@Test
order.verify(mockConnectionContext).setOutboundQueueHandleRegistration(
Mockito.<OutboundQueueHandlerRegistration<OutboundQueueProvider>>any());
order.verify(mockConnectionContext).getNodeId();
- Mockito.verify(deviceInitPhaseHandler).onDeviceContextLevelUp(Matchers.<DeviceContext>any());
+ Mockito.verify(deviceInitPhaseHandler).onDeviceContextLevelUp(Matchers.<NodeId>any());
}
protected ConnectionContext buildMockConnectionContext(final short ofpVersion) {
*/
package org.opendaylight.openflowplugin.impl.role;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Matchers;
import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.common.api.clustering.CandidateAlreadyRegisteredException;
import org.opendaylight.controller.md.sal.common.api.clustering.Entity;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipCandidateRegistration;
import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.openflowplugin.api.OFConstants;
-import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
-import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
+import org.opendaylight.openflowplugin.api.openflow.role.RoleContext;
import org.opendaylight.openflowplugin.api.openflow.role.RoleManager;
-import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageSpy;
-import org.opendaylight.openflowplugin.impl.util.DeviceStateUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.FeaturesReply;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SalRoleService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleOutputBuilder;
-import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- * Created by kramesha on 9/1/15.
+ * @author Jozef Bacigal
+ * Date: 4/19/16
+ * Time: 12:56
*/
@RunWith(MockitoJUnitRunner.class)
public class RoleContextImplTest {
- public static final int FUTURE_SAFETY_TIMEOUT = 5;
- @Mock
- private EntityOwnershipService entityOwnershipService;
-
- @Mock
- private DataBroker dataBroker;
-
- @Mock
- private RpcProviderRegistry rpcProviderRegistry;
-
- @Mock
- private DeviceContext deviceContext;
-
- @Mock
- private ConnectionContext connectionContext;
+ private static final Logger LOG = LoggerFactory.getLogger(RoleContextImpl.class);
@Mock
- private DeviceState deviceState;
+ private EntityOwnershipService entityOwnershipService;
@Mock
- private SalRoleService salRoleService;
+ private EntityOwnershipCandidateRegistration entityOwnershipCandidateRegistration;
@Mock
- private GetFeaturesOutput getFeaturesOutput;
-
- @Mock
- private FeaturesReply featuresReply;
- @Mock
- private MessageSpy mockedMessageSpy;
+ private LifecycleConductor conductor;
private final NodeId nodeId = NodeId.getDefaultInstance("openflow:1");
- private final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier = DeviceStateUtil.createNodeInstanceIdentifier(nodeId);
private final Entity entity = new Entity(RoleManager.ENTITY_TYPE, nodeId.getValue());
private final Entity txEntity = new Entity(RoleManager.TX_ENTITY_TYPE, nodeId.getValue());
- private RoleContextImpl roleContext;
+ private RoleContext roleContext;
@Before
public void setup() throws CandidateAlreadyRegisteredException {
- when(deviceContext.getPrimaryConnectionContext()).thenReturn(connectionContext);
- when(deviceContext.getDeviceState()).thenReturn(deviceState);
- when(deviceContext.getMessageSpy()).thenReturn(mockedMessageSpy);
- when(connectionContext.getNodeId()).thenReturn(nodeId);
- when(deviceState.getNodeInstanceIdentifier()).thenReturn(instanceIdentifier);
- when(deviceState.getNodeId()).thenReturn(nodeId);
- when(rpcProviderRegistry.getRpcService(SalRoleService.class)).thenReturn(salRoleService);
- when(deviceState.getFeatures()).thenReturn(getFeaturesOutput);
- when(getFeaturesOutput.getVersion()).thenReturn(OFConstants.OFP_VERSION_1_0);
- when(deviceContext.getPrimaryConnectionContext().getFeatures()).thenReturn(featuresReply);
- when(deviceContext.getPrimaryConnectionContext().getConnectionState()).thenReturn(ConnectionContext.CONNECTION_STATE.WORKING);
- when(deviceContext.onClusterRoleChange(Matchers.<OfpRole>any(), Matchers.<OfpRole>any()))
- .thenReturn(Futures.immediateFuture((Void) null));
-
- roleContext = new RoleContextImpl(deviceContext, entityOwnershipService, entity, txEntity);
- roleContext.initializationRoleContext();
+ roleContext = new RoleContextImpl(nodeId, entityOwnershipService, entity, txEntity, conductor);
+ Mockito.when(entityOwnershipService.registerCandidate(entity)).thenReturn(entityOwnershipCandidateRegistration);
+ Mockito.when(entityOwnershipService.registerCandidate(txEntity)).thenReturn(entityOwnershipCandidateRegistration);
}
- @Test
- public void testOnRoleChangedStartingMaster() throws InterruptedException, ExecutionException, TimeoutException {
- final OfpRole oldRole = OfpRole.BECOMESLAVE;
- final OfpRole newRole = OfpRole.BECOMEMASTER;
+// @Test
+// Run this test only if demanded because it takes 15s to run
+ public void testInitializationThreads() throws Exception {
+
+ /*Setting answer which will hold the answer for 5s*/
+ Mockito.when(entityOwnershipService.registerCandidate(entity)).thenAnswer(new Answer<EntityOwnershipService>() {
+ @Override
+ public EntityOwnershipService answer(final InvocationOnMock invocationOnMock) throws Throwable {
+ LOG.info("Sleeping this thread for 14s");
+ Thread.sleep(14000L);
+ return null;
+ }
+ });
- final SettableFuture<RpcResult<SetRoleOutput>> future = SettableFuture.create();
- future.set(RpcResultBuilder.<SetRoleOutput>success().build());
- when(salRoleService.setRole(Matchers.argThat(new SetRoleInputMatcher(newRole, instanceIdentifier))))
- .thenReturn(future);
+ Thread t1 = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ LOG.info("Starting thread 1");
+ Assert.assertTrue(roleContext.initialization());
+ }
+ });
- roleContext.setSalRoleService(salRoleService);
+ Thread t2 = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ LOG.info("Starting thread 2");
+ Assert.assertFalse(roleContext.initialization());
+ }
+ });
- final ListenableFuture<Void> onRoleChanged = roleContext.onRoleChanged(oldRole, newRole);
- onRoleChanged.get(FUTURE_SAFETY_TIMEOUT, TimeUnit.SECONDS);
+ t1.start();
+ LOG.info("Sleeping main thread for 1s to prevent race condition.");
+ Thread.sleep(1000L);
+ t2.start();
+
+ while (t2.isAlive()) {
+ //Waiting
+ }
- verify(deviceContext).onClusterRoleChange(oldRole, newRole);
}
@Test
- public void testOnRoleChangedStartingSlave() throws InterruptedException, ExecutionException, TimeoutException {
- final OfpRole oldRole = OfpRole.BECOMEMASTER;
- final OfpRole newRole = OfpRole.BECOMESLAVE;
-
- final SettableFuture<RpcResult<SetRoleOutput>> future = SettableFuture.create();
- future.set(RpcResultBuilder.<SetRoleOutput>success().build());
- when(salRoleService.setRole(Matchers.argThat(new SetRoleInputMatcher(newRole, instanceIdentifier))))
- .thenReturn(future);
-
- roleContext.setSalRoleService(salRoleService);
-
- final ListenableFuture<Void> onRoleChanged = roleContext.onRoleChanged(oldRole, newRole);
- onRoleChanged.get(5, TimeUnit.SECONDS);
-
- verify(deviceContext).onClusterRoleChange(oldRole, newRole);
+ public void testTermination() throws Exception {
+ roleContext.registerCandidate(entity);
+ roleContext.registerCandidate(txEntity);
+ Assert.assertTrue(roleContext.isMainCandidateRegistered());
+ Assert.assertTrue(roleContext.isTxCandidateRegistered());
+ roleContext.unregisterAllCandidates();
+ Assert.assertFalse(roleContext.isMainCandidateRegistered());
}
@Test
- public void testOnRoleChangedWorkingMaster() throws InterruptedException, ExecutionException, TimeoutException {
- final OfpRole oldRole = OfpRole.BECOMESLAVE;
- final OfpRole newRole = OfpRole.BECOMEMASTER;
-
- final ListenableFuture<RpcResult<SetRoleOutput>> future =
- RpcResultBuilder.success(new SetRoleOutputBuilder().build()).buildFuture();
- when(salRoleService.setRole(Matchers.argThat(new SetRoleInputMatcher(newRole, instanceIdentifier))))
- .thenReturn(future);
-
- roleContext.setSalRoleService(salRoleService);
+ public void testCreateRequestContext() throws Exception {
- final ListenableFuture<Void> onRoleChanged = roleContext.onRoleChanged(oldRole, newRole);
- onRoleChanged.get(5, TimeUnit.SECONDS);
+ }
- verify(deviceContext).onClusterRoleChange(oldRole, newRole);
+ @Test(expected = NullPointerException.class)
+ public void testSetSalRoleService() throws Exception {
+ roleContext.setSalRoleService(null);
}
@Test
- public void testOnRoleChangedWorkingSlave() throws InterruptedException, ExecutionException, TimeoutException {
- final OfpRole oldRole = OfpRole.BECOMEMASTER;
- final OfpRole newRole = OfpRole.BECOMESLAVE;
-
- final SettableFuture<RpcResult<SetRoleOutput>> future = SettableFuture.create();
- future.set(RpcResultBuilder.<SetRoleOutput>success().build());
- when(salRoleService.setRole(Matchers.argThat(new SetRoleInputMatcher(newRole, instanceIdentifier))))
- .thenReturn(future);
-
- roleContext.setSalRoleService(salRoleService);
-
- final ListenableFuture<Void> onRoleChanged = roleContext.onRoleChanged(oldRole, newRole);
- onRoleChanged.get(5, TimeUnit.SECONDS);
-
- verify(deviceContext).onClusterRoleChange(oldRole, newRole);
+ public void testGetEntity() throws Exception {
+ Assert.assertTrue(roleContext.getEntity().equals(entity));
}
- private class SetRoleInputMatcher extends ArgumentMatcher<SetRoleInput> {
-
- private final OfpRole ofpRole;
- private final NodeRef nodeRef;
-
- public SetRoleInputMatcher(final OfpRole ofpRole, final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier) {
- this.ofpRole = ofpRole;
- nodeRef = new NodeRef(instanceIdentifier);
+ @Test
+ public void testGetTxEntity() throws Exception {
+ Assert.assertTrue(roleContext.getTxEntity().equals(txEntity));
+ }
- }
+ @Test
+ public void testGetNodeId() throws Exception {
+ Assert.assertTrue(roleContext.getNodeId().equals(nodeId));
+ }
- @Override
- public boolean matches(final Object o) {
- final SetRoleInput input = (SetRoleInput) o;
- if (input.getControllerRole() == ofpRole &&
- input.getNode().equals(nodeRef)) {
- return true;
- }
- return false;
- }
+ @Test
+ public void testIsMaster() throws Exception {
+ Assert.assertTrue(roleContext.initialization());
+ Assert.assertFalse(roleContext.isMaster());
+ Assert.assertTrue(roleContext.registerCandidate(txEntity));
+ Assert.assertTrue(roleContext.isMaster());
+ Assert.assertTrue(roleContext.unregisterCandidate(entity));
+ Assert.assertFalse(roleContext.isMaster());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.role;
+
+
+import java.math.BigInteger;
+
+import com.google.common.base.VerifyException;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.clustering.Entity;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipCandidateRegistration;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipChange;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListener;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListenerRegistration;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.openflowplugin.api.OFConstants;
+import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
+import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.RoleChangeListener;
+import org.opendaylight.openflowplugin.api.openflow.role.RoleContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.FeaturesReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.OfpRole;
+
+/**
+ * Created by Jozef Bacigal
+ * Date: 19.4.2016.
+ * Time: 13:08
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class RoleManagerImplTest {
+
+ @Mock
+ EntityOwnershipService entityOwnershipService;
+
+ @Mock
+ DataBroker dataBroker;
+
+ @Mock
+ DeviceContext deviceContext;
+
+ @Mock
+ DeviceManager deviceManager;
+
+ @Mock
+ EntityOwnershipListener entityOwnershipListener;
+
+ @Mock
+ EntityOwnershipListenerRegistration entityOwnershipListenerRegistration;
+
+ @Mock
+ EntityOwnershipCandidateRegistration entityOwnershipCandidateRegistration;
+
+ @Mock
+ ConnectionContext connectionContext;
+
+ @Mock
+ FeaturesReply featuresReply;
+
+ @Mock
+ DeviceInitializationPhaseHandler deviceInitializationPhaseHandler;
+
+ @Mock
+ DeviceTerminationPhaseHandler deviceTerminationPhaseHandler;
+
+ @Mock
+ WriteTransaction writeTransaction;
+
+ @Mock
+ LifecycleConductor conductor;
+
+ private RoleManagerImpl roleManager;
+ private RoleManagerImpl roleManagerSpy;
+ private RoleContext roleContextSpy;
+ private final NodeId nodeId = NodeId.getDefaultInstance("openflow:1");
+
+ private final EntityOwnershipChange masterEntity = new EntityOwnershipChange(RoleManagerImpl.makeEntity(nodeId), false, true, true);
+ private final EntityOwnershipChange masterTxEntity = new EntityOwnershipChange(RoleManagerImpl.makeTxEntity(nodeId), false, true, true);
+ private final EntityOwnershipChange slaveEntity = new EntityOwnershipChange(RoleManagerImpl.makeEntity(nodeId), true, false, true);
+ private final EntityOwnershipChange slaveTxEntityLast = new EntityOwnershipChange(RoleManagerImpl.makeTxEntity(nodeId), true, false, false);
+
+ private InOrder inOrder;
+
+ @Before
+ public void setUp() throws Exception {
+ CheckedFuture<Void, TransactionCommitFailedException> future = Futures.immediateCheckedFuture(null);
+ Mockito.when(entityOwnershipService.registerListener(Mockito.anyString(), Mockito.any(EntityOwnershipListener.class))).thenReturn(entityOwnershipListenerRegistration);
+ Mockito.when(entityOwnershipService.registerCandidate(Mockito.any(Entity.class))).thenReturn(entityOwnershipCandidateRegistration);
+ Mockito.when(deviceContext.getPrimaryConnectionContext()).thenReturn(connectionContext);
+ Mockito.when(connectionContext.getFeatures()).thenReturn(featuresReply);
+ Mockito.when(connectionContext.getNodeId()).thenReturn(nodeId);
+ Mockito.when(connectionContext.getConnectionState()).thenReturn(ConnectionContext.CONNECTION_STATE.WORKING);
+ Mockito.when(featuresReply.getDatapathId()).thenReturn(new BigInteger("1"));
+ Mockito.when(featuresReply.getVersion()).thenReturn(OFConstants.OFP_VERSION_1_3);
+ Mockito.doNothing().when(deviceInitializationPhaseHandler).onDeviceContextLevelUp(Mockito.<NodeId>any());
+ Mockito.doNothing().when(deviceTerminationPhaseHandler).onDeviceContextLevelDown(Mockito.<DeviceContext>any());
+ Mockito.when(dataBroker.newWriteOnlyTransaction()).thenReturn(writeTransaction);
+ Mockito.when(writeTransaction.submit()).thenReturn(future);
+ Mockito.when(deviceManager.getDeviceContextFromNodeId(Mockito.<NodeId>any())).thenReturn(deviceContext);
+ roleManager = new RoleManagerImpl(entityOwnershipService, dataBroker, conductor);
+ roleManager.setDeviceInitializationPhaseHandler(deviceInitializationPhaseHandler);
+ roleManager.setDeviceTerminationPhaseHandler(deviceTerminationPhaseHandler);
+ Mockito.when(conductor.getDeviceContext(Mockito.<NodeId>any())).thenReturn(deviceContext);
+ roleManagerSpy = Mockito.spy(roleManager);
+ Mockito.doNothing().when(roleManagerSpy).makeDeviceRoleChange(Mockito.<OfpRole>any(), Mockito.<RoleContext>any(), Mockito.anyBoolean());
+ roleManagerSpy.onDeviceContextLevelUp(nodeId);
+ roleContextSpy = Mockito.spy(roleManager.getRoleContext(nodeId));
+ inOrder = Mockito.inOrder(entityOwnershipListenerRegistration, roleManagerSpy, roleContextSpy);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ @Test(expected = VerifyException.class)
+ public void testOnDeviceContextLevelUp() throws Exception {
+ roleManagerSpy.onDeviceContextLevelUp(nodeId);
+ inOrder.verify(roleManagerSpy).onDeviceContextLevelUp(nodeId);
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testCloseMaster() throws Exception {
+ roleManagerSpy.ownershipChanged(masterEntity);
+ roleManagerSpy.ownershipChanged(masterTxEntity);
+ roleManagerSpy.close();
+ inOrder.verify(entityOwnershipListenerRegistration, Mockito.calls(2)).close();
+ inOrder.verify(roleManagerSpy).removeDeviceFromOperationalDS(nodeId);
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testCloseSlave() throws Exception {
+ roleManagerSpy.ownershipChanged(slaveEntity);
+ roleManagerSpy.close();
+ inOrder.verify(entityOwnershipListenerRegistration, Mockito.calls(2)).close();
+ inOrder.verify(roleManagerSpy, Mockito.never()).removeDeviceFromOperationalDS(nodeId);
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOnDeviceContextLevelDown() throws Exception {
+ roleManagerSpy.onDeviceContextLevelDown(deviceContext);
+ inOrder.verify(roleManagerSpy).onDeviceContextLevelDown(deviceContext);
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOwnershipChanged1() throws Exception {
+ roleManagerSpy.ownershipChanged(masterEntity);
+ inOrder.verify(roleManagerSpy, Mockito.calls(1)).changeOwnershipForMainEntity(Mockito.<EntityOwnershipChange>any(),Mockito.<RoleContext>any());
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOwnershipChanged2() throws Exception {
+ roleManagerSpy.ownershipChanged(masterEntity);
+ roleManagerSpy.ownershipChanged(masterTxEntity);
+ inOrder.verify(roleManagerSpy, Mockito.calls(1)).changeOwnershipForTxEntity(Mockito.<EntityOwnershipChange>any(),Mockito.<RoleContext>any());
+ inOrder.verify(roleManagerSpy, Mockito.calls(1)).makeDeviceRoleChange(Mockito.<OfpRole>any(), Mockito.<RoleContext>any(), Mockito.anyBoolean());
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testChangeOwnershipForMainEntity() throws Exception {
+ roleManagerSpy.changeOwnershipForMainEntity(masterEntity, roleContextSpy);
+ inOrder.verify(roleContextSpy, Mockito.atLeastOnce()).isMainCandidateRegistered();
+ inOrder.verify(roleContextSpy, Mockito.atLeastOnce()).registerCandidate(Mockito.<Entity>any());
+ }
+
+ @Test
+ public void testChangeOwnershipForMainEntity2() throws Exception {
+ Mockito.when(roleContextSpy.isMainCandidateRegistered()).thenReturn(false);
+ roleManagerSpy.changeOwnershipForMainEntity(masterEntity, roleContextSpy);
+ inOrder.verify(roleContextSpy, Mockito.atLeastOnce()).isMainCandidateRegistered();
+ }
+
+ @Test
+ public void testChangeOwnershipForTxEntity() throws Exception {
+ Mockito.when(roleContextSpy.isTxCandidateRegistered()).thenReturn(true);
+ roleManagerSpy.changeOwnershipForTxEntity(slaveTxEntityLast, roleContextSpy);
+ inOrder.verify(roleContextSpy, Mockito.atLeastOnce()).isTxCandidateRegistered();
+ inOrder.verify(roleContextSpy, Mockito.calls(1)).unregisterCandidate(Mockito.<Entity>any());
+ inOrder.verify(roleContextSpy, Mockito.never()).close();
+ inOrder.verify(roleManagerSpy, Mockito.calls(1)).removeDeviceFromOperationalDS(Mockito.<NodeId>any());
+ }
+
+ @Test
+ public void testChangeOwnershipForTxEntity2() throws Exception {
+ roleManagerSpy.changeOwnershipForMainEntity(masterEntity, roleContextSpy);
+ roleManagerSpy.changeOwnershipForTxEntity(masterTxEntity, roleContextSpy);
+ inOrder.verify(roleContextSpy, Mockito.atLeastOnce()).isMainCandidateRegistered();
+ inOrder.verify(roleContextSpy, Mockito.calls(1)).registerCandidate(Mockito.<Entity>any());
+ inOrder.verify(roleContextSpy, Mockito.atLeastOnce()).isTxCandidateRegistered();
+ inOrder.verify(roleManagerSpy, Mockito.calls(1)).makeDeviceRoleChange(Mockito.<OfpRole>any(), Mockito.<RoleContext>any(), Mockito.anyBoolean());
+ }
+
+ @Test
+ public void testAddListener() throws Exception {
+ roleManager.addRoleChangeListener((new RoleChangeListener() {
+ @Override
+ public void roleInitializationDone(final NodeId nodeId, final boolean success) {
+ Assert.assertTrue(nodeId.equals(nodeId));
+ Assert.assertTrue(success);
+ }
+
+ @Override
+ public void roleChangeOnDevice(final NodeId nodeId_, final boolean success, final OfpRole newRole, final boolean initializationPhase) {
+ Assert.assertTrue(nodeId.equals(nodeId_));
+ Assert.assertTrue(success);
+ Assert.assertFalse(initializationPhase);
+ Assert.assertTrue(newRole.equals(OfpRole.BECOMEMASTER));
+ }
+ }));
+ roleManager.notifyListenersRoleInitializationDone(nodeId, true);
+ roleManager.notifyListenersRoleChangeOnDevice(nodeId, true, OfpRole.BECOMEMASTER, false);
+ }
+
+
+}
\ No newline at end of file
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
+import org.opendaylight.openflowplugin.api.openflow.device.XidSequencer;
import org.opendaylight.openflowplugin.api.openflow.rpc.RpcContext;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageSpy;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
public class RpcContextImplTest {
@Mock
- private BindingAwareBroker.ProviderContext mockedRpcProviderRegistry;
+ private BindingAwareBroker.ProviderContext rpcProviderRegistry;
@Mock
private DeviceState deviceState;
@Mock
- private DeviceContext deviceContext;
+ private XidSequencer xidSequencer;
@Mock
private MessageSpy messageSpy;
- @Mock
- private NotificationPublishService notificationPublishService;
private KeyedInstanceIdentifier<Node, NodeKey> nodeInstanceIdentifier;
public void setup() {
final NodeId nodeId = new NodeId("openflow:1");
nodeInstanceIdentifier = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
-
- when(deviceState.getNodeInstanceIdentifier()).thenReturn(nodeInstanceIdentifier);
- when(deviceContext.getDeviceState()).thenReturn(deviceState);
- }
-
- @Test
- public void invokeRpcTest() {
-
}
@Test
public void testStoreOrFail() throws Exception {
- try (final RpcContext rpcContext = new RpcContextImpl(messageSpy, mockedRpcProviderRegistry, deviceContext,
- 100, false, notificationPublishService)) {
+ try (final RpcContext rpcContext = new RpcContextImpl(rpcProviderRegistry, xidSequencer,
+ messageSpy, 100, nodeInstanceIdentifier)) {
final RequestContext<?> requestContext = rpcContext.createRequestContext();
assertNotNull(requestContext);
}
@Test
public void testStoreOrFailThatFails() throws Exception {
- try (final RpcContext rpcContext = new RpcContextImpl(messageSpy, mockedRpcProviderRegistry, deviceContext, 0,
- false, notificationPublishService)) {
+ try (final RpcContext rpcContext = new RpcContextImpl(rpcProviderRegistry, xidSequencer,
+ messageSpy, 0, nodeInstanceIdentifier)) {
final RequestContext<?> requestContext = rpcContext.createRequestContext();
assertNull(requestContext);
}
*/
package org.opendaylight.openflowplugin.impl.rpc;
-import static org.mockito.Mockito.times;
-
+import com.google.common.base.VerifyException;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.registry.ItemLifeCycleRegistry;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageSpy;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeContext;
+import org.opendaylight.openflowplugin.impl.LifecycleConductorImpl;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
@RunWith(MockitoJUnitRunner.class)
public class RpcManagerImplTest {
- private static final int AWAITED_NUM_OF_CALL_ADD_ROUTED_RPC = 11;
-
+ private static final int QUOTA_VALUE = 5;
private RpcManagerImpl rpcManager;
+
@Mock
private ProviderContext rpcProviderRegistry;
@Mock
@Mock
private DeviceInitializationPhaseHandler deviceINitializationPhaseHandler;
@Mock
- private ConnectionContext connectionContext;
- @Mock
- private BindingAwareBroker.RoutedRpcRegistration<RpcService> routedRpcRegistration;
- @Mock
private DeviceState deviceState;
@Mock
- private ItemLifeCycleRegistry itemLifeCycleRegistry;
+ private MessageSpy mockMsgSpy;
@Mock
- private MessageSpy messageSpy;
+ private LifecycleConductor conductor;
+
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
private KeyedInstanceIdentifier<Node, NodeKey> nodePath;
+ private NodeId nodeId = new NodeId("openflow-junit:1");
+
@Before
public void setUp() {
- final NodeKey nodeKey = new NodeKey(new NodeId("openflow-junit:1"));
- nodePath = KeyedInstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey);
- rpcManager = new RpcManagerImpl(rpcProviderRegistry, 5);
+ final NodeKey nodeKey = new NodeKey(nodeId);
+ rpcManager = new RpcManagerImpl(rpcProviderRegistry, QUOTA_VALUE, conductor);
rpcManager.setDeviceInitializationPhaseHandler(deviceINitializationPhaseHandler);
- FeaturesReply features = new GetFeaturesOutputBuilder()
- .setVersion(OFConstants.OFP_VERSION_1_3)
- .build();
- Mockito.when(connectionContext.getFeatures()).thenReturn(features);
- Mockito.when(deviceContext.getPrimaryConnectionContext()).thenReturn(connectionContext);
Mockito.when(deviceContext.getDeviceState()).thenReturn(deviceState);
- Mockito.when(deviceContext.getDeviceState().getRole()).thenReturn(OfpRole.BECOMEMASTER);
- Mockito.when(deviceContext.getItemLifeCycleSourceRegistry()).thenReturn(itemLifeCycleRegistry);
- Mockito.when(deviceState.getNodeInstanceIdentifier()).thenReturn(nodePath);
- Mockito.when(deviceContext.getMessageSpy()).thenReturn(messageSpy);
+ Mockito.when(deviceContext.getMessageSpy()).thenReturn(mockMsgSpy);
Mockito.when(deviceState.getNodeId()).thenReturn(nodeKey.getId());
+ Mockito.when(conductor.getDeviceContext(Mockito.<NodeId>any())).thenReturn(deviceContext);
}
@Test
- public void testOnDeviceContextLevelUp() throws Exception {
-
- Mockito.when(rpcProviderRegistry.addRoutedRpcImplementation(
- Matchers.<Class<RpcService>>any(), Matchers.any(RpcService.class)))
- .thenReturn(routedRpcRegistration);
-
- rpcManager.onDeviceContextLevelUp(deviceContext);
+ public void onDeviceContextLevelUp() throws Exception {
+ rpcManager.onDeviceContextLevelUp(nodeId);
+ Mockito.verify(conductor).getDeviceContext(Mockito.<NodeId>any());
+ }
- Mockito.verify(rpcProviderRegistry, times(AWAITED_NUM_OF_CALL_ADD_ROUTED_RPC)).addRoutedRpcImplementation(
- Matchers.<Class<RpcService>>any(), Matchers.any(RpcService.class));
- Mockito.verify(routedRpcRegistration, times(AWAITED_NUM_OF_CALL_ADD_ROUTED_RPC)).registerPath(
- NodeContext.class, nodePath);
- Mockito.verify(deviceINitializationPhaseHandler).onDeviceContextLevelUp(deviceContext);
+ @Test
+ public void onDeviceContextLevelUpTwice() throws Exception {
+ rpcManager.onDeviceContextLevelUp(nodeId);
+ expectedException.expect(VerifyException.class);
+ rpcManager.onDeviceContextLevelUp(nodeId);
}
}
}
@Test
- public void testOnSuccessWithNotMultiNoMultipart() throws ExecutionException, InterruptedException {
+ public void testOnSuccessWithNotMultiNoMultipart() throws Exception {
final HelloMessage mockedHelloMessage = mock(HelloMessage.class);
multipartRequestOnTheFlyCallback.onSuccess(mockedHelloMessage);
* @throws InterruptedException
*/
@Test
- public void testOnSuccessWithValidMultipart1() throws ExecutionException, InterruptedException {
+ public void testOnSuccessWithValidMultipart1() throws Exception {
final MatchBuilder matchBuilder = new MatchBuilder()
.setMatchEntry(Collections.<MatchEntry>emptyList());
final FlowStatsBuilder flowStatsBuilder = new FlowStatsBuilder()
* @throws InterruptedException
*/
@Test
- public void testOnSuccessWithValidMultipart2() throws ExecutionException, InterruptedException {
+ public void testOnSuccessWithValidMultipart2() throws Exception {
final MultipartReplyMessageBuilder mpReplyMessage = new MultipartReplyMessageBuilder()
.setType(MultipartType.OFPMPDESC)
.setFlags(new MultipartRequestFlags(false));
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.InOrder;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchPlanStep;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchStepType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.Batch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.BatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddFlowCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddFlowCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddGroupCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddMeterCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveFlowCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveGroupCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveMeterCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateFlowCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateGroupCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateMeterCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.flow._case.FlatBatchAddFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.flow._case.FlatBatchAddFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.group._case.FlatBatchAddGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.meter._case.FlatBatchAddMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.flow._case.FlatBatchRemoveFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.group._case.FlatBatchRemoveGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.meter._case.FlatBatchRemoveMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.flow._case.FlatBatchUpdateFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.group._case.FlatBatchUpdateGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.meter._case.FlatBatchUpdateMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureFlowIdCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureFlowIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.SalFlowsBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.SalGroupsBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.input.update.grouping.OriginalBatchedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.input.update.grouping.UpdatedBatchedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.SalMetersBatchService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.input.update.grouping.OriginalBatchedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.input.update.grouping.UpdatedBatchedMeterBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link SalFlatBatchServiceImpl}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class SalFlatBatchServiceImplTest {
+
+    // Fixed node reference shared by every test input.
+    private static final NodeId NODE_ID = new NodeId("ut-node-id");
+    private static final InstanceIdentifier<Node> NODE_II = InstanceIdentifier.create(Nodes.class)
+            .child(Node.class, new NodeKey(NODE_ID));
+    private static final NodeRef NODE_REF = new NodeRef(NODE_II);
+
+    // Per-type batch services the flat-batch facade delegates to.
+    @Mock
+    private SalFlowsBatchService salFlowsBatchService;
+    @Mock
+    private SalGroupsBatchService salGroupsBatchService;
+    @Mock
+    private SalMetersBatchService salMetersBatchService;
+    // Stand-alone chain elements used by the executeBatchPlan test.
+    @Mock
+    private AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>> chainElement1;
+    @Mock
+    private AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>> chainElement2;
+    // Captures the AddFlowsBatchInput assembled by the service under test.
+    @Captor
+    private ArgumentCaptor<AddFlowsBatchInput> addFlowsBatchInputCpt;
+
+    // Service under test, wired to the three mocks in setUp().
+    private SalFlatBatchServiceImpl salFlatBatchService;
+
+    @Before
+    public void setUp() throws Exception {
+        // Wire the flat-batch facade to the three mocked per-type batch services.
+        salFlatBatchService = new SalFlatBatchServiceImpl(salFlowsBatchService, salGroupsBatchService, salMetersBatchService);
+
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // Guard: every test must have verified all interactions it caused on the batch services.
+        Mockito.verifyNoMoreInteractions(salFlowsBatchService, salGroupsBatchService, salMetersBatchService);
+    }
+
+    @Test
+    public void testProcessFlatBatch_allSuccessFinished() throws Exception {
+        // Stub all nine batch operations (flows/groups/meters x add/remove/update) to succeed.
+        Mockito.when(salFlowsBatchService.addFlowsBatch(Matchers.<AddFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddFlowsBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salFlowsBatchService.removeFlowsBatch(Matchers.<RemoveFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveFlowsBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salFlowsBatchService.updateFlowsBatch(Matchers.<UpdateFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateFlowsBatchOutputBuilder().build()).buildFuture());
+
+        Mockito.when(salGroupsBatchService.addGroupsBatch(Matchers.<AddGroupsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddGroupsBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salGroupsBatchService.removeGroupsBatch(Matchers.<RemoveGroupsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveGroupsBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salGroupsBatchService.updateGroupsBatch(Matchers.<UpdateGroupsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateGroupsBatchOutputBuilder().build()).buildFuture());
+
+        Mockito.when(salMetersBatchService.addMetersBatch(Matchers.<AddMetersBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddMetersBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salMetersBatchService.removeMetersBatch(Matchers.<RemoveMetersBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveMetersBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salMetersBatchService.updateMetersBatch(Matchers.<UpdateMetersBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateMetersBatchOutputBuilder().build()).buildFuture());
+
+
+        // Nine-step input covering every operation type; exitOnFirstError is irrelevant as all succeed.
+        ProcessFlatBatchInput batchInput = new ProcessFlatBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBatch(Lists.newArrayList(
+                        createFlowAddBatch(0, "f1"),
+                        createFlowRemoveBatch(1, "f2"),
+                        createFlowUpdateBatch(2, "f3"),
+
+                        createGroupAddBatch(3, 1L),
+                        createGroupRemoveBatch(4, 2L),
+                        createGroupUpdateBatch(5, 3L),
+
+                        createMeterAddBatch(3, 1L),
+                        createMeterRemoveBatch(4, 2L),
+                        createMeterUpdateBatch(5, 3L)
+                ))
+                .setExitOnFirstError(true)
+                .build();
+
+        // The service chains the mocked (immediate) futures, so the result is already complete.
+        final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = salFlatBatchService.processFlatBatch(batchInput);
+        Assert.assertTrue(rpcResultFuture.isDone());
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = rpcResultFuture.get();
+        Assert.assertTrue(rpcResult.isSuccessful());
+        Assert.assertTrue(rpcResult.getErrors().isEmpty());
+        Assert.assertTrue(rpcResult.getResult().getBatchFailure().isEmpty());
+
+        // Delegation must follow the flat-batch ordering: flows, then groups, then meters.
+        final InOrder inOrder = Mockito.inOrder(salFlowsBatchService, salGroupsBatchService, salMetersBatchService);
+        inOrder.verify(salFlowsBatchService).addFlowsBatch(Matchers.<AddFlowsBatchInput>any());
+        inOrder.verify(salFlowsBatchService).removeFlowsBatch(Matchers.<RemoveFlowsBatchInput>any());
+        inOrder.verify(salFlowsBatchService).updateFlowsBatch(Matchers.<UpdateFlowsBatchInput>any());
+
+        inOrder.verify(salGroupsBatchService).addGroupsBatch(Matchers.<AddGroupsBatchInput>any());
+        inOrder.verify(salGroupsBatchService).removeGroupsBatch(Matchers.<RemoveGroupsBatchInput>any());
+        inOrder.verify(salGroupsBatchService).updateGroupsBatch(Matchers.<UpdateGroupsBatchInput>any());
+
+        inOrder.verify(salMetersBatchService).addMetersBatch(Matchers.<AddMetersBatchInput>any());
+        inOrder.verify(salMetersBatchService).removeMetersBatch(Matchers.<RemoveMetersBatchInput>any());
+        inOrder.verify(salMetersBatchService).updateMetersBatch(Matchers.<UpdateMetersBatchInput>any());
+    }
+
+    @Test
+    public void testProcessFlatBatch_firstFailedInterrupted() throws Exception {
+        // removeFlowsBatch (step 1) is stubbed to fail; everything else succeeds.
+        prepareFirstFailingMockService();
+
+        int idx = 0;
+        ProcessFlatBatchInput batchInput = new ProcessFlatBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBatch(Lists.newArrayList(
+                        createFlowAddBatch(idx++, "f1", 2),
+                        createFlowRemoveBatch(idx++, "f2"),
+                        createFlowUpdateBatch(idx++, "f3"),
+
+                        createGroupAddBatch(idx++, 1L),
+                        createGroupRemoveBatch(idx++, 2L),
+                        createGroupUpdateBatch(idx++, 3L),
+
+                        createMeterAddBatch(idx++, 1L),
+                        createMeterRemoveBatch(idx++, 2L),
+                        createMeterUpdateBatch(idx++, 3L)
+                ))
+                .setExitOnFirstError(true)
+                .build();
+
+        final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = salFlatBatchService.processFlatBatch(batchInput);
+        Assert.assertTrue(rpcResultFuture.isDone());
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = rpcResultFuture.get();
+        Assert.assertFalse(rpcResult.isSuccessful());
+        Assert.assertEquals(1, rpcResult.getErrors().size());
+        Assert.assertEquals(1, rpcResult.getResult().getBatchFailure().size());
+        // batchOrder 3 = failure offset within the flattened plan (the first add step holds 2 tasks).
+        Assert.assertEquals(3, rpcResult.getResult().getBatchFailure().get(0).getBatchOrder().intValue());
+
+        // exitOnFirstError=true: processing stops right after the failing remove step.
+        final InOrder inOrder = Mockito.inOrder(salFlowsBatchService, salGroupsBatchService, salMetersBatchService);
+        inOrder.verify(salFlowsBatchService).addFlowsBatch(Matchers.<AddFlowsBatchInput>any());
+        inOrder.verify(salFlowsBatchService).removeFlowsBatch(Matchers.<RemoveFlowsBatchInput>any());
+    }
+
+    @Test
+    public void testProcessFlatBatch_firstFailedContinue() throws Exception {
+        // removeFlowsBatch (step 1) is stubbed to fail; everything else succeeds.
+        prepareFirstFailingMockService();
+
+        int idx = 0;
+        ProcessFlatBatchInput batchInput = new ProcessFlatBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBatch(Lists.newArrayList(
+                        createFlowAddBatch(idx++, "f1", 2),
+                        createFlowRemoveBatch(idx++, "f2"),
+                        createFlowUpdateBatch(idx++, "f3"),
+
+                        createGroupAddBatch(idx++, 1L),
+                        createGroupRemoveBatch(idx++, 2L),
+                        createGroupUpdateBatch(idx++, 3L),
+
+                        createMeterAddBatch(idx++, 1L),
+                        createMeterRemoveBatch(idx++, 2L),
+                        createMeterUpdateBatch(idx++, 3L)
+                ))
+                .setExitOnFirstError(false)
+                .build();
+
+        final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = salFlatBatchService.processFlatBatch(batchInput);
+        Assert.assertTrue(rpcResultFuture.isDone());
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = rpcResultFuture.get();
+        Assert.assertFalse(rpcResult.isSuccessful());
+        Assert.assertEquals(1, rpcResult.getErrors().size());
+        Assert.assertEquals(1, rpcResult.getResult().getBatchFailure().size());
+        Assert.assertEquals(3, rpcResult.getResult().getBatchFailure().get(0).getBatchOrder().intValue());
+
+        // exitOnFirstError=false: all nine steps are still executed in order despite the failure.
+        final InOrder inOrder = Mockito.inOrder(salFlowsBatchService, salGroupsBatchService, salMetersBatchService);
+        inOrder.verify(salFlowsBatchService).addFlowsBatch(Matchers.<AddFlowsBatchInput>any());
+        inOrder.verify(salFlowsBatchService).removeFlowsBatch(Matchers.<RemoveFlowsBatchInput>any());
+        inOrder.verify(salFlowsBatchService).updateFlowsBatch(Matchers.<UpdateFlowsBatchInput>any());
+
+        inOrder.verify(salGroupsBatchService).addGroupsBatch(Matchers.<AddGroupsBatchInput>any());
+        inOrder.verify(salGroupsBatchService).removeGroupsBatch(Matchers.<RemoveGroupsBatchInput>any());
+        inOrder.verify(salGroupsBatchService).updateGroupsBatch(Matchers.<UpdateGroupsBatchInput>any());
+
+        inOrder.verify(salMetersBatchService).addMetersBatch(Matchers.<AddMetersBatchInput>any());
+        inOrder.verify(salMetersBatchService).removeMetersBatch(Matchers.<RemoveMetersBatchInput>any());
+        inOrder.verify(salMetersBatchService).updateMetersBatch(Matchers.<UpdateMetersBatchInput>any());
+    }
+
+    /**
+     * Stubs all nine batch operations so that everything succeeds except
+     * {@code removeFlowsBatch}, which fails with a single batch-failure item
+     * (batchOrder = 1, flow id "123") — letting tests observe failure propagation.
+     */
+    private void prepareFirstFailingMockService() {
+        Mockito.when(salFlowsBatchService.addFlowsBatch(Matchers.<AddFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddFlowsBatchOutputBuilder().build()).buildFuture());
+        // The single failing operation: remove-flows reports one failed flow.
+        // NOTE: error text used to say "ut-firstFlowAddError", which misnamed the failing op.
+        Mockito.when(salFlowsBatchService.removeFlowsBatch(Matchers.<RemoveFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.<RemoveFlowsBatchOutput>failed()
+                        .withResult(new RemoveFlowsBatchOutputBuilder()
+                                .setBatchFailedFlowsOutput(Lists.newArrayList(
+                                        new BatchFailedFlowsOutputBuilder()
+                                                .setBatchOrder(1)
+                                                .setFlowId(new FlowId("123"))
+                                                .build()))
+                                .build())
+                        .withError(RpcError.ErrorType.APPLICATION, "ut-firstFlowRemoveError")
+                        .buildFuture());
+        Mockito.when(salFlowsBatchService.updateFlowsBatch(Matchers.<UpdateFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateFlowsBatchOutputBuilder().build()).buildFuture());
+
+        Mockito.when(salGroupsBatchService.addGroupsBatch(Matchers.<AddGroupsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddGroupsBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salGroupsBatchService.removeGroupsBatch(Matchers.<RemoveGroupsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveGroupsBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salGroupsBatchService.updateGroupsBatch(Matchers.<UpdateGroupsBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateGroupsBatchOutputBuilder().build()).buildFuture());
+
+        Mockito.when(salMetersBatchService.addMetersBatch(Matchers.<AddMetersBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddMetersBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salMetersBatchService.removeMetersBatch(Matchers.<RemoveMetersBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveMetersBatchOutputBuilder().build()).buildFuture());
+        Mockito.when(salMetersBatchService.updateMetersBatch(Matchers.<UpdateMetersBatchInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateMetersBatchOutputBuilder().build()).buildFuture());
+    }
+
+    /** Convenience overload: flat-batch step with exactly one add-flow task. */
+    private Batch createFlowAddBatch(final int batchOrder, final String flowIdValue) {
+        return createFlowAddBatch(batchOrder, flowIdValue, 1);
+    }
+
+ private Batch createFlowAddBatch(final int batchOrder, final String flowIdValue, int amount) {
+ return new BatchBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchChoice(new FlatBatchAddFlowCaseBuilder()
+ .setFlatBatchAddFlow(repeatInList(new FlatBatchAddFlowBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build(), amount))
+ .build())
+ .build();
+ }
+
+    /**
+     * Builds a mutable list containing {@code amount} references to the same {@code item}.
+     *
+     * @param item   element to repeat (the same instance is reused, as before)
+     * @param amount number of repetitions; 0 yields an empty list
+     * @return a fresh, mutable {@link ArrayList}
+     */
+    private <T> List<T> repeatInList(final T item, final int amount) {
+        // Collections.nCopies is an immutable view; copy into an ArrayList so the
+        // result stays mutable, matching the previous hand-rolled loop exactly.
+        return new ArrayList<>(Collections.nCopies(amount, item));
+    }
+
+ private Batch createFlowRemoveBatch(final int batchOrder, final String flowIdValue) {
+ return new BatchBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchChoice(new FlatBatchRemoveFlowCaseBuilder()
+ .setFlatBatchRemoveFlow(Collections.singletonList(new FlatBatchRemoveFlowBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build()))
+ .build())
+ .build();
+ }
+
+ private Batch createFlowUpdateBatch(final int batchOrder, final String flowIdValue) {
+ return new BatchBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchChoice(new FlatBatchUpdateFlowCaseBuilder()
+ .setFlatBatchUpdateFlow(Collections.singletonList(new FlatBatchUpdateFlowBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build()))
+ .build())
+ .build();
+ }
+
+ private Batch createGroupAddBatch(final int batchOrder, final long groupIdValue) {
+ return new BatchBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchChoice(new FlatBatchAddGroupCaseBuilder()
+ .setFlatBatchAddGroup(Collections.singletonList(new FlatBatchAddGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build()))
+ .build())
+ .build();
+ }
+
+ private Batch createGroupRemoveBatch(final int batchOrder, final long groupIdValue) {
+ return new BatchBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchChoice(new FlatBatchRemoveGroupCaseBuilder()
+ .setFlatBatchRemoveGroup(Collections.singletonList(new FlatBatchRemoveGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build()))
+ .build())
+ .build();
+ }
+
+ private Batch createGroupUpdateBatch(final int batchOrder, final long groupIdValue) {
+ return new BatchBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchChoice(new FlatBatchUpdateGroupCaseBuilder()
+ .setFlatBatchUpdateGroup(Collections.singletonList(new FlatBatchUpdateGroupBuilder()
+ .setOriginalBatchedGroup(new OriginalBatchedGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build())
+ .setUpdatedBatchedGroup(new UpdatedBatchedGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build())
+ .build()))
+ .build())
+ .build();
+ }
+
+    /**
+     * Creates a flat-batch step carrying a single add-meter task.
+     *
+     * @param batchOrder   position of the step within the flat batch
+     * @param meterIdValue meter id assigned to the task (was misleadingly named groupIdValue)
+     */
+    private Batch createMeterAddBatch(final int batchOrder, final long meterIdValue) {
+        return new BatchBuilder()
+                .setBatchOrder(batchOrder)
+                .setBatchChoice(new FlatBatchAddMeterCaseBuilder()
+                        .setFlatBatchAddMeter(Collections.singletonList(new FlatBatchAddMeterBuilder()
+                                .setMeterId(new MeterId(meterIdValue))
+                                .build()))
+                        .build())
+                .build();
+    }
+
+    /**
+     * Creates a flat-batch step carrying a single remove-meter task.
+     *
+     * @param batchOrder   position of the step within the flat batch
+     * @param meterIdValue meter id assigned to the task (was misleadingly named groupIdValue)
+     */
+    private Batch createMeterRemoveBatch(final int batchOrder, final long meterIdValue) {
+        return new BatchBuilder()
+                .setBatchOrder(batchOrder)
+                .setBatchChoice(new FlatBatchRemoveMeterCaseBuilder()
+                        .setFlatBatchRemoveMeter(Collections.singletonList(new FlatBatchRemoveMeterBuilder()
+                                .setMeterId(new MeterId(meterIdValue))
+                                .build()))
+                        .build())
+                .build();
+    }
+
+    /**
+     * Creates a flat-batch step carrying a single update-meter task whose
+     * original and updated bodies reference the same meter id.
+     *
+     * @param batchOrder   position of the step within the flat batch
+     * @param meterIdValue meter id used for both bodies (was misleadingly named groupIdValue)
+     */
+    private Batch createMeterUpdateBatch(final int batchOrder, final long meterIdValue) {
+        return new BatchBuilder()
+                .setBatchOrder(batchOrder)
+                .setBatchChoice(new FlatBatchUpdateMeterCaseBuilder()
+                        .setFlatBatchUpdateMeter(Collections.singletonList(new FlatBatchUpdateMeterBuilder()
+                                .setOriginalBatchedMeter(new OriginalBatchedMeterBuilder()
+                                        .setMeterId(new MeterId(meterIdValue))
+                                        .build())
+                                .setUpdatedBatchedMeter(new UpdatedBatchedMeterBuilder()
+                                        .setMeterId(new MeterId(meterIdValue))
+                                        .build())
+                                .build()))
+                        .build())
+                .build();
+    }
+
+    @Test
+    public void testExecuteBatchPlan() throws Exception {
+        // Two pre-built chain outcomes: element 1 succeeds, element 2 fails with two flow failures.
+        final ListenableFuture<RpcResult<ProcessFlatBatchOutput>> succeededChainOutput =
+                RpcResultBuilder.<ProcessFlatBatchOutput>success().buildFuture();
+        final ListenableFuture<RpcResult<ProcessFlatBatchOutput>> failedChainOutput =
+                RpcResultBuilder.<ProcessFlatBatchOutput>failed()
+                        .withError(RpcError.ErrorType.APPLICATION, "ut-chainError")
+                        .withResult(createFlatBatchOutput(
+                                createFlowBatchFailure(0, "f1"), createFlowBatchFailure(1, "f2")))
+                        .buildFuture();
+
+        Mockito.when(chainElement1.apply(Matchers.<RpcResult<ProcessFlatBatchOutput>>any()))
+                .thenReturn(succeededChainOutput);
+        Mockito.when(chainElement2.apply(Matchers.<RpcResult<ProcessFlatBatchOutput>>any()))
+                .thenReturn(failedChainOutput);
+
+        final List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> batchChainElements =
+                Lists.newArrayList(chainElement1, chainElement2);
+        final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = salFlatBatchService.executeBatchPlan(batchChainElements);
+
+        // The second element's failure must dominate the aggregated result.
+        Assert.assertTrue(rpcResultFuture.isDone());
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = rpcResultFuture.get();
+        Assert.assertFalse(rpcResult.isSuccessful());
+        Assert.assertEquals(1, rpcResult.getErrors().size());
+        Assert.assertEquals(2, rpcResult.getResult().getBatchFailure().size());
+        Assert.assertEquals("f2", ((FlatBatchFailureFlowIdCase) rpcResult.getResult().getBatchFailure().get(1).getBatchItemIdChoice()).getFlowId().getValue());
+    }
+
+ private BatchFailure createFlowBatchFailure(final int batchOrder, final String flowIdValue) {
+ return new BatchFailureBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchItemIdChoice(new FlatBatchFailureFlowIdCaseBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build())
+ .build();
+ }
+
+ private ProcessFlatBatchOutput createFlatBatchOutput(BatchFailure... batchFailures) {
+ return new ProcessFlatBatchOutputBuilder()
+ .setBatchFailure(Lists.newArrayList(batchFailures))
+ .build();
+ }
+
+    @Test
+    public void testPrepareBatchPlan_success() throws Exception {
+        // One-step plan: a FLOW_ADD step holding a single add-flow task.
+        // (Previously an unused FlatBatchAddFlowCase local was built and the step
+        // left empty; the task now actually populates the step's task bag.)
+        final BatchPlanStep batchPlanStep =
+                new BatchPlanStep(BatchStepType.FLOW_ADD);
+        batchPlanStep.getTaskBag().add(new FlatBatchAddFlowBuilder()
+                .setFlowId(new FlowId("f1"))
+                .build());
+        final List<BatchPlanStep> batchPlan = Lists.newArrayList(batchPlanStep);
+
+        final List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> batchChain =
+                salFlatBatchService.prepareBatchChain(batchPlan, NODE_REF, true);
+
+        // One plan step maps to one chain element.
+        Assert.assertEquals(1, batchChain.size());
+
+        Mockito.when(salFlowsBatchService.addFlowsBatch(Matchers.<AddFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder
+                        .success(new AddFlowsBatchOutputBuilder().build())
+                        .buildFuture());
+
+        final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = salFlatBatchService.executeBatchPlan(batchChain);
+        Assert.assertTrue(rpcResultFuture.isDone());
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = rpcResultFuture.get();
+        Assert.assertTrue(rpcResult.isSuccessful());
+        Assert.assertEquals(0, rpcResult.getErrors().size());
+        Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().size());
+
+        Mockito.verify(salFlowsBatchService).addFlowsBatch(Matchers.<AddFlowsBatchInput>any());
+    }
+
+    @Test
+    public void testPrepareBatchPlan_failure() throws Exception {
+        final FlatBatchAddFlow flatBatchAddFlow = new FlatBatchAddFlowBuilder()
+                .setFlowId(new FlowId("f1"))
+                .build();
+        // One FLOW_ADD step with two identical add-flow tasks; the same step is
+        // placed in the plan twice, so two chain elements are produced.
+        final BatchPlanStep batchPlanStep =
+                new BatchPlanStep(BatchStepType.FLOW_ADD);
+        batchPlanStep.getTaskBag().addAll(Lists.newArrayList(
+                flatBatchAddFlow,
+                flatBatchAddFlow));
+
+        final List<BatchPlanStep> batchPlan = Lists.newArrayList(batchPlanStep, batchPlanStep);
+
+        final List<AsyncFunction<RpcResult<ProcessFlatBatchOutput>, RpcResult<ProcessFlatBatchOutput>>> batchChain =
+                salFlatBatchService.prepareBatchChain(batchPlan, NODE_REF, true);
+
+        Assert.assertEquals(2, batchChain.size());
+
+        // addFlowsBatch fails and reports both flows as failed.
+        Mockito.when(salFlowsBatchService.addFlowsBatch(Matchers.<AddFlowsBatchInput>any()))
+                .thenReturn(RpcResultBuilder
+                        .<AddFlowsBatchOutput>failed()
+                        .withResult(new AddFlowsBatchOutputBuilder()
+                                .setBatchFailedFlowsOutput(Lists.newArrayList(
+                                        new BatchFailedFlowsOutputBuilder()
+                                                .setBatchOrder(0)
+                                                .setFlowId(new FlowId("f1"))
+                                                .build(),
+                                        new BatchFailedFlowsOutputBuilder()
+                                                .setBatchOrder(1)
+                                                .setFlowId(new FlowId("f2"))
+                                                .build()))
+                                .build())
+                        .withError(RpcError.ErrorType.APPLICATION, "ut-addFlowBatchError")
+                        .buildFuture());
+
+        final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = salFlatBatchService.executeBatchPlan(batchChain);
+        Assert.assertTrue(rpcResultFuture.isDone());
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = rpcResultFuture.get();
+        Assert.assertFalse(rpcResult.isSuccessful());
+        Assert.assertEquals(1, rpcResult.getErrors().size());
+        Assert.assertEquals(2, rpcResult.getResult().getBatchFailure().size());
+
+        // The input assembled for the first chain element must contain both add-flow tasks.
+        Mockito.verify(salFlowsBatchService).addFlowsBatch(addFlowsBatchInputCpt.capture());
+        Assert.assertEquals(2, addFlowsBatchInputCpt.getValue().getBatchAddFlows().size());
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.collect.Lists;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.InOrder;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.SendBarrierInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.add.flows.batch.input.BatchAddFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.add.flows.batch.input.BatchAddFlowsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.input.update.grouping.OriginalBatchedFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.input.update.grouping.UpdatedBatchedFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.remove.flows.batch.input.BatchRemoveFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.remove.flows.batch.input.BatchRemoveFlowsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.update.flows.batch.input.BatchUpdateFlows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.update.flows.batch.input.BatchUpdateFlowsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link SalFlowsBatchServiceImpl}.
+ *
+ * <p>Each test stubs {@link SalFlowService} to succeed or fail, invokes the batch
+ * RPC under test, and then verifies (in order) the per-flow delegate calls followed
+ * by the barrier sent via {@link FlowCapableTransactionService}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class SalFlowsBatchServiceImplTest {
+
+    public static final NodeId NODE_ID = new NodeId("ut-dummy-node");
+    public static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
+    public static final NodeRef NODE_REF = new NodeRef(InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY));
+    public static final String FLOW_ID_VALUE_1 = "ut-dummy-flow1";
+    public static final String FLOW_ID_VALUE_2 = "ut-dummy-flow2";
+
+    @Mock
+    private SalFlowService salFlowService;
+    @Mock
+    private FlowCapableTransactionService transactionService;
+    @Captor
+    private ArgumentCaptor<RemoveFlowInput> removeFlowInputCpt;
+    @Captor
+    private ArgumentCaptor<UpdateFlowInput> updateFlowInputCpt;
+    @Captor
+    private ArgumentCaptor<AddFlowInput> addFlowInputCpt;
+
+    private SalFlowsBatchServiceImpl salFlowsBatchService;
+
+    @Before
+    public void setUp() throws Exception {
+        salFlowsBatchService = new SalFlowsBatchServiceImpl(salFlowService, transactionService);
+
+        // barrier always succeeds; individual tests override the flow-service stubs
+        Mockito.when(transactionService.sendBarrier(Matchers.<SendBarrierInput>any()))
+                .thenReturn(RpcResultBuilder.<Void>success().buildFuture());
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // guards against untested interactions leaking out of individual tests
+        Mockito.verifyNoMoreInteractions(salFlowService, transactionService);
+    }
+
+    @Test
+    public void testRemoveFlowsBatch_success() throws Exception {
+        Mockito.when(salFlowService.removeFlow(Matchers.<RemoveFlowInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveFlowOutputBuilder().build())
+                        .buildFuture());
+
+        final BatchRemoveFlows batchFlow1 = createEmptyBatchRemoveFlow(FLOW_ID_VALUE_1, 42);
+        final BatchRemoveFlows batchFlow2 = createEmptyBatchRemoveFlow(FLOW_ID_VALUE_2, 43);
+
+        final RemoveFlowsBatchInput input = new RemoveFlowsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchRemoveFlows(Lists.newArrayList(batchFlow1, batchFlow2))
+                .build();
+
+        final Future<RpcResult<RemoveFlowsBatchOutput>> resultFuture = salFlowsBatchService.removeFlowsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        final RpcResult<RemoveFlowsBatchOutput> rpcResult = resultFuture.get();
+        Assert.assertTrue(rpcResult.isSuccessful());
+        final RemoveFlowsBatchOutput result = rpcResult.getResult();
+        Assert.assertEquals(0, result.getBatchFailedFlowsOutput().size());
+
+        final InOrder inOrder = Mockito.inOrder(salFlowService, transactionService);
+
+        inOrder.verify(salFlowService, Mockito.times(2)).removeFlow(removeFlowInputCpt.capture());
+        final List<RemoveFlowInput> allValues = removeFlowInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42, allValues.get(0).getPriority().longValue());
+        Assert.assertEquals(43, allValues.get(1).getPriority().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testRemoveFlowsBatch_failed() throws Exception {
+        Mockito.when(salFlowService.removeFlow(Matchers.<RemoveFlowInput>any()))
+                .thenReturn(RpcResultBuilder.<RemoveFlowOutput>failed()
+                        .withError(RpcError.ErrorType.APPLICATION, "flow-remove-fail-1")
+                        .buildFuture());
+
+        final BatchRemoveFlows batchFlow1 = createEmptyBatchRemoveFlow(FLOW_ID_VALUE_1, 42);
+        final BatchRemoveFlows batchFlow2 = createEmptyBatchRemoveFlow(FLOW_ID_VALUE_2, 43);
+
+        final RemoveFlowsBatchInput input = new RemoveFlowsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchRemoveFlows(Lists.newArrayList(batchFlow1, batchFlow2))
+                .build();
+
+        final Future<RpcResult<RemoveFlowsBatchOutput>> resultFuture = salFlowsBatchService.removeFlowsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        final RpcResult<RemoveFlowsBatchOutput> rpcResult = resultFuture.get();
+        Assert.assertFalse(rpcResult.isSuccessful());
+        final RemoveFlowsBatchOutput result = rpcResult.getResult();
+        // every flow failed, so both must show up in the failed-flows output, in order
+        Assert.assertEquals(2, result.getBatchFailedFlowsOutput().size());
+        Assert.assertEquals(FLOW_ID_VALUE_1, result.getBatchFailedFlowsOutput().get(0).getFlowId().getValue());
+        Assert.assertEquals(FLOW_ID_VALUE_2, result.getBatchFailedFlowsOutput().get(1).getFlowId().getValue());
+
+        final InOrder inOrder = Mockito.inOrder(salFlowService, transactionService);
+
+        inOrder.verify(salFlowService, Mockito.times(2)).removeFlow(removeFlowInputCpt.capture());
+        final List<RemoveFlowInput> allValues = removeFlowInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42, allValues.get(0).getPriority().longValue());
+        Assert.assertEquals(43, allValues.get(1).getPriority().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    /** Creates a minimal add-flow batch entry with the given id and priority. */
+    private static BatchAddFlows createEmptyBatchAddFlow(final String flowIdValue, final int priority) {
+        return new BatchAddFlowsBuilder()
+                .setFlowId(new FlowId(flowIdValue))
+                .setPriority(priority)
+                .setMatch(new MatchBuilder().build())
+                .setTableId((short) 0)
+                .build();
+    }
+
+    /** Creates a minimal remove-flow batch entry with the given id and priority. */
+    private static BatchRemoveFlows createEmptyBatchRemoveFlow(final String flowIdValue, final int priority) {
+        return new BatchRemoveFlowsBuilder()
+                .setFlowId(new FlowId(flowIdValue))
+                .setPriority(priority)
+                .setMatch(new MatchBuilder().build())
+                .setTableId((short) 0)
+                .build();
+    }
+
+    /**
+     * Creates a minimal update-flow batch entry: original carries {@code priority},
+     * updated carries {@code priority + 1}.
+     */
+    private static BatchUpdateFlows createEmptyBatchUpdateFlow(final String flowIdValue, final int priority) {
+        final BatchAddFlows emptyOriginalFlow = createEmptyBatchAddFlow(flowIdValue, priority);
+        final BatchAddFlows emptyUpdatedFlow = createEmptyBatchAddFlow(flowIdValue, priority + 1);
+        return new BatchUpdateFlowsBuilder()
+                .setFlowId(new FlowId(flowIdValue))
+                .setOriginalBatchedFlow(new OriginalBatchedFlowBuilder(emptyOriginalFlow).build())
+                .setUpdatedBatchedFlow(new UpdatedBatchedFlowBuilder(emptyUpdatedFlow).build())
+                .build();
+    }
+
+    @Test
+    public void testAddFlowsBatch_success() throws Exception {
+        Mockito.when(salFlowService.addFlow(Matchers.<AddFlowInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddFlowOutputBuilder().build()).buildFuture());
+
+        final AddFlowsBatchInput input = new AddFlowsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchAddFlows(Lists.newArrayList(
+                        createEmptyBatchAddFlow(FLOW_ID_VALUE_1, 42),
+                        createEmptyBatchAddFlow(FLOW_ID_VALUE_2, 43)))
+                .build();
+
+        final Future<RpcResult<AddFlowsBatchOutput>> resultFuture = salFlowsBatchService.addFlowsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertTrue(resultFuture.get().isSuccessful());
+
+        final InOrder inOrder = Mockito.inOrder(salFlowService, transactionService);
+
+        inOrder.verify(salFlowService, Mockito.times(2)).addFlow(addFlowInputCpt.capture());
+        final List<AddFlowInput> allValues = addFlowInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42, allValues.get(0).getPriority().longValue());
+        Assert.assertEquals(43, allValues.get(1).getPriority().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testAddFlowsBatch_failed() throws Exception {
+        Mockito.when(salFlowService.addFlow(Matchers.<AddFlowInput>any()))
+                .thenReturn(RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, "ut-flowAddError")
+                        .buildFuture());
+
+        final AddFlowsBatchInput input = new AddFlowsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchAddFlows(Lists.newArrayList(
+                        createEmptyBatchAddFlow(FLOW_ID_VALUE_1, 42),
+                        createEmptyBatchAddFlow(FLOW_ID_VALUE_2, 43)))
+                .build();
+
+        final Future<RpcResult<AddFlowsBatchOutput>> resultFuture = salFlowsBatchService.addFlowsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertFalse(resultFuture.get().isSuccessful());
+        Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedFlowsOutput().size());
+        Assert.assertEquals(FLOW_ID_VALUE_1, resultFuture.get().getResult().getBatchFailedFlowsOutput().get(0).getFlowId().getValue());
+        Assert.assertEquals(FLOW_ID_VALUE_2, resultFuture.get().getResult().getBatchFailedFlowsOutput().get(1).getFlowId().getValue());
+        Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+        final InOrder inOrder = Mockito.inOrder(salFlowService, transactionService);
+
+        inOrder.verify(salFlowService, Mockito.times(2)).addFlow(addFlowInputCpt.capture());
+        final List<AddFlowInput> allValues = addFlowInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42, allValues.get(0).getPriority().longValue());
+        Assert.assertEquals(43, allValues.get(1).getPriority().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testUpdateFlowsBatch_success() throws Exception {
+        Mockito.when(salFlowService.updateFlow(Matchers.<UpdateFlowInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateFlowOutputBuilder().build()).buildFuture());
+
+        final UpdateFlowsBatchInput input = new UpdateFlowsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchUpdateFlows(Lists.newArrayList(
+                        createEmptyBatchUpdateFlow(FLOW_ID_VALUE_1, 42),
+                        createEmptyBatchUpdateFlow(FLOW_ID_VALUE_2, 44)))
+                .build();
+
+        final Future<RpcResult<UpdateFlowsBatchOutput>> resultFuture = salFlowsBatchService.updateFlowsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertTrue(resultFuture.get().isSuccessful());
+
+        final InOrder inOrder = Mockito.inOrder(salFlowService, transactionService);
+
+        inOrder.verify(salFlowService, Mockito.times(2)).updateFlow(updateFlowInputCpt.capture());
+        final List<UpdateFlowInput> allValues = updateFlowInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        // updated flow carries original priority + 1 (see createEmptyBatchUpdateFlow)
+        Assert.assertEquals(42, allValues.get(0).getOriginalFlow().getPriority().longValue());
+        Assert.assertEquals(43, allValues.get(0).getUpdatedFlow().getPriority().longValue());
+        Assert.assertEquals(44, allValues.get(1).getOriginalFlow().getPriority().longValue());
+        Assert.assertEquals(45, allValues.get(1).getUpdatedFlow().getPriority().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testUpdateFlowsBatch_failure() throws Exception {
+        Mockito.when(salFlowService.updateFlow(Matchers.<UpdateFlowInput>any()))
+                .thenReturn(RpcResultBuilder.<UpdateFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, "ut-flowUpdateError")
+                        .buildFuture());
+
+        final UpdateFlowsBatchInput input = new UpdateFlowsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchUpdateFlows(Lists.newArrayList(
+                        createEmptyBatchUpdateFlow(FLOW_ID_VALUE_1, 42),
+                        createEmptyBatchUpdateFlow(FLOW_ID_VALUE_2, 44)))
+                .build();
+
+        final Future<RpcResult<UpdateFlowsBatchOutput>> resultFuture = salFlowsBatchService.updateFlowsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertFalse(resultFuture.get().isSuccessful());
+        Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedFlowsOutput().size());
+        Assert.assertEquals(FLOW_ID_VALUE_1, resultFuture.get().getResult().getBatchFailedFlowsOutput().get(0).getFlowId().getValue());
+        Assert.assertEquals(FLOW_ID_VALUE_2, resultFuture.get().getResult().getBatchFailedFlowsOutput().get(1).getFlowId().getValue());
+        Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+        final InOrder inOrder = Mockito.inOrder(salFlowService, transactionService);
+        inOrder.verify(salFlowService, Mockito.times(2)).updateFlow(updateFlowInputCpt.capture());
+        final List<UpdateFlowInput> allValues = updateFlowInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42, allValues.get(0).getOriginalFlow().getPriority().longValue());
+        Assert.assertEquals(43, allValues.get(0).getUpdatedFlow().getPriority().longValue());
+        Assert.assertEquals(44, allValues.get(1).getOriginalFlow().getPriority().longValue());
+        Assert.assertEquals(45, allValues.get(1).getUpdatedFlow().getPriority().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.collect.Lists;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.InOrder;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.SendBarrierInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.add.groups.batch.input.BatchAddGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.add.groups.batch.input.BatchAddGroupsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.input.update.grouping.OriginalBatchedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.input.update.grouping.UpdatedBatchedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.remove.groups.batch.input.BatchRemoveGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.remove.groups.batch.input.BatchRemoveGroupsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.update.groups.batch.input.BatchUpdateGroups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.update.groups.batch.input.BatchUpdateGroupsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link SalGroupsBatchServiceImpl}.
+ *
+ * <p>Each test stubs {@link SalGroupService} to succeed or fail, invokes the batch
+ * RPC under test, and then verifies (in order) the per-group delegate calls followed
+ * by the barrier sent via {@link FlowCapableTransactionService}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class SalGroupsBatchServiceImplTest {
+
+    public static final NodeId NODE_ID = new NodeId("ut-dummy-node");
+    public static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
+    public static final NodeRef NODE_REF = new NodeRef(InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY));
+
+    @Mock
+    private SalGroupService salGroupService;
+    @Mock
+    private FlowCapableTransactionService transactionService;
+    @Captor
+    private ArgumentCaptor<RemoveGroupInput> removeGroupInputCpt;
+    @Captor
+    private ArgumentCaptor<UpdateGroupInput> updateGroupInputCpt;
+    @Captor
+    private ArgumentCaptor<AddGroupInput> addGroupInputCpt;
+
+    private SalGroupsBatchServiceImpl salGroupsBatchService;
+
+    @Before
+    public void setUp() throws Exception {
+        salGroupsBatchService = new SalGroupsBatchServiceImpl(salGroupService, transactionService);
+
+        // barrier always succeeds; individual tests override the group-service stubs
+        Mockito.when(transactionService.sendBarrier(Matchers.<SendBarrierInput>any()))
+                .thenReturn(RpcResultBuilder.<Void>success().buildFuture());
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // guards against untested interactions leaking out of individual tests
+        Mockito.verifyNoMoreInteractions(salGroupService, transactionService);
+    }
+
+    @Test
+    public void testUpdateGroupsBatch_success() throws Exception {
+        Mockito.when(salGroupService.updateGroup(Mockito.<UpdateGroupInput>any()))
+                .thenReturn(RpcResultBuilder.success(new UpdateGroupOutputBuilder().build()).buildFuture());
+
+        final UpdateGroupsBatchInput input = new UpdateGroupsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchUpdateGroups(Lists.newArrayList(
+                        createEmptyBatchUpdateGroup(42L),
+                        createEmptyBatchUpdateGroup(44L)))
+                .build();
+
+        final Future<RpcResult<UpdateGroupsBatchOutput>> resultFuture = salGroupsBatchService.updateGroupsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertTrue(resultFuture.get().isSuccessful());
+
+        final InOrder inOrder = Mockito.inOrder(salGroupService, transactionService);
+        inOrder.verify(salGroupService, Mockito.times(2)).updateGroup(updateGroupInputCpt.capture());
+        final List<UpdateGroupInput> allValues = updateGroupInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        // updated group carries original id + 1 (see createEmptyBatchUpdateGroup)
+        Assert.assertEquals(42, allValues.get(0).getOriginalGroup().getGroupId().getValue().longValue());
+        Assert.assertEquals(43, allValues.get(0).getUpdatedGroup().getGroupId().getValue().longValue());
+        Assert.assertEquals(44, allValues.get(1).getOriginalGroup().getGroupId().getValue().longValue());
+        Assert.assertEquals(45, allValues.get(1).getUpdatedGroup().getGroupId().getValue().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testUpdateGroupsBatch_failure() throws Exception {
+        Mockito.when(salGroupService.updateGroup(Mockito.<UpdateGroupInput>any()))
+                .thenReturn(RpcResultBuilder.<UpdateGroupOutput>failed()
+                        .withError(RpcError.ErrorType.APPLICATION, "ut-groupUpdateError")
+                        .buildFuture());
+
+        final UpdateGroupsBatchInput input = new UpdateGroupsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchUpdateGroups(Lists.newArrayList(
+                        createEmptyBatchUpdateGroup(42L),
+                        createEmptyBatchUpdateGroup(44L)))
+                .build();
+
+        final Future<RpcResult<UpdateGroupsBatchOutput>> resultFuture = salGroupsBatchService.updateGroupsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertFalse(resultFuture.get().isSuccessful());
+        // failed-groups output reports the *updated* group ids (43, 45)
+        Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedGroupsOutput().size());
+        Assert.assertEquals(43L, resultFuture.get().getResult().getBatchFailedGroupsOutput().get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(45L, resultFuture.get().getResult().getBatchFailedGroupsOutput().get(1).getGroupId().getValue().longValue());
+        Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+        final InOrder inOrder = Mockito.inOrder(salGroupService, transactionService);
+        inOrder.verify(salGroupService, Mockito.times(2)).updateGroup(updateGroupInputCpt.capture());
+        final List<UpdateGroupInput> allValues = updateGroupInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42, allValues.get(0).getOriginalGroup().getGroupId().getValue().longValue());
+        Assert.assertEquals(43, allValues.get(0).getUpdatedGroup().getGroupId().getValue().longValue());
+        Assert.assertEquals(44, allValues.get(1).getOriginalGroup().getGroupId().getValue().longValue());
+        Assert.assertEquals(45, allValues.get(1).getUpdatedGroup().getGroupId().getValue().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testAddGroupsBatch_success() throws Exception {
+        Mockito.when(salGroupService.addGroup(Mockito.<AddGroupInput>any()))
+                .thenReturn(RpcResultBuilder.success(new AddGroupOutputBuilder().build()).buildFuture());
+
+        final AddGroupsBatchInput input = new AddGroupsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchAddGroups(Lists.newArrayList(
+                        createEmptyBatchAddGroup(42L),
+                        createEmptyBatchAddGroup(43L)))
+                .build();
+
+        final Future<RpcResult<AddGroupsBatchOutput>> resultFuture = salGroupsBatchService.addGroupsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertTrue(resultFuture.get().isSuccessful());
+
+        final InOrder inOrder = Mockito.inOrder(salGroupService, transactionService);
+        inOrder.verify(salGroupService, Mockito.times(2)).addGroup(addGroupInputCpt.capture());
+        final List<AddGroupInput> allValues = addGroupInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42L, allValues.get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(43L, allValues.get(1).getGroupId().getValue().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testAddGroupsBatch_failure() throws Exception {
+        Mockito.when(salGroupService.addGroup(Mockito.<AddGroupInput>any()))
+                .thenReturn(RpcResultBuilder.<AddGroupOutput>failed().withError(RpcError.ErrorType.APPLICATION, "ut-groupAddError")
+                        .buildFuture());
+
+        final AddGroupsBatchInput input = new AddGroupsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchAddGroups(Lists.newArrayList(
+                        createEmptyBatchAddGroup(42L),
+                        createEmptyBatchAddGroup(43L)))
+                .build();
+
+        final Future<RpcResult<AddGroupsBatchOutput>> resultFuture = salGroupsBatchService.addGroupsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertFalse(resultFuture.get().isSuccessful());
+        Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedGroupsOutput().size());
+        Assert.assertEquals(42L, resultFuture.get().getResult().getBatchFailedGroupsOutput().get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(43L, resultFuture.get().getResult().getBatchFailedGroupsOutput().get(1).getGroupId().getValue().longValue());
+        Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+        final InOrder inOrder = Mockito.inOrder(salGroupService, transactionService);
+        inOrder.verify(salGroupService, Mockito.times(2)).addGroup(addGroupInputCpt.capture());
+        final List<AddGroupInput> allValues = addGroupInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42L, allValues.get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(43L, allValues.get(1).getGroupId().getValue().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testRemoveGroupsBatch_success() throws Exception {
+        Mockito.when(salGroupService.removeGroup(Mockito.<RemoveGroupInput>any()))
+                .thenReturn(RpcResultBuilder.success(new RemoveGroupOutputBuilder().build()).buildFuture());
+
+        final RemoveGroupsBatchInput input = new RemoveGroupsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchRemoveGroups(Lists.newArrayList(
+                        createEmptyBatchRemoveGroup(42L),
+                        createEmptyBatchRemoveGroup(43L)))
+                .build();
+
+        final Future<RpcResult<RemoveGroupsBatchOutput>> resultFuture = salGroupsBatchService.removeGroupsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertTrue(resultFuture.get().isSuccessful());
+
+        final InOrder inOrder = Mockito.inOrder(salGroupService, transactionService);
+
+        inOrder.verify(salGroupService, Mockito.times(2)).removeGroup(removeGroupInputCpt.capture());
+        final List<RemoveGroupInput> allValues = removeGroupInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42L, allValues.get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(43L, allValues.get(1).getGroupId().getValue().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    @Test
+    public void testRemoveGroupsBatch_failure() throws Exception {
+        Mockito.when(salGroupService.removeGroup(Mockito.<RemoveGroupInput>any()))
+                .thenReturn(RpcResultBuilder.<RemoveGroupOutput>failed().withError(RpcError.ErrorType.APPLICATION, "ut-groupRemoveError")
+                        .buildFuture());
+
+        final RemoveGroupsBatchInput input = new RemoveGroupsBatchInputBuilder()
+                .setNode(NODE_REF)
+                .setBarrierAfter(true)
+                .setBatchRemoveGroups(Lists.newArrayList(
+                        createEmptyBatchRemoveGroup(42L),
+                        createEmptyBatchRemoveGroup(43L)))
+                .build();
+
+        final Future<RpcResult<RemoveGroupsBatchOutput>> resultFuture = salGroupsBatchService.removeGroupsBatch(input);
+
+        Assert.assertTrue(resultFuture.isDone());
+        Assert.assertFalse(resultFuture.get().isSuccessful());
+        Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedGroupsOutput().size());
+        Assert.assertEquals(42L, resultFuture.get().getResult().getBatchFailedGroupsOutput().get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(43L, resultFuture.get().getResult().getBatchFailedGroupsOutput().get(1).getGroupId().getValue().longValue());
+        Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+        final InOrder inOrder = Mockito.inOrder(salGroupService, transactionService);
+
+        inOrder.verify(salGroupService, Mockito.times(2)).removeGroup(removeGroupInputCpt.capture());
+        final List<RemoveGroupInput> allValues = removeGroupInputCpt.getAllValues();
+        Assert.assertEquals(2, allValues.size());
+        Assert.assertEquals(42L, allValues.get(0).getGroupId().getValue().longValue());
+        Assert.assertEquals(43L, allValues.get(1).getGroupId().getValue().longValue());
+
+        inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+    }
+
+    /** Creates a minimal add-group batch entry with the given group id. */
+    private static BatchAddGroups createEmptyBatchAddGroup(final long groupIdValue) {
+        return new BatchAddGroupsBuilder()
+                .setGroupId(new GroupId(groupIdValue))
+                .build();
+    }
+
+    /** Creates a minimal remove-group batch entry with the given group id. */
+    private static BatchRemoveGroups createEmptyBatchRemoveGroup(final long groupIdValue) {
+        return new BatchRemoveGroupsBuilder()
+                .setGroupId(new GroupId(groupIdValue))
+                .build();
+    }
+
+    /**
+     * Creates a minimal update-group batch entry: original carries {@code groupIdValue},
+     * updated carries {@code groupIdValue + 1}.
+     */
+    private static BatchUpdateGroups createEmptyBatchUpdateGroup(final long groupIdValue) {
+        return new BatchUpdateGroupsBuilder()
+                .setOriginalBatchedGroup(new OriginalBatchedGroupBuilder(createEmptyBatchAddGroup(groupIdValue)).build())
+                .setUpdatedBatchedGroup(new UpdatedBatchedGroupBuilder(createEmptyBatchAddGroup(groupIdValue + 1)).build())
+                .build();
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+import com.google.common.collect.Lists;
+import java.util.List;
+import java.util.concurrent.Future;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.InOrder;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.SendBarrierInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.add.meters.batch.input.BatchAddMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.add.meters.batch.input.BatchAddMetersBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.input.update.grouping.OriginalBatchedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.input.update.grouping.UpdatedBatchedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.remove.meters.batch.input.BatchRemoveMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.remove.meters.batch.input.BatchRemoveMetersBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.update.meters.batch.input.BatchUpdateMeters;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.update.meters.batch.input.BatchUpdateMetersBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link SalMetersBatchServiceImpl}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class SalMetersBatchServiceImplTest {
+
+ public static final NodeId NODE_ID = new NodeId("ut-dummy-node");
+ public static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
+ public static final NodeRef NODE_REF = new NodeRef(InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY));
+
+ @Mock
+ private SalMeterService salMeterService;
+ @Mock
+ private FlowCapableTransactionService transactionService;
+ @Captor
+ private ArgumentCaptor<RemoveMeterInput> removeMeterInputCpt;
+ @Captor
+ private ArgumentCaptor<UpdateMeterInput> updateMeterInputCpt;
+ @Captor
+ private ArgumentCaptor<AddMeterInput> addMeterInputCpt;
+
+ private SalMetersBatchServiceImpl salMetersBatchService;
+
+ @Before
+ public void setUp() throws Exception {
+ salMetersBatchService = new SalMetersBatchServiceImpl(salMeterService, transactionService);
+
+ Mockito.when(transactionService.sendBarrier(Matchers.<SendBarrierInput>any()))
+ .thenReturn(RpcResultBuilder.<Void>success().buildFuture());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ Mockito.verifyNoMoreInteractions(salMeterService, transactionService);
+ }
+
+ @Test
+ public void testUpdateMetersBatch_success() throws Exception {
+ Mockito.when(salMeterService.updateMeter(Mockito.<UpdateMeterInput>any()))
+ .thenReturn(RpcResultBuilder.success(new UpdateMeterOutputBuilder().build()).buildFuture());
+
+ final UpdateMetersBatchInput input = new UpdateMetersBatchInputBuilder()
+ .setNode(NODE_REF)
+ .setBarrierAfter(true)
+ .setBatchUpdateMeters(Lists.newArrayList(
+ createEmptyBatchUpdateMeter(42L),
+ createEmptyBatchUpdateMeter(44L)))
+ .build();
+
+ final Future<RpcResult<UpdateMetersBatchOutput>> resultFuture = salMetersBatchService.updateMetersBatch(input);
+
+ Assert.assertTrue(resultFuture.isDone());
+ Assert.assertTrue(resultFuture.get().isSuccessful());
+
+ final InOrder inOrder = Mockito.inOrder(salMeterService, transactionService);
+ inOrder.verify(salMeterService, Mockito.times(2)).updateMeter(updateMeterInputCpt.capture());
+ final List<UpdateMeterInput> allValues = updateMeterInputCpt.getAllValues();
+ Assert.assertEquals(2, allValues.size());
+ Assert.assertEquals(42, allValues.get(0).getOriginalMeter().getMeterId().getValue().longValue());
+ Assert.assertEquals(43, allValues.get(0).getUpdatedMeter().getMeterId().getValue().longValue());
+ Assert.assertEquals(44, allValues.get(1).getOriginalMeter().getMeterId().getValue().longValue());
+ Assert.assertEquals(45, allValues.get(1).getUpdatedMeter().getMeterId().getValue().longValue());
+
+ inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+ }
+
+ @Test
+ public void testUpdateMetersBatch_failure() throws Exception {
+ Mockito.when(salMeterService.updateMeter(Mockito.<UpdateMeterInput>any()))
+ .thenReturn(RpcResultBuilder.<UpdateMeterOutput>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-meterUpdateError")
+ .buildFuture());
+
+ final UpdateMetersBatchInput input = new UpdateMetersBatchInputBuilder()
+ .setNode(NODE_REF)
+ .setBarrierAfter(true)
+ .setBatchUpdateMeters(Lists.newArrayList(
+ createEmptyBatchUpdateMeter(42L),
+ createEmptyBatchUpdateMeter(44L)))
+ .build();
+
+ final Future<RpcResult<UpdateMetersBatchOutput>> resultFuture = salMetersBatchService.updateMetersBatch(input);
+
+ Assert.assertTrue(resultFuture.isDone());
+ Assert.assertFalse(resultFuture.get().isSuccessful());
+ Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedMetersOutput().size());
+ Assert.assertEquals(43L, resultFuture.get().getResult().getBatchFailedMetersOutput().get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(45L, resultFuture.get().getResult().getBatchFailedMetersOutput().get(1).getMeterId().getValue().longValue());
+ Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+
+ final InOrder inOrder = Mockito.inOrder(salMeterService, transactionService);
+ inOrder.verify(salMeterService, Mockito.times(2)).updateMeter(updateMeterInputCpt.capture());
+ final List<UpdateMeterInput> allValues = updateMeterInputCpt.getAllValues();
+ Assert.assertEquals(2, allValues.size());
+ Assert.assertEquals(42, allValues.get(0).getOriginalMeter().getMeterId().getValue().longValue());
+ Assert.assertEquals(43, allValues.get(0).getUpdatedMeter().getMeterId().getValue().longValue());
+ Assert.assertEquals(44, allValues.get(1).getOriginalMeter().getMeterId().getValue().longValue());
+ Assert.assertEquals(45, allValues.get(1).getUpdatedMeter().getMeterId().getValue().longValue());
+
+ inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+ }
+
+
+ @Test
+ public void testAddMetersBatch_success() throws Exception {
+ Mockito.when(salMeterService.addMeter(Mockito.<AddMeterInput>any()))
+ .thenReturn(RpcResultBuilder.success(new AddMeterOutputBuilder().build()).buildFuture());
+
+ final AddMetersBatchInput input = new AddMetersBatchInputBuilder()
+ .setNode(NODE_REF)
+ .setBarrierAfter(true)
+ .setBatchAddMeters(Lists.newArrayList(
+ createEmptyBatchAddMeter(42L),
+ createEmptyBatchAddMeter(43L)))
+ .build();
+
+ final Future<RpcResult<AddMetersBatchOutput>> resultFuture = salMetersBatchService.addMetersBatch(input);
+
+ Assert.assertTrue(resultFuture.isDone());
+ Assert.assertTrue(resultFuture.get().isSuccessful());
+
+ final InOrder inOrder = Mockito.inOrder(salMeterService, transactionService);
+ inOrder.verify(salMeterService, Mockito.times(2)).addMeter(addMeterInputCpt.capture());
+ final List<AddMeterInput> allValues = addMeterInputCpt.getAllValues();
+ Assert.assertEquals(2, allValues.size());
+ Assert.assertEquals(42L, allValues.get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(43L, allValues.get(1).getMeterId().getValue().longValue());
+
+ inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+ }
+
+ @Test
+ public void testAddMetersBatch_failure() throws Exception {
+ Mockito.when(salMeterService.addMeter(Mockito.<AddMeterInput>any()))
+ .thenReturn(RpcResultBuilder.<AddMeterOutput>failed().withError(RpcError.ErrorType.APPLICATION, "ut-meterAddError")
+ .buildFuture());
+
+ final AddMetersBatchInput input = new AddMetersBatchInputBuilder()
+ .setNode(NODE_REF)
+ .setBarrierAfter(true)
+ .setBatchAddMeters(Lists.newArrayList(
+ createEmptyBatchAddMeter(42L),
+ createEmptyBatchAddMeter(43L)))
+ .build();
+
+ final Future<RpcResult<AddMetersBatchOutput>> resultFuture = salMetersBatchService.addMetersBatch(input);
+
+ Assert.assertTrue(resultFuture.isDone());
+ Assert.assertFalse(resultFuture.get().isSuccessful());
+ Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedMetersOutput().size());
+ Assert.assertEquals(42L, resultFuture.get().getResult().getBatchFailedMetersOutput().get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(43L, resultFuture.get().getResult().getBatchFailedMetersOutput().get(1).getMeterId().getValue().longValue());
+ Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+
+ final InOrder inOrder = Mockito.inOrder(salMeterService, transactionService);
+ inOrder.verify(salMeterService, Mockito.times(2)).addMeter(addMeterInputCpt.capture());
+ final List<AddMeterInput> allValues = addMeterInputCpt.getAllValues();
+ Assert.assertEquals(2, allValues.size());
+ Assert.assertEquals(42L, allValues.get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(43L, allValues.get(1).getMeterId().getValue().longValue());
+
+ inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+ }
+
+ @Test
+ public void testRemoveMetersBatch_success() throws Exception {
+ Mockito.when(salMeterService.removeMeter(Mockito.<RemoveMeterInput>any()))
+ .thenReturn(RpcResultBuilder.success(new RemoveMeterOutputBuilder().build()).buildFuture());
+
+ final RemoveMetersBatchInput input = new RemoveMetersBatchInputBuilder()
+ .setNode(NODE_REF)
+ .setBarrierAfter(true)
+ .setBatchRemoveMeters(Lists.newArrayList(
+ createEmptyBatchRemoveMeter(42L),
+ createEmptyBatchRemoveMeter(43L)))
+ .build();
+
+ final Future<RpcResult<RemoveMetersBatchOutput>> resultFuture = salMetersBatchService.removeMetersBatch(input);
+
+ Assert.assertTrue(resultFuture.isDone());
+ Assert.assertTrue(resultFuture.get().isSuccessful());
+
+ final InOrder inOrder = Mockito.inOrder(salMeterService, transactionService);
+
+ inOrder.verify(salMeterService, Mockito.times(2)).removeMeter(removeMeterInputCpt.capture());
+ final List<RemoveMeterInput> allValues = removeMeterInputCpt.getAllValues();
+ Assert.assertEquals(2, allValues.size());
+ Assert.assertEquals(42L, allValues.get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(43L, allValues.get(1).getMeterId().getValue().longValue());
+
+ inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+ }
+
+ @Test
+ public void testRemoveMetersBatch_failure() throws Exception {
+ Mockito.when(salMeterService.removeMeter(Mockito.<RemoveMeterInput>any()))
+ .thenReturn(RpcResultBuilder.<RemoveMeterOutput>failed().withError(RpcError.ErrorType.APPLICATION, "ut-meterRemoveError")
+ .buildFuture());
+
+ final RemoveMetersBatchInput input = new RemoveMetersBatchInputBuilder()
+ .setNode(NODE_REF)
+ .setBarrierAfter(true)
+ .setBatchRemoveMeters(Lists.newArrayList(
+ createEmptyBatchRemoveMeter(42L),
+ createEmptyBatchRemoveMeter(43L)))
+ .build();
+
+ final Future<RpcResult<RemoveMetersBatchOutput>> resultFuture = salMetersBatchService.removeMetersBatch(input);
+
+ Assert.assertTrue(resultFuture.isDone());
+ Assert.assertFalse(resultFuture.get().isSuccessful());
+ Assert.assertEquals(2, resultFuture.get().getResult().getBatchFailedMetersOutput().size());
+ Assert.assertEquals(42L, resultFuture.get().getResult().getBatchFailedMetersOutput().get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(43L, resultFuture.get().getResult().getBatchFailedMetersOutput().get(1).getMeterId().getValue().longValue());
+ Assert.assertEquals(2, resultFuture.get().getErrors().size());
+
+ final InOrder inOrder = Mockito.inOrder(salMeterService, transactionService);
+
+ inOrder.verify(salMeterService, Mockito.times(2)).removeMeter(removeMeterInputCpt.capture());
+ final List<RemoveMeterInput> allValues = removeMeterInputCpt.getAllValues();
+ Assert.assertEquals(2, allValues.size());
+ Assert.assertEquals(42L, allValues.get(0).getMeterId().getValue().longValue());
+ Assert.assertEquals(43L, allValues.get(1).getMeterId().getValue().longValue());
+
+ inOrder.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+ }
+
+ private static BatchAddMeters createEmptyBatchAddMeter(final long meterIdValue) {
+ return new BatchAddMetersBuilder()
+ .setMeterId(new MeterId(meterIdValue))
+ .build();
+ }
+
+ private static BatchRemoveMeters createEmptyBatchRemoveMeter(final long meterIdValue) {
+ return new BatchRemoveMetersBuilder()
+ .setMeterId(new MeterId(meterIdValue))
+ .build();
+ }
+
+ private static BatchUpdateMeters createEmptyBatchUpdateMeter(final long meterIdValue) {
+ return new BatchUpdateMetersBuilder()
+ .setOriginalBatchedMeter(new OriginalBatchedMeterBuilder(createEmptyBatchAddMeter(meterIdValue)).build())
+ .setUpdatedBatchedMeter(new UpdatedBatchedMeterBuilder(createEmptyBatchAddMeter(meterIdValue + 1)).build())
+ .build();
+ }
+}
\ No newline at end of file
when(mockedDeviceContext.getMessageSpy()).thenReturn(mockedMessagSpy);
when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(new DeviceFlowRegistryImpl());
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
- when(mockedDeviceContext.getTimer()).thenReturn(mock(HashedWheelTimer.class));
when(mockedDeviceContext.getMultiMsgCollector(Matchers.<RequestContext<List<MultipartReply>>>any())).thenReturn(multiMessageCollector);
setup();
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.flow._case.FlatBatchAddFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.flow._case.FlatBatchAddFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.flow._case.FlatBatchRemoveFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.flow._case.FlatBatchRemoveFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.flow._case.FlatBatchUpdateFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.flow._case.FlatBatchUpdateFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureFlowIdCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureFlowIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.RemoveFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.UpdateFlowsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link FlatBatchFlowAdapters}.
+ */
+public class FlatBatchFlowAdaptersTest {
+
+ private static final NodeId NODE_ID = new NodeId("ut-node-id");
+ private static final InstanceIdentifier<Node> NODE_II = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(NODE_ID));
+ private static final NodeRef NODE_REF = new NodeRef(NODE_II);
+
+ @Test
+ public void testAdaptFlatBatchAddFlow() throws Exception {
+ final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_ADD);
+ planStep.setBarrierAfter(true);
+ planStep.getTaskBag().addAll(Lists.newArrayList(
+ createAddFlowBatch("1"),
+ createAddFlowBatch("2")));
+
+ final AddFlowsBatchInput addFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchAddFlow(planStep, NODE_REF);
+
+ Assert.assertTrue(addFlowsBatchInput.isBarrierAfter());
+ Assert.assertEquals(2, addFlowsBatchInput.getBatchAddFlows().size());
+ Assert.assertEquals("1", addFlowsBatchInput.getBatchAddFlows().get(0).getFlowId().getValue());
+ Assert.assertEquals("2", addFlowsBatchInput.getBatchAddFlows().get(1).getFlowId().getValue());
+ }
+
+ private FlatBatchAddFlow createAddFlowBatch(final String flowIdValue) {
+ return new FlatBatchAddFlowBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build();
+ }
+
+ private FlatBatchRemoveFlow createRemoveFlowBatch(final String flowIdValue) {
+ return new FlatBatchRemoveFlowBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build();
+ }
+
+ private FlatBatchUpdateFlow createUpdateFlowBatch(final String flowIdValue) {
+ return new FlatBatchUpdateFlowBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build();
+ }
+
+ @Test
+ public void testAdaptFlatBatchRemoveFlow() throws Exception {
+ final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_REMOVE);
+ planStep.setBarrierAfter(true);
+ planStep.getTaskBag().addAll(Lists.newArrayList(
+ createRemoveFlowBatch("1"),
+ createRemoveFlowBatch("2")));
+
+ final RemoveFlowsBatchInput removeFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchRemoveFlow(planStep, NODE_REF);
+
+ Assert.assertTrue(removeFlowsBatchInput.isBarrierAfter());
+ Assert.assertEquals(2, removeFlowsBatchInput.getBatchRemoveFlows().size());
+ Assert.assertEquals("1", removeFlowsBatchInput.getBatchRemoveFlows().get(0).getFlowId().getValue());
+ Assert.assertEquals("2", removeFlowsBatchInput.getBatchRemoveFlows().get(1).getFlowId().getValue());
+ }
+
+ @Test
+ public void testAdaptFlatBatchUpdateFlow() throws Exception {
+ final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_UPDATE);
+ planStep.setBarrierAfter(true);
+ planStep.getTaskBag().addAll(Lists.newArrayList(
+ createUpdateFlowBatch("1"),
+ createUpdateFlowBatch("2")));
+
+ final UpdateFlowsBatchInput updateFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchUpdateFlow(planStep, NODE_REF);
+
+ Assert.assertTrue(updateFlowsBatchInput.isBarrierAfter());
+ Assert.assertEquals(2, updateFlowsBatchInput.getBatchUpdateFlows().size());
+ Assert.assertEquals("1", updateFlowsBatchInput.getBatchUpdateFlows().get(0).getFlowId().getValue());
+ Assert.assertEquals("2", updateFlowsBatchInput.getBatchUpdateFlows().get(1).getFlowId().getValue());
+ }
+
+ @Test
+ public void testCreateBatchFlowChainingFunction_failures() throws Exception {
+ final RpcResult<ProcessFlatBatchOutput> chainInput = RpcResultBuilder.<ProcessFlatBatchOutput>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-chainError")
+ .withResult(new ProcessFlatBatchOutputBuilder()
+ .setBatchFailure(Lists.newArrayList(
+ createChainFailure(0, "f1"),
+ createChainFailure(1, "f2")))
+ .build())
+ .build();
+
+ final RpcResult<BatchFlowOutputListGrouping> input = RpcResultBuilder.<BatchFlowOutputListGrouping>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-flowError")
+ .withResult(new AddFlowsBatchOutputBuilder()
+ .setBatchFailedFlowsOutput(Lists.newArrayList(
+ createBatchFailedFlowsOutput(0, "f3"),
+ createBatchFailedFlowsOutput(1, "f4")
+ ))
+ .build())
+ .build();
+
+ final RpcResult<ProcessFlatBatchOutput> rpcResult = FlatBatchFlowAdapters
+ .createBatchFlowChainingFunction(chainInput, 2).apply(input);
+
+ Assert.assertFalse(rpcResult.isSuccessful());
+ Assert.assertEquals(2, rpcResult.getErrors().size());
+ Assert.assertEquals(4, rpcResult.getResult().getBatchFailure().size());
+ Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().get(0).getBatchOrder().intValue());
+ Assert.assertEquals(1, rpcResult.getResult().getBatchFailure().get(1).getBatchOrder().intValue());
+ Assert.assertEquals(2, rpcResult.getResult().getBatchFailure().get(2).getBatchOrder().intValue());
+ Assert.assertEquals(3, rpcResult.getResult().getBatchFailure().get(3).getBatchOrder().intValue());
+ Assert.assertEquals("f4", ((FlatBatchFailureFlowIdCase) rpcResult.getResult().getBatchFailure().get(3).getBatchItemIdChoice()).getFlowId().getValue());
+ }
+
+ @Test
+ public void testCreateBatchFlowChainingFunction_successes() throws Exception {
+ final RpcResult<ProcessFlatBatchOutput> chainInput = RpcResultBuilder
+ .success(new ProcessFlatBatchOutputBuilder().build())
+ .build();
+ final RpcResult<BatchFlowOutputListGrouping> input = RpcResultBuilder
+ .<BatchFlowOutputListGrouping>success(new AddFlowsBatchOutputBuilder().build())
+ .build();
+
+ final RpcResult<ProcessFlatBatchOutput> rpcResult = FlatBatchFlowAdapters
+ .createBatchFlowChainingFunction(chainInput, 0).apply(input);
+
+ Assert.assertTrue(rpcResult.isSuccessful());
+ Assert.assertEquals(0, rpcResult.getErrors().size());
+ Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().size());
+ }
+
+ private BatchFailedFlowsOutput createBatchFailedFlowsOutput(final Integer batchOrder, final String flowIdValue) {
+ return new BatchFailedFlowsOutputBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .setBatchOrder(batchOrder)
+ .build();
+ }
+
+ private BatchFailure createChainFailure(final int batchOrder, final String flowIdValue) {
+ return new BatchFailureBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchItemIdChoice(new FlatBatchFailureFlowIdCaseBuilder()
+ .setFlowId(new FlowId(flowIdValue))
+ .build())
+ .build();
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.group._case.FlatBatchAddGroup;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.group._case.FlatBatchAddGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.group._case.FlatBatchRemoveGroup;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.group._case.FlatBatchRemoveGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.group._case.FlatBatchUpdateGroup;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.group._case.FlatBatchUpdateGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureGroupIdCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureGroupIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.BatchGroupOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.RemoveGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.UpdateGroupsBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.input.update.grouping.OriginalBatchedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.input.update.grouping.UpdatedBatchedGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link FlatBatchGroupAdapters}.
+ */
+public class FlatBatchGroupAdaptersTest {
+
+ private static final NodeId NODE_ID = new NodeId("ut-node-id");
+ private static final InstanceIdentifier<Node> NODE_II = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(NODE_ID));
+ private static final NodeRef NODE_REF = new NodeRef(NODE_II);
+
+ @Test
+ public void testAdaptFlatBatchAddGroup() throws Exception {
+ final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.GROUP_ADD);
+ planStep.setBarrierAfter(true);
+ planStep.getTaskBag().addAll(Lists.newArrayList(
+ createAddGroupBatch(1L),
+ createAddGroupBatch(2L)));
+
+ final AddGroupsBatchInput addGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchAddGroup(planStep, NODE_REF);
+
+ Assert.assertTrue(addGroupsBatchInput.isBarrierAfter());
+ Assert.assertEquals(2, addGroupsBatchInput.getBatchAddGroups().size());
+ Assert.assertEquals(1L, addGroupsBatchInput.getBatchAddGroups().get(0).getGroupId().getValue().longValue());
+ Assert.assertEquals(2L, addGroupsBatchInput.getBatchAddGroups().get(1).getGroupId().getValue().longValue());
+ }
+
+ private FlatBatchAddGroup createAddGroupBatch(final long groupIdValue) {
+ return new FlatBatchAddGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build();
+ }
+
+ private FlatBatchRemoveGroup createRemoveGroupBatch(final long groupIdValue) {
+ return new FlatBatchRemoveGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build();
+ }
+
+ private FlatBatchUpdateGroup createUpdateGroupBatch(final long groupIdValue) {
+ return new FlatBatchUpdateGroupBuilder()
+ .setOriginalBatchedGroup(new OriginalBatchedGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build())
+ .setUpdatedBatchedGroup(new UpdatedBatchedGroupBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build())
+ .build();
+ }
+
+ @Test
+ public void testAdaptFlatBatchRemoveGroup() throws Exception {
+ final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_REMOVE);
+ planStep.setBarrierAfter(true);
+ planStep.getTaskBag().addAll(Lists.newArrayList(
+ createRemoveGroupBatch(1L),
+ createRemoveGroupBatch(2L)));
+
+ final RemoveGroupsBatchInput removeGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchRemoveGroup(planStep, NODE_REF);
+
+ Assert.assertTrue(removeGroupsBatchInput.isBarrierAfter());
+ Assert.assertEquals(2, removeGroupsBatchInput.getBatchRemoveGroups().size());
+ Assert.assertEquals(1L, removeGroupsBatchInput.getBatchRemoveGroups().get(0).getGroupId().getValue().longValue());
+ Assert.assertEquals(2L, removeGroupsBatchInput.getBatchRemoveGroups().get(1).getGroupId().getValue().longValue());
+ }
+
+ @Test
+ public void testAdaptFlatBatchUpdateGroup() throws Exception {
+ final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_UPDATE);
+ planStep.setBarrierAfter(true);
+ planStep.getTaskBag().addAll(Lists.newArrayList(
+ createUpdateGroupBatch(1L),
+ createUpdateGroupBatch(2L)));
+
+ final UpdateGroupsBatchInput updateGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchUpdateGroup(planStep, NODE_REF);
+
+ Assert.assertTrue(updateGroupsBatchInput.isBarrierAfter());
+ Assert.assertEquals(2, updateGroupsBatchInput.getBatchUpdateGroups().size());
+ Assert.assertEquals(1L, updateGroupsBatchInput.getBatchUpdateGroups().get(0).getUpdatedBatchedGroup().getGroupId().getValue().longValue());
+ Assert.assertEquals(2L, updateGroupsBatchInput.getBatchUpdateGroups().get(1).getUpdatedBatchedGroup().getGroupId().getValue().longValue());
+ }
+
+ @Test
+ public void testCreateBatchGroupChainingFunction_failures() throws Exception {
+ final RpcResult<ProcessFlatBatchOutput> chainInput = RpcResultBuilder.<ProcessFlatBatchOutput>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-chainError")
+ .withResult(new ProcessFlatBatchOutputBuilder()
+ .setBatchFailure(Lists.newArrayList(
+ createChainFailure(0, 1L),
+ createChainFailure(1, 2L)))
+ .build())
+ .build();
+
+ final RpcResult<BatchGroupOutputListGrouping> input = RpcResultBuilder.<BatchGroupOutputListGrouping>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-groupError")
+ .withResult(new AddGroupsBatchOutputBuilder()
+ .setBatchFailedGroupsOutput(Lists.newArrayList(
+ createBatchFailedGroupsOutput(0, 3L),
+ createBatchFailedGroupsOutput(1, 4L)
+ ))
+ .build())
+ .build();
+
+ final RpcResult<ProcessFlatBatchOutput> rpcResult = FlatBatchGroupAdapters
+ .createBatchGroupChainingFunction(chainInput, 2).apply(input);
+
+ Assert.assertFalse(rpcResult.isSuccessful());
+ Assert.assertEquals(2, rpcResult.getErrors().size());
+ Assert.assertEquals(4, rpcResult.getResult().getBatchFailure().size());
+ Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().get(0).getBatchOrder().intValue());
+ Assert.assertEquals(1, rpcResult.getResult().getBatchFailure().get(1).getBatchOrder().intValue());
+ Assert.assertEquals(2, rpcResult.getResult().getBatchFailure().get(2).getBatchOrder().intValue());
+ Assert.assertEquals(3, rpcResult.getResult().getBatchFailure().get(3).getBatchOrder().intValue());
+ Assert.assertEquals(4L, ((FlatBatchFailureGroupIdCase) rpcResult.getResult().getBatchFailure().get(3).getBatchItemIdChoice()).getGroupId().getValue().longValue());
+ }
+
+ @Test
+ public void testCreateBatchGroupChainingFunction_successes() throws Exception {
+ final RpcResult<ProcessFlatBatchOutput> chainInput = RpcResultBuilder
+ .success(new ProcessFlatBatchOutputBuilder().build())
+ .build();
+ final RpcResult<BatchGroupOutputListGrouping> input = RpcResultBuilder
+ .<BatchGroupOutputListGrouping>success(new AddGroupsBatchOutputBuilder().build())
+ .build();
+
+ final RpcResult<ProcessFlatBatchOutput> rpcResult = FlatBatchGroupAdapters
+ .createBatchGroupChainingFunction(chainInput, 0).apply(input);
+
+ Assert.assertTrue(rpcResult.isSuccessful());
+ Assert.assertEquals(0, rpcResult.getErrors().size());
+ Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().size());
+ }
+
+ private BatchFailedGroupsOutput createBatchFailedGroupsOutput(final Integer batchOrder, final long groupIdValue) {
+ return new BatchFailedGroupsOutputBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .setBatchOrder(batchOrder)
+ .build();
+ }
+
+ private BatchFailure createChainFailure(final int batchOrder, final long groupIdValue) {
+ return new BatchFailureBuilder()
+ .setBatchOrder(batchOrder)
+ .setBatchItemIdChoice(new FlatBatchFailureGroupIdCaseBuilder()
+ .setGroupId(new GroupId(groupIdValue))
+ .build())
+ .build();
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services.batch;
+
+import com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.ProcessFlatBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.meter._case.FlatBatchAddMeter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.meter._case.FlatBatchAddMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.meter._case.FlatBatchRemoveMeter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.meter._case.FlatBatchRemoveMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.meter._case.FlatBatchUpdateMeter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.meter._case.FlatBatchUpdateMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailure;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.BatchFailureBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureMeterIdCase;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.output.batch.failure.batch.item.id.choice.FlatBatchFailureMeterIdCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.BatchMeterOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.RemoveMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.UpdateMetersBatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.input.update.grouping.OriginalBatchedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.input.update.grouping.UpdatedBatchedMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutputBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link FlatBatchMeterAdapters}.
+ */
+public class FlatBatchMeterAdaptersTest {
+
+    private static final NodeId NODE_ID = new NodeId("ut-node-id");
+    // Dummy inventory node path; the adapters only need it to build the NodeRef below.
+    private static final InstanceIdentifier<Node> NODE_II = InstanceIdentifier.create(Nodes.class)
+            .child(Node.class, new NodeKey(NODE_ID));
+    private static final NodeRef NODE_REF = new NodeRef(NODE_II);
+
+    @Test
+    public void testAdaptFlatBatchAddMeter() throws Exception {
+        // NOTE(review): FLOW_ADD step type in a *meter* adapter test - presumably a copy/paste
+        // leftover from the flow tests; a METER_ADD constant would match the adapter under
+        // test - TODO confirm the adapter ignores the step type.
+        final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_ADD);
+        planStep.setBarrierAfter(true);
+        planStep.getTaskBag().addAll(Lists.newArrayList(
+                createAddMeterBatch(1L),
+                createAddMeterBatch(2L)));
+
+        final AddMetersBatchInput addMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchAddMeter(planStep, NODE_REF);
+
+        // Barrier flag and task order must be carried over 1:1 into the batch input.
+        Assert.assertTrue(addMetersBatchInput.isBarrierAfter());
+        Assert.assertEquals(2, addMetersBatchInput.getBatchAddMeters().size());
+        Assert.assertEquals(1L, addMetersBatchInput.getBatchAddMeters().get(0).getMeterId().getValue().longValue());
+        Assert.assertEquals(2L, addMetersBatchInput.getBatchAddMeters().get(1).getMeterId().getValue().longValue());
+    }
+
+    /** Builds a flat-batch add-meter task. NOTE(review): param named groupIdValue but carries a meter id. */
+    private FlatBatchAddMeter createAddMeterBatch(final long groupIdValue) {
+        return new FlatBatchAddMeterBuilder()
+                .setMeterId(new MeterId(groupIdValue))
+                .build();
+    }
+
+    /** Builds a flat-batch remove-meter task. NOTE(review): param named groupIdValue but carries a meter id. */
+    private FlatBatchRemoveMeter createRemoveMeterBatch(final long groupIdValue) {
+        return new FlatBatchRemoveMeterBuilder()
+                .setMeterId(new MeterId(groupIdValue))
+                .build();
+    }
+
+    /** Builds a flat-batch update-meter task with identical original/updated meter ids. */
+    private FlatBatchUpdateMeter createUpdateMeterBatch(final long groupIdValue) {
+        return new FlatBatchUpdateMeterBuilder()
+                .setOriginalBatchedMeter(new OriginalBatchedMeterBuilder()
+                        .setMeterId(new MeterId(groupIdValue))
+                        .build())
+                .setUpdatedBatchedMeter(new UpdatedBatchedMeterBuilder()
+                        .setMeterId(new MeterId(groupIdValue))
+                        .build())
+                .build();
+    }
+
+    @Test
+    public void testAdaptFlatBatchRemoveMeter() throws Exception {
+        // NOTE(review): FLOW_REMOVE used for a meter step - see note in testAdaptFlatBatchAddMeter.
+        final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_REMOVE);
+        planStep.setBarrierAfter(true);
+        planStep.getTaskBag().addAll(Lists.newArrayList(
+                createRemoveMeterBatch(1L),
+                createRemoveMeterBatch(2L)));
+
+        final RemoveMetersBatchInput removeMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchRemoveMeter(planStep, NODE_REF);
+
+        Assert.assertTrue(removeMetersBatchInput.isBarrierAfter());
+        Assert.assertEquals(2, removeMetersBatchInput.getBatchRemoveMeters().size());
+        Assert.assertEquals(1L, removeMetersBatchInput.getBatchRemoveMeters().get(0).getMeterId().getValue().longValue());
+        Assert.assertEquals(2L, removeMetersBatchInput.getBatchRemoveMeters().get(1).getMeterId().getValue().longValue());
+    }
+
+    @Test
+    public void testAdaptFlatBatchUpdateMeter() throws Exception {
+        // NOTE(review): FLOW_UPDATE used for a meter step - see note in testAdaptFlatBatchAddMeter.
+        final BatchPlanStep planStep = new BatchPlanStep(BatchStepType.FLOW_UPDATE);
+        planStep.setBarrierAfter(true);
+        planStep.getTaskBag().addAll(Lists.newArrayList(
+                createUpdateMeterBatch(1L),
+                createUpdateMeterBatch(2L)));
+
+        final UpdateMetersBatchInput updateMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchUpdateMeter(planStep, NODE_REF);
+
+        Assert.assertTrue(updateMetersBatchInput.isBarrierAfter());
+        Assert.assertEquals(2, updateMetersBatchInput.getBatchUpdateMeters().size());
+        Assert.assertEquals(1L, updateMetersBatchInput.getBatchUpdateMeters().get(0).getUpdatedBatchedMeter().getMeterId().getValue().longValue());
+        Assert.assertEquals(2L, updateMetersBatchInput.getBatchUpdateMeters().get(1).getUpdatedBatchedMeter().getMeterId().getValue().longValue());
+    }
+
+    @Test
+    public void testCreateBatchMeterChainingFunction_failures() throws Exception {
+        // Previously accumulated chain result: already failed, carrying batch orders 0 and 1.
+        final RpcResult<ProcessFlatBatchOutput> chainInput = RpcResultBuilder.<ProcessFlatBatchOutput>failed()
+                .withError(RpcError.ErrorType.APPLICATION, "ut-chainError")
+                .withResult(new ProcessFlatBatchOutputBuilder()
+                        .setBatchFailure(Lists.newArrayList(
+                                createChainFailure(0, 1L),
+                                createChainFailure(1, 2L)))
+                        .build())
+                .build();
+
+        // Current step result: failed as well, with step-local failure orders 0 and 1.
+        final RpcResult<BatchMeterOutputListGrouping> input = RpcResultBuilder.<BatchMeterOutputListGrouping>failed()
+                .withError(RpcError.ErrorType.APPLICATION, "ut-groupError")
+                .withResult(new AddMetersBatchOutputBuilder()
+                        .setBatchFailedMetersOutput(Lists.newArrayList(
+                                createBatchFailedMetersOutput(0, 3L),
+                                createBatchFailedMetersOutput(1, 4L)
+                        ))
+                        .build())
+                .build();
+
+        // Step offset 2: step-local failure orders 0,1 are expected to be re-based to
+        // global orders 2,3 and appended behind the chained failures.
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = FlatBatchMeterAdapters
+                .createBatchMeterChainingFunction(chainInput, 2).apply(input);
+
+        Assert.assertFalse(rpcResult.isSuccessful());
+        Assert.assertEquals(2, rpcResult.getErrors().size());
+        Assert.assertEquals(4, rpcResult.getResult().getBatchFailure().size());
+        Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().get(0).getBatchOrder().intValue());
+        Assert.assertEquals(1, rpcResult.getResult().getBatchFailure().get(1).getBatchOrder().intValue());
+        Assert.assertEquals(2, rpcResult.getResult().getBatchFailure().get(2).getBatchOrder().intValue());
+        Assert.assertEquals(3, rpcResult.getResult().getBatchFailure().get(3).getBatchOrder().intValue());
+        Assert.assertEquals(4L, ((FlatBatchFailureMeterIdCase) rpcResult.getResult().getBatchFailure().get(3).getBatchItemIdChoice()).getMeterId().getValue().longValue());
+    }
+
+    @Test
+    public void testCreateBatchMeterChainingFunction_successes() throws Exception {
+        // Success + success must chain into a success with no accumulated failures.
+        final RpcResult<ProcessFlatBatchOutput> chainInput = RpcResultBuilder
+                .success(new ProcessFlatBatchOutputBuilder().build())
+                .build();
+        final RpcResult<BatchMeterOutputListGrouping> input = RpcResultBuilder
+                .<BatchMeterOutputListGrouping>success(new AddMetersBatchOutputBuilder().build())
+                .build();
+
+        final RpcResult<ProcessFlatBatchOutput> rpcResult = FlatBatchMeterAdapters
+                .createBatchMeterChainingFunction(chainInput, 0).apply(input);
+
+        Assert.assertTrue(rpcResult.isSuccessful());
+        Assert.assertEquals(0, rpcResult.getErrors().size());
+        Assert.assertEquals(0, rpcResult.getResult().getBatchFailure().size());
+    }
+
+    /** Builds a per-step failed-meter output. NOTE(review): param named groupIdValue but carries a meter id. */
+    private BatchFailedMetersOutput createBatchFailedMetersOutput(final Integer batchOrder, final long groupIdValue) {
+        return new BatchFailedMetersOutputBuilder()
+                .setMeterId(new MeterId(groupIdValue))
+                .setBatchOrder(batchOrder)
+                .build();
+    }
+
+    /** Builds a chained (global) batch failure with the given order and meter id. */
+    private BatchFailure createChainFailure(final int batchOrder, final long groupIdValue) {
+        return new BatchFailureBuilder()
+                .setBatchOrder(batchOrder)
+                .setBatchItemIdChoice(new FlatBatchFailureMeterIdCaseBuilder()
+                        .setMeterId(new MeterId(groupIdValue))
+                        .build())
+                .build();
+    }
+}
\ No newline at end of file
import static org.mockito.Mockito.when;
import org.junit.Before;
+import org.mockito.Mockito;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.MessageSpy;
import org.opendaylight.openflowplugin.impl.statistics.services.dedicated.StatisticsGatheringOnTheFlyService;
import org.opendaylight.openflowplugin.impl.statistics.services.dedicated.StatisticsGatheringService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.FeaturesReply;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-public class StatisticsContextImpMockInitiation {
-    protected boolean isTable = false;
-    protected boolean isFlow = false;
-    protected boolean isGroup = false;
-    protected boolean isMeter = false;
-    protected boolean isPort = false;
-    protected boolean isQueue = false;
+// Shared Mockito scaffolding base for the statistics-context tests (now package-private).
+// NOTE(review): fields changed from primitive boolean to boxed Boolean - boxing is not
+// needed for these flags and permits an accidental null; confirm it is intentional.
+class StatisticsContextImpMockInitiation {
+    Boolean isTable = false;
+    Boolean isFlow = false;
+    Boolean isGroup = false;
+    Boolean isMeter = false;
+    Boolean isPort = false;
+    Boolean isQueue = false;
     protected DeviceContext mockedDeviceContext;
-    protected StatisticsGatheringService mockedStatisticsGatheringService;
-    protected StatisticsGatheringOnTheFlyService mockedStatisticsOnFlyGatheringService;
-    protected ConnectionContext mockedConnectionContext;
-    protected FeaturesReply mockedFeatures;
+    StatisticsGatheringService mockedStatisticsGatheringService;
+    StatisticsGatheringOnTheFlyService mockedStatisticsOnFlyGatheringService;
+    ConnectionContext mockedConnectionContext;
     protected DeviceState mockedDeviceState;
+    // Fixed node path shared by all tests; its NodeId must match getNodeId() stubs below.
+    static final KeyedInstanceIdentifier<Node, NodeKey> dummyNodeII = InstanceIdentifier.create(Nodes.class)
+            .child(Node.class, new NodeKey(new NodeId("dummyNodeId")));
     protected MessageSpy mockedMessageSpy;
     protected OutboundQueue mockedOutboundQueue;
+    protected DeviceManager mockedDeviceManager;
+    LifecycleConductor mockConductor;
     @Before
     public void initialize() {
         mockedStatisticsGatheringService = mock(StatisticsGatheringService.class);
         mockedStatisticsOnFlyGatheringService = mock(StatisticsGatheringOnTheFlyService.class);
         mockedConnectionContext = mock(ConnectionContext.class);
-        mockedFeatures = mock(FeaturesReply.class);
+        final FeaturesReply mockedFeatures = mock(FeaturesReply.class);
         mockedDeviceState = mock(DeviceState.class);
-        mockedMessageSpy = mock(MessageSpy.class);
-        mockedOutboundQueue = mock(OutboundQueue.class);
+        // NOTE(review): these three locals shadow the same-named fields declared above, so
+        // the fields (mockedMessageSpy, mockedOutboundQueue, mockedDeviceManager) stay null
+        // for subclasses - TODO confirm no test reads the fields directly.
+        final MessageSpy mockedMessageSpy = mock(MessageSpy.class);
+        final OutboundQueue mockedOutboundQueue = mock(OutboundQueue.class);
+        final DeviceManager mockedDeviceManager = mock(DeviceManager.class);
+        mockConductor = mock(LifecycleConductor.class);
+        // NOTE(review): mockedDeviceContext is stubbed here but its mock(...) assignment is
+        // not visible in this hunk - presumably created in unchanged context; verify.
+        when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
+        when(mockedDeviceContext.getPrimaryConnectionContext()).thenReturn(mockedConnectionContext);
+        when(mockedDeviceContext.getMessageSpy()).thenReturn(mockedMessageSpy);
         when(mockedDeviceState.isTableStatisticsAvailable()).thenReturn(isTable);
         when(mockedDeviceState.isFlowStatisticsAvailable()).thenReturn(isFlow);
         when(mockedDeviceState.isGroupAvailable()).thenReturn(isGroup);
         when(mockedDeviceState.isMetersAvailable()).thenReturn(isMeter);
         when(mockedDeviceState.isPortStatisticsAvailable()).thenReturn(isPort);
         when(mockedDeviceState.isQueueStatisticsAvailable()).thenReturn(isQueue);
-        when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
-        when(mockedDeviceContext.getPrimaryConnectionContext()).thenReturn(mockedConnectionContext);
-        when(mockedDeviceContext.getMessageSpy()).thenReturn(mockedMessageSpy);
+        when(mockedDeviceState.getNodeInstanceIdentifier()).thenReturn(dummyNodeII);
-        when(mockedConnectionContext.getNodeId()).thenReturn(new NodeId("dummyNodeId"));
+        // Node id is derived from dummyNodeII so the two stubs can never diverge.
+        when(mockedConnectionContext.getNodeId()).thenReturn(dummyNodeII.getKey().getId());
         when(mockedConnectionContext.getFeatures()).thenReturn(mockedFeatures);
         when(mockedConnectionContext.getConnectionState()).thenReturn(ConnectionContext.CONNECTION_STATE.WORKING);
         when(mockedConnectionContext.getOutboundQueueProvider()).thenReturn(mockedOutboundQueue);
+        when(mockedDeviceManager.getDeviceContextFromNodeId(Mockito.<NodeId>any())).thenReturn(mockedDeviceContext);
+        // NOTE(review): calling a setter on a Mockito mock is a no-op recorded invocation -
+        // it does not actually wire the device manager into the conductor; verify intended.
+        mockConductor.setSafelyDeviceManager(mockedDeviceManager);
+        when(mockConductor.getDeviceContext(Mockito.<NodeId>any())).thenReturn(mockedDeviceContext);
+
     }
 }
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
+import org.mockito.Mockito;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.EventIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.MultipartReply;
import org.opendaylight.yangtools.yang.common.RpcResult;
@Test
public void gatherDynamicDataTest() {
- final StatisticsContextImpl statisticsContext = new StatisticsContextImpl(mockedDeviceContext, false);
+
+ final StatisticsContextImpl statisticsContext = new StatisticsContextImpl(mockedDeviceContext.getDeviceState().getNodeId(), false, mockConductor);
final ListenableFuture<RpcResult<List<MultipartReply>>> rpcResult = immediateFuture(RpcResultBuilder.success(Collections.<MultipartReply>emptyList()).build());
when(mockedStatisticsGatheringService.getStatisticsOfType(any(EventIdentifier.class), any(MultipartType
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.EventIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.MultipartReply;
import org.opendaylight.yangtools.yang.common.RpcResult;
     @Before
     public void setUp() throws Exception {
-        when(mockedDeviceContext.reservedXidForDeviceMessage()).thenReturn(TEST_XID);
+        // Stub renamed to match the DeviceContext API (reserveXidForDeviceMessage, typo fixed).
+        when(mockedDeviceContext.reserveXidForDeviceMessage()).thenReturn(TEST_XID);
+        // The conductor now resolves node ids to device contexts for StatisticsContextImpl.
+        when(mockConductor.getDeviceContext(Mockito.<NodeId>any())).thenReturn(mockedDeviceContext);
         initStatisticsContext();
     }
     private void initStatisticsContext() {
-        statisticsContext = new StatisticsContextImpl(mockedDeviceContext, false);
+        // Constructor signature changed: it now takes the NodeId plus the lifecycle conductor
+        // (which supplies the DeviceContext on demand) instead of the DeviceContext itself.
+        statisticsContext = new StatisticsContextImpl(mockedDeviceContext.getDeviceState().getNodeId(), false, mockConductor);
         statisticsContext.setStatisticsGatheringService(mockedStatisticsGatheringService);
         statisticsContext.setStatisticsGatheringOnTheFlyService(mockedStatisticsOnFlyGatheringService);
     }
*/
@Test
public void testClose() throws Exception {
- final StatisticsContextImpl statisticsContext = new StatisticsContextImpl(mockedDeviceContext, false);
+ final StatisticsContextImpl statisticsContext = new StatisticsContextImpl(mockedDeviceContext.getDeviceState().getNodeId(), false, mockConductor);
final RequestContext<Object> requestContext = statisticsContext.createRequestContext();
statisticsContext.close();
try {
@Test
public void testGatherDynamicData_all() throws Exception {
Mockito.reset(mockedDeviceState);
- when(mockedDeviceState.isTableStatisticsAvailable()).thenReturn(true);
- when(mockedDeviceState.isFlowStatisticsAvailable()).thenReturn(true);
- when(mockedDeviceState.isGroupAvailable()).thenReturn(true);
- when(mockedDeviceState.isMetersAvailable()).thenReturn(true);
- when(mockedDeviceState.isPortStatisticsAvailable()).thenReturn(true);
- when(mockedDeviceState.isQueueStatisticsAvailable()).thenReturn(true);
+ when(mockedDeviceState.isTableStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isFlowStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isGroupAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isMetersAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isPortStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isQueueStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.getNodeInstanceIdentifier()).thenReturn(dummyNodeII);
initStatisticsContext();
when(mockedStatisticsGatheringService.getStatisticsOfType(Matchers.any(EventIdentifier.class), Matchers.any(MultipartType.class)))
}
@Test
- public void testWriteFlowStatistics() {
+ public void testWriteFlowStatistics() throws Exception {
final ArgumentCaptor<LogicalDatastoreType> dataStoreType = ArgumentCaptor.forClass(LogicalDatastoreType.class);
final ArgumentCaptor<InstanceIdentifier> flowPath = ArgumentCaptor.forClass(InstanceIdentifier.class);
final ArgumentCaptor<Flow> flow = ArgumentCaptor.forClass(Flow.class);
import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
+import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceState;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContext;
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceTerminationPhaseHandler;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.MultiMsgCollector;
+import org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleConductor;
import org.opendaylight.openflowplugin.api.openflow.registry.ItemLifeCycleRegistry;
import org.opendaylight.openflowplugin.api.openflow.rpc.ItemLifeCycleSource;
import org.opendaylight.openflowplugin.api.openflow.rpc.listener.ItemLifecycleListener;
private ArgumentCaptor<ItemLifecycleListener> itemLifeCycleListenerCapt;
@Mock
private BindingAwareBroker.RpcRegistration<StatisticsManagerControlService> serviceControlRegistration;
+ @Mock
+ private DeviceManager deviceManager;
+ @Mock
+ private LifecycleConductor conductor;
private RequestContext<List<MultipartReply>> currentRequestContext;
private StatisticsManagerImpl statisticsManager;
when(mockedPrimConnectionContext.getNodeId()).thenReturn(new NodeId("ut-node:123"));
when(mockedPrimConnectionContext.getOutboundQueueProvider()).thenReturn(outboundQueue);
- when(mockedDeviceState.isFlowStatisticsAvailable()).thenReturn(true);
- when(mockedDeviceState.isGroupAvailable()).thenReturn(true);
- when(mockedDeviceState.isMetersAvailable()).thenReturn(true);
- when(mockedDeviceState.isPortStatisticsAvailable()).thenReturn(true);
- when(mockedDeviceState.isQueueStatisticsAvailable()).thenReturn(true);
- when(mockedDeviceState.isTableStatisticsAvailable()).thenReturn(true);
+ when(mockedDeviceState.isFlowStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isGroupAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isMetersAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isPortStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isQueueStatisticsAvailable()).thenReturn(Boolean.TRUE);
+ when(mockedDeviceState.isTableStatisticsAvailable()).thenReturn(Boolean.TRUE);
when(mockedDeviceState.getNodeId()).thenReturn(new NodeId("ofp-unit-dummy-node-id"));
when(mockedDeviceContext.getMessageSpy()).thenReturn(mockedMessagSpy);
when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(new DeviceFlowRegistryImpl());
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
- when(mockedDeviceContext.getTimer()).thenReturn(hashedWheelTimer);
when(mockedDeviceContext.getMultiMsgCollector(
Matchers.<RequestContext<List<MultipartReply>>>any())).thenAnswer(
new Answer<MultiMsgCollector>() {
Matchers.eq(StatisticsManagerControlService.class),
Matchers.<StatisticsManagerControlService>any())).thenReturn(serviceControlRegistration);
- statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, false);
+ statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, false, conductor);
+ when(deviceManager.getDeviceContextFromNodeId(Mockito.<NodeId>any())).thenReturn(mockedDeviceContext);
+ when(conductor.getDeviceContext(Mockito.<NodeId>any())).thenReturn(mockedDeviceContext);
}
@Test
public void testOnDeviceContextLevelUp() throws Exception {
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- final FutureCallback<OfHeader> callback = (FutureCallback<OfHeader>) invocation.getArguments()[2];
- LOG.debug("committing entry: {}", ((MultipartRequestInput) invocation.getArguments()[1]).getType());
- callback.onSuccess(null);
- currentRequestContext.setResult(RpcResultBuilder.<List<MultipartReply>>success().build());
- return null;
- }
- }).when(outboundQueue)
- .commitEntry(Matchers.anyLong(), Matchers.<OfHeader>any(), Matchers.<FutureCallback<OfHeader>>any());
-
- statisticsManager.setDeviceInitializationPhaseHandler(mockedDevicePhaseHandler);
- statisticsManager.onDeviceContextLevelUp(mockedDeviceContext);
-
- verify(mockedDeviceContext).addDeviceContextClosedHandler(statisticsManager);
- verify(mockedDeviceContext, Mockito.never()).reservedXidForDeviceMessage();
- verify(mockedDeviceState).setDeviceSynchronized(true);
- verify(mockedDevicePhaseHandler).onDeviceContextLevelUp(mockedDeviceContext);
- verify(hashedWheelTimer).newTimeout(Matchers.<TimerTask>any(), Matchers.anyLong(), Matchers.<TimeUnit>any());
- }
-
- @Test
- public void testOnDeviceContextLevelUp1() throws Exception {
- statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, true);
+ statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, true, conductor);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(final InvocationOnMock invocation) throws Throwable {
.commitEntry(Matchers.anyLong(), Matchers.<OfHeader>any(), Matchers.<FutureCallback<OfHeader>>any());
statisticsManager.setDeviceInitializationPhaseHandler(mockedDevicePhaseHandler);
- statisticsManager.onDeviceContextLevelUp(mockedDeviceContext);
+ statisticsManager.onDeviceContextLevelUp(mockedDeviceContext.getDeviceState().getNodeId());
- verify(mockedDeviceContext).addDeviceContextClosedHandler(statisticsManager);
- verify(mockedDeviceContext, Mockito.never()).reservedXidForDeviceMessage();
+ verify(mockedDeviceContext, Mockito.never()).reserveXidForDeviceMessage();
verify(mockedDeviceState).setDeviceSynchronized(true);
- verify(mockedDevicePhaseHandler).onDeviceContextLevelUp(mockedDeviceContext);
+ verify(mockedDevicePhaseHandler).onDeviceContextLevelUp(mockedDeviceContext.getDeviceState().getNodeId());
verify(hashedWheelTimer, Mockito.never()).newTimeout(Matchers.<TimerTask>any(), Matchers.anyLong(), Matchers.<TimeUnit>any());
}
new ChangeStatisticsWorkModeInputBuilder()
.setMode(StatisticsWorkMode.FULLYDISABLED);
- Future<RpcResult<Void>> workMode = statisticsManager
+ final Future<RpcResult<Void>> workMode = statisticsManager
.changeStatisticsWorkMode(changeStatisticsWorkModeInputBld.build());
checkWorkModeChangeOutcome(workMode);
*/
@Test
public void testChangeStatisticsWorkMode3() throws Exception {
- Timeout pollTimeout = Mockito.mock(Timeout.class);
- ItemLifeCycleSource itemLifecycleSource = Mockito.mock(ItemLifeCycleSource.class);
+ final Timeout pollTimeout = Mockito.mock(Timeout.class);
+ final ItemLifeCycleSource itemLifecycleSource = Mockito.mock(ItemLifeCycleSource.class);
Mockito.doNothing().when(itemLifecycleSource)
.setItemLifecycleListener(itemLifeCycleListenerCapt.capture());
@Test
public void testCalculateTimerDelay() throws Exception {
final TimeCounter timeCounter = Mockito.mock(TimeCounter.class);
- when(timeCounter.getAverageTimeBetweenMarks()).thenReturn(2000L, 4000L);
+ when(timeCounter.getAverageTimeBetweenMarks()).thenReturn((Long)2000L, (Long)4000L);
statisticsManager.calculateTimerDelay(timeCounter);
Assert.assertEquals(3000L, StatisticsManagerImpl.getCurrentTimerDelay());
PhyPort phyPort;
static final Long PORT_NO = 5l;
+ static final Long PORT_NO_DS = 6l;
static final String DATA = "Test_Data";
static final Long PORT_NUM_VALUE = 11l;
Mockito.when(deviceState.getFeatures()).thenReturn(getFeaturesOutput);
Mockito.when(getFeaturesOutput.getDatapathId()).thenReturn(BigInteger.TEN);
Mockito.when(getFeaturesOutput.getPhyPort()).thenReturn(phyPorts);
- Mockito.when(phyPort.getPortNo()).thenReturn(PORT_NO);
+ Mockito.when(phyPort.getPortNo()).thenReturn(PORT_NO_DS);
}
@Test
builder.setHasMask(hasMask);
return builder;
}
-
-
- @Test
- public void testGetPortNumberFromMatch() throws Exception {
- final Long portNumber = PacketReceivedTranslator.getPortNumberFromMatch(Lists.newArrayList(assembleMatchEntryBld(11L).build()));
- Assert.assertEquals(11L, portNumber.longValue());
- }
}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.FlowCapableTransactionService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.SendBarrierInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link BarrierUtil}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class BarrierUtilTest {
+
+    // Dummy node key/ref used as the barrier target in all tests below.
+    public static final NodeKey NODE_KEY = new NodeKey(new NodeId("ut-dummy-node"));
+    private static final NodeRef NODE_REF = new NodeRef(InstanceIdentifier.create(Nodes.class)
+            .child(Node.class, NODE_KEY));
+
+    @Mock
+    private FlowCapableTransactionService transactionService;
+    @Mock
+    private Function<Pair<RpcResult<String>, RpcResult<Void>>, RpcResult<String>> compositeTransform;
+    @Captor
+    private ArgumentCaptor<Pair<RpcResult<String>, RpcResult<Void>>> pairCpt;
+
+    @Before
+    public void setUp() throws Exception {
+        // The barrier service always answers with an immediately successful future.
+        Mockito.when(transactionService.sendBarrier(Matchers.<SendBarrierInput>any()))
+                .thenReturn(RpcResultBuilder.<Void>success().buildFuture());
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // Guards against untested interactions with the mocked collaborators.
+        Mockito.verifyNoMoreInteractions(transactionService, compositeTransform);
+    }
+
+    @Test
+    public void testChainBarrier() throws Exception {
+        final String data = "ut-data1";
+        final ListenableFuture<RpcResult<String>> input = RpcResultBuilder.success(data).buildFuture();
+        // NOTE(review): chainResult itself is never asserted on - only the pair handed
+        // to the composite transform is checked; consider verifying the returned future too.
+        final ListenableFuture<RpcResult<String>> chainResult =
+                BarrierUtil.chainBarrier(input, NODE_REF, transactionService, compositeTransform);
+
+        Mockito.verify(transactionService).sendBarrier(Matchers.<SendBarrierInput>any());
+        Mockito.verify(compositeTransform).apply(pairCpt.capture());
+
+        // left = outcome of the original rpc, right = outcome of the chained barrier
+        final Pair<RpcResult<String>, RpcResult<Void>> value = pairCpt.getValue();
+        Assert.assertTrue(value.getLeft().isSuccessful());
+        Assert.assertEquals(data, value.getLeft().getResult());
+        Assert.assertTrue(value.getRight().isSuccessful());
+        Assert.assertNull(value.getRight().getResult());
+
+    }
+
+    @Test
+    public void testCreateSendBarrierInput() throws Exception {
+        final SendBarrierInput barrierInput = BarrierUtil.createSendBarrierInput(NODE_REF);
+
+        Assert.assertEquals(NODE_REF, barrierInput.getNode());
+        Assert.assertEquals(SendBarrierInput.class, barrierInput.getImplementedInterface());
+    }
+}
\ No newline at end of file
when(mockFeatures.getDatapathId()).thenReturn(BigInteger.valueOf(21L));
}
@Test
- public void chainTableTrunkWriteOF10Test() {
- final DeviceState mockedDeviceState = mock(DeviceState.class);
+ public void chainTableTrunkWriteOF10Test() throws Exception {
+ DeviceState mockedDeviceState = mock(DeviceState.class);
final GetFeaturesOutput mockedFeatures = mock(GetFeaturesOutput.class);
when(mockedFeatures.getTables()).thenReturn((short) 2);
}
@Test
- public void testTranslateAndWriteReplyTypeDesc() {
+ public void testTranslateAndWriteReplyTypeDesc() throws Exception {
final ConnectionContext connectionContext = buildMockConnectionContext(OFConstants.OFP_VERSION_1_3);
Mockito.when(mockedDeviceContext.getPrimaryConnectionContext()).thenReturn(connectionContext);
final DeviceState deviceState = Mockito.mock(DeviceState.class);
}
@Test
- public void translateAndWriteReplyTypeTableFeatures() {
- final TableFeaturesBuilder tableFeature = new TableFeaturesBuilder();
+ public void translateAndWriteReplyTypeTableFeatures() throws Exception {
+ TableFeaturesBuilder tableFeature = new TableFeaturesBuilder();
tableFeature.setTableId(DUMMY_TABLE_ID);
final List<TableFeatures> tableFeatures = new ArrayList<>();
tableFeatures.add(tableFeature.build());
}
@Test
- public void translateAndWriteReplyTypeMeterFeatures() {
- final DeviceState mockedDeviceState = mock(DeviceState.class);
+ public void translateAndWriteReplyTypeMeterFeatures() throws Exception {
+ DeviceState mockedDeviceState = mock(DeviceState.class);
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
final MultipartReplyMeterFeaturesBuilder multipartReplyMeterFeaturesBuilder = new MultipartReplyMeterFeaturesBuilder();
}
@Test
- public void translateAndWriteReplyTypeGroupFeatures() {
- final MultipartReplyGroupFeaturesBuilder multipartReplyGroupFeaturesBuilder = new MultipartReplyGroupFeaturesBuilder();
+ public void translateAndWriteReplyTypeGroupFeatures() throws Exception {
+ MultipartReplyGroupFeaturesBuilder multipartReplyGroupFeaturesBuilder = new MultipartReplyGroupFeaturesBuilder();
multipartReplyGroupFeaturesBuilder.setTypes(new GroupTypes(true, true, true, true));
multipartReplyGroupFeaturesBuilder.setCapabilities(new GroupCapabilities(true, true, true, true));
final ActionType actionType = new ActionType(true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true);
@Test
- public void translateAndWriteReplyTypePortDesc() {
- final ConnectionContext mockedPrimaryConnectionContext = mock(ConnectionContext.class);
- final FeaturesReply mockedFeatures = mock(FeaturesReply.class);
+ public void translateAndWriteReplyTypePortDesc() throws Exception {
+ ConnectionContext mockedPrimaryConnectionContext = mock(ConnectionContext.class);
+ FeaturesReply mockedFeatures = mock(FeaturesReply.class);
when(mockedFeatures.getDatapathId()).thenReturn(new BigInteger(DUMMY_DATAPATH_ID));
when(mockedPrimaryConnectionContext.getFeatures()).thenReturn(mockedFeatures);
when(mockedDeviceContext.getPrimaryConnectionContext()).thenReturn(mockedPrimaryConnectionContext);
}
@Test
- public void createSuccessProcessingCallbackTest() {
- final DeviceState mockedDeviceState = mock(DeviceState.class);
+ public void createSuccessProcessingCallbackTest() throws Exception {
+ DeviceState mockedDeviceState = mock(DeviceState.class);
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
final ConnectionContext connectionContext = buildMockConnectionContext(OFConstants.OFP_VERSION_1_3);
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.collect.Lists;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchPlanStep;
+import org.opendaylight.openflowplugin.impl.services.batch.BatchStepType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.Batch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.BatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.BatchChoice;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddFlowCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddGroupCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchAddMeterCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveFlowCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveGroupCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchRemoveMeterCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateFlowCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateGroupCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.FlatBatchUpdateMeterCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.flow._case.FlatBatchAddFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.group._case.FlatBatchAddGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.add.meter._case.FlatBatchAddMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.flow._case.FlatBatchRemoveFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.group._case.FlatBatchRemoveGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.remove.meter._case.FlatBatchRemoveMeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.flow._case.FlatBatchUpdateFlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.group._case.FlatBatchUpdateGroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flat.batch.service.rev160321.process.flat.batch.input.batch.batch.choice.flat.batch.update.meter._case.FlatBatchUpdateMeterBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test for {@link FlatBatchUtil}.
+ */
+public class FlatBatchUtilTest {
+
+    private static final Logger LOG = LoggerFactory.getLogger(FlatBatchUtilTest.class);
+
+    @Test
+    public void testMarkBarriersWhereNeeded_noBarrier() throws Exception {
+        // Ordering chosen so no step pair requires a flush - expect zero barriers.
+        final List<Batch> batches = Lists.newArrayList(
+                //general part - no flush required
+                createBatch(BatchStepType.GROUP_REMOVE),
+                createBatch(BatchStepType.METER_REMOVE),
+                createBatch(BatchStepType.FLOW_ADD),
+                createBatch(BatchStepType.FLOW_REMOVE, 2),
+                createBatch(BatchStepType.FLOW_ADD),
+                createBatch(BatchStepType.FLOW_UPDATE),
+                createBatch(BatchStepType.GROUP_ADD),
+                createBatch(BatchStepType.GROUP_UPDATE),
+                createBatch(BatchStepType.METER_ADD),
+                createBatch(BatchStepType.METER_UPDATE)
+        );
+
+        final List<BatchPlanStep> batchPlan = FlatBatchUtil.assembleBatchPlan(batches);
+        FlatBatchUtil.markBarriersWhereNeeded(batchPlan);
+
+        Assert.assertEquals(10, batchPlan.size());
+        for (int i = 0; i < batchPlan.size(); i++) {
+            final BatchPlanStep planStep = batchPlan.get(i);
+            // NOTE(review): local is named barrierBefore but reads isBarrierAfter() -
+            // misleading name, consider renaming.
+            final boolean barrierBefore = planStep.isBarrierAfter();
+            LOG.debug("checking barrier mark @ {} {} -> {}",
+                    i, planStep.getStepType(), barrierBefore);
+
+            Assert.assertFalse(barrierBefore);
+        }
+    }
+
+    @Test
+    public void testMarkBarriersWhereNeeded_allBarriers() throws Exception {
+        // need to flush G+/F+
+        checkBarriersBetween(BatchStepType.GROUP_ADD, BatchStepType.FLOW_ADD);
+        // need to flush G+/F*
+        checkBarriersBetween(BatchStepType.GROUP_ADD, BatchStepType.FLOW_UPDATE);
+        // need to flush F-/G-
+        checkBarriersBetween(BatchStepType.FLOW_REMOVE, BatchStepType.GROUP_REMOVE);
+        // need to flush F*/G-
+        checkBarriersBetween(BatchStepType.FLOW_UPDATE, BatchStepType.GROUP_REMOVE);
+
+        // need to flush G+/G+
+        checkBarriersBetween(BatchStepType.GROUP_ADD, BatchStepType.GROUP_ADD);
+        // need to flush G-/G-
+        checkBarriersBetween(BatchStepType.GROUP_REMOVE, BatchStepType.GROUP_REMOVE);
+        // need to flush G*/G+
+        checkBarriersBetween(BatchStepType.GROUP_UPDATE, BatchStepType.GROUP_ADD);
+        // need to flush G*/G-
+        checkBarriersBetween(BatchStepType.GROUP_UPDATE, BatchStepType.GROUP_REMOVE);
+
+        // need to flush M+/F+
+        checkBarriersBetween(BatchStepType.METER_ADD, BatchStepType.FLOW_ADD);
+        // need to flush M+/F*
+        checkBarriersBetween(BatchStepType.METER_ADD, BatchStepType.FLOW_UPDATE);
+        // need to flush F-/M-
+        checkBarriersBetween(BatchStepType.FLOW_REMOVE, BatchStepType.METER_REMOVE);
+        // need to flush F*/M-
+        checkBarriersBetween(BatchStepType.FLOW_UPDATE, BatchStepType.METER_REMOVE);
+    }
+
+    // Asserts that a two-step plan of the given types gets exactly one barrier:
+    // after the first step and not after the second.
+    private void checkBarriersBetween(final BatchStepType typeOfFirst, final BatchStepType typeOfSecond) {
+        final List<Batch> batches = Lists.newArrayList(createBatch(typeOfFirst), createBatch(typeOfSecond));
+        final List<BatchPlanStep> batchPlan = FlatBatchUtil.assembleBatchPlan(batches);
+        FlatBatchUtil.markBarriersWhereNeeded(batchPlan);
+        LOG.debug("checking barrier between {} / {}", typeOfFirst, typeOfSecond);
+        Assert.assertEquals(2, batchPlan.size());
+        Assert.assertTrue("barrier expected between " + typeOfFirst + " / " + typeOfSecond, batchPlan.get(0).isBarrierAfter());
+        Assert.assertFalse(batchPlan.get(1).isBarrierAfter());
+    }
+
+    @Test
+    public void testMarkBarriersWhereNeeded_single() throws Exception {
+        final List<Batch> batches = Lists.newArrayList(
+                //general part - no flush required
+                createBatch(BatchStepType.GROUP_REMOVE)
+        );
+
+        final List<BatchPlanStep> batchPlan = FlatBatchUtil.assembleBatchPlan(batches);
+        FlatBatchUtil.markBarriersWhereNeeded(batchPlan);
+
+        Assert.assertEquals(1, batchPlan.size());
+        Assert.assertFalse(batchPlan.get(0).isBarrierAfter());
+    }
+
+    @Test
+    public void testDecideBarrier() throws Exception {
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.GROUP_ADD), BatchStepType.FLOW_ADD));
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.GROUP_ADD), BatchStepType.FLOW_UPDATE));
+
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.FLOW_REMOVE), BatchStepType.GROUP_REMOVE));
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.FLOW_UPDATE), BatchStepType.GROUP_REMOVE));
+
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.METER_ADD), BatchStepType.FLOW_ADD));
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.METER_ADD), BatchStepType.FLOW_UPDATE));
+
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.FLOW_REMOVE), BatchStepType.METER_REMOVE));
+        Assert.assertTrue(FlatBatchUtil.decideBarrier(EnumSet.of(BatchStepType.FLOW_UPDATE), BatchStepType.METER_REMOVE));
+    }
+
+    @Test
+    public void testAssembleBatchPlan() throws Exception {
+        // Adjacent batches of the same type must NOT be merged - 5 in, 5 steps out.
+        final List<Batch> batches = Lists.newArrayList(
+                createBatch(BatchStepType.GROUP_ADD),
+                createBatch(BatchStepType.GROUP_REMOVE, 2),
+                createBatch(BatchStepType.GROUP_REMOVE),
+                createBatch(BatchStepType.GROUP_ADD),
+                createBatch(BatchStepType.GROUP_UPDATE, 3)
+        );
+
+        final List<BatchPlanStep> batchPlanSteps = FlatBatchUtil.assembleBatchPlan(batches);
+        Assert.assertEquals(5, batchPlanSteps.size());
+
+        int i = 0;
+        checkSegment(batchPlanSteps.get(i++), BatchStepType.GROUP_ADD, 1);
+        checkSegment(batchPlanSteps.get(i++), BatchStepType.GROUP_REMOVE, 2);
+        checkSegment(batchPlanSteps.get(i++), BatchStepType.GROUP_REMOVE, 1);
+        checkSegment(batchPlanSteps.get(i++), BatchStepType.GROUP_ADD, 1);
+        checkSegment(batchPlanSteps.get(i++), BatchStepType.GROUP_UPDATE, 3);
+    }
+
+    // Asserts a plan step's type and the number of tasks it carries.
+    private void checkSegment(final BatchPlanStep planStep, final BatchStepType stepType, final int expected) {
+        Assert.assertEquals(stepType, planStep.getStepType());
+        Assert.assertEquals(expected, planStep.getTaskBag().size());
+    }
+
+    @Test
+    public void testDetectBatchStepType() throws Exception {
+        // Round-trips every enum value through createBatch + detectBatchStepType.
+        for (BatchStepType stepType : BatchStepType.values()) {
+            LOG.debug("checking detection of: {}", stepType);
+            final Batch batch = createBatch(stepType);
+            final BatchStepType actualType = FlatBatchUtil.detectBatchStepType(batch.getBatchChoice());
+            Assert.assertEquals(stepType, actualType);
+        }
+    }
+
+    private Batch createBatch(BatchStepType type) {
+        return createBatch(type, 1);
+    }
+
+    // Builds a Batch whose choice matches the given step type, holding `size`
+    // empty task entries of the corresponding flow/group/meter flavor.
+    private Batch createBatch(BatchStepType type, final int size) {
+        final BatchChoice batchCase;
+        switch (type) {
+            case FLOW_ADD:
+                batchCase = new FlatBatchAddFlowCaseBuilder()
+                        .setFlatBatchAddFlow(repeatIntoList(new FlatBatchAddFlowBuilder().build(), size))
+                        .build();
+                break;
+            case FLOW_REMOVE:
+                batchCase = new FlatBatchRemoveFlowCaseBuilder()
+                        .setFlatBatchRemoveFlow(repeatIntoList(new FlatBatchRemoveFlowBuilder().build(), size))
+                        .build();
+                break;
+            case FLOW_UPDATE:
+                batchCase = new FlatBatchUpdateFlowCaseBuilder()
+                        .setFlatBatchUpdateFlow(repeatIntoList(new FlatBatchUpdateFlowBuilder().build(), size))
+                        .build();
+                break;
+            case GROUP_ADD:
+                batchCase = new FlatBatchAddGroupCaseBuilder()
+                        .setFlatBatchAddGroup(repeatIntoList(new FlatBatchAddGroupBuilder().build(), size))
+                        .build();
+                break;
+            case GROUP_REMOVE:
+                batchCase = new FlatBatchRemoveGroupCaseBuilder()
+                        .setFlatBatchRemoveGroup(repeatIntoList(new FlatBatchRemoveGroupBuilder().build(), size))
+                        .build();
+                break;
+            case GROUP_UPDATE:
+                batchCase = new FlatBatchUpdateGroupCaseBuilder()
+                        .setFlatBatchUpdateGroup(repeatIntoList(new FlatBatchUpdateGroupBuilder().build(), size))
+                        .build();
+                break;
+            case METER_ADD:
+                batchCase = new FlatBatchAddMeterCaseBuilder()
+                        .setFlatBatchAddMeter(repeatIntoList(new FlatBatchAddMeterBuilder().build(), size))
+                        .build();
+                break;
+            case METER_REMOVE:
+                batchCase = new FlatBatchRemoveMeterCaseBuilder()
+                        .setFlatBatchRemoveMeter(repeatIntoList(new FlatBatchRemoveMeterBuilder().build(), size))
+                        .build();
+                break;
+            case METER_UPDATE:
+                batchCase = new FlatBatchUpdateMeterCaseBuilder()
+                        .setFlatBatchUpdateMeter(repeatIntoList(new FlatBatchUpdateMeterBuilder().build(), size))
+                        .build();
+                break;
+            default:
+                LOG.warn("unsupported batch type: {}", type);
+                throw new IllegalArgumentException("unsupported batch type: " + type);
+        }
+
+        return new BatchBuilder()
+                .setBatchChoice(batchCase)
+                .build();
+    }
+
+    // Returns a list with `size` references to the SAME element instance
+    // (acceptable here - the generated builders produce immutable objects).
+    private <T> List<T> repeatIntoList(final T element, final int size) {
+        final List<T> list = new ArrayList<>();
+        for (int i = 0; i < size; i++) {
+            list.add(element);
+        }
+        return list;
+    }
+
+    @Test
+    public void testMergeRpcResults() throws Exception {
+        final RpcResult<String> rpcResultFailed = RpcResultBuilder.<String>failed()
+                .withError(RpcError.ErrorType.APPLICATION, "ut-rpcError").build();
+        final RpcResult<String> rpcResultSuccess = RpcResultBuilder.<String>success().build();
+
+        // failed + success -> failed with the single error propagated
+        final RpcResult<String> rpcResult1 = FlatBatchUtil.mergeRpcResults(rpcResultFailed, rpcResultSuccess).build();
+        Assert.assertEquals(1, rpcResult1.getErrors().size());
+        Assert.assertFalse(rpcResult1.isSuccessful());
+
+        // failed + failed -> failed with both errors accumulated
+        final RpcResult<String> rpcResult2 = FlatBatchUtil.mergeRpcResults(rpcResultFailed, rpcResultFailed).build();
+        Assert.assertEquals(2, rpcResult2.getErrors().size());
+        Assert.assertFalse(rpcResult2.isSuccessful());
+
+        // success + success -> success, no errors
+        final RpcResult<String> rpcResult3 = FlatBatchUtil.mergeRpcResults(rpcResultSuccess, rpcResultSuccess).build();
+        Assert.assertEquals(0, rpcResult3.getErrors().size());
+        Assert.assertTrue(rpcResult3.isSuccessful());
+    }
+}
\ No newline at end of file
package org.opendaylight.openflowplugin.impl.util;
-import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import java.util.Collections;
+import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.AddFlowsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowIdGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.BatchFlowOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flows.service.rev160314.batch.flow.output.list.grouping.BatchFailedFlowsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
public class FlowUtilTest {
- private static final short DUMMY_TABLE_ID = 1;
public static final Pattern INDEX_PATTERN = Pattern.compile("^#UF\\$TABLE\\*1-([0-9]+)$");
+ public static final NodeId DUMMY_NODE_ID = new NodeId("dummyNodeId");
+ public static final FlowId DUMMY_FLOW_ID = new FlowId("dummyFlowId");
+ public static final FlowId DUMMY_FLOW_ID_2 = new FlowId("dummyFlowId_2");
+ public static final Short DUMMY_TABLE_ID = 1;
@Test
public void createAlienFlowIdTest() {
final String alienFlowId2 = FlowUtil.createAlienFlowId(DUMMY_TABLE_ID).getValue();
final Integer index2 = parseIndex(alienFlowId2);
- assertNotNull("index1 parsing failed: "+alienFlowId1, index1);
- assertNotNull("index2 parsing failed: "+alienFlowId2, index2);
+ assertNotNull("index1 parsing failed: " + alienFlowId1, index1);
+ assertNotNull("index2 parsing failed: " + alienFlowId2, index2);
assertTrue(index1 < index2);
}
return null;
}
+    @Test
+    public void testBuildFlowPath() throws Exception {
+        final InstanceIdentifier<Node> nodePath = InstanceIdentifier
+                .create(Nodes.class)
+                .child(Node.class, new NodeKey(DUMMY_NODE_ID));
+
+        // The produced FlowRef must nest node -> table -> flow keys from the inputs.
+        final FlowRef flowRef = FlowUtil.buildFlowPath(nodePath, DUMMY_TABLE_ID, DUMMY_FLOW_ID);
+        final InstanceIdentifier<?> flowRefValue = flowRef.getValue();
+        Assert.assertEquals(DUMMY_NODE_ID, flowRefValue.firstKeyOf(Node.class).getId());
+        Assert.assertEquals(DUMMY_TABLE_ID, flowRefValue.firstKeyOf(Table.class).getId());
+        Assert.assertEquals(DUMMY_FLOW_ID, flowRefValue.firstKeyOf(Flow.class).getId());
+    }
+
+    @Test
+    public void testCreateCumulatingFunction() throws Exception {
+        final Function<List<RpcResult<String>>, RpcResult<List<BatchFailedFlowsOutput>>> function =
+                FlowUtil.createCumulatingFunction(Lists.newArrayList(createBatchFlowIdGrouping(DUMMY_FLOW_ID),
+                        createBatchFlowIdGrouping(DUMMY_FLOW_ID_2)));
+
+        // One success + one failure: only the failed flow should surface in the summary.
+        final RpcResult<List<BatchFailedFlowsOutput>> summary = function.apply(Lists.newArrayList(
+                RpcResultBuilder.success("a").build(),
+                RpcResultBuilder.<String>failed()
+                        .withError(RpcError.ErrorType.APPLICATION, "action-failed reason")
+                        .build()));
+
+        Assert.assertFalse(summary.isSuccessful());
+        Assert.assertEquals(1, summary.getResult().size());
+        Assert.assertEquals(1, summary.getErrors().size());
+        // The failure maps back to the second flow id, at batch order 1 (zero-based).
+        Assert.assertEquals(DUMMY_FLOW_ID_2, summary.getResult().get(0).getFlowId());
+        Assert.assertEquals(1, summary.getResult().get(0).getBatchOrder().intValue());
+    }
+
+    // Builds a mock grouping that answers the given flow id.
+    protected BatchFlowIdGrouping createBatchFlowIdGrouping(final FlowId flowId) {
+        final BatchFlowIdGrouping mock = Mockito.mock(BatchFlowIdGrouping.class);
+        Mockito.when(mock.getFlowId()).thenReturn(flowId);
+        return mock;
+    }
+
+ @Test
+ public void testFLOW_ADD_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedFlowsOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(FlowUtil.FLOW_ADD_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testFLOW_ADD_TRANSFORM__success() throws Exception {
+ final RpcResult<List<BatchFailedFlowsOutput>> input = createEmptyBatchOutcome();
+ checkBatchSuccessOutcomeTransformation(FlowUtil.FLOW_ADD_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testFLOW_REMOVE_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedFlowsOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(FlowUtil.FLOW_REMOVE_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testFLOW_REMOVE_TRANSFORM__success() throws Exception {
+ final RpcResult<List<BatchFailedFlowsOutput>> input = createEmptyBatchOutcome();
+ checkBatchSuccessOutcomeTransformation(FlowUtil.FLOW_REMOVE_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testFLOW_UPDATE_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedFlowsOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(FlowUtil.FLOW_UPDATE_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testFLOW_UPDATE_TRANSFORM__success() throws Exception {
+ final RpcResult<List<BatchFailedFlowsOutput>> input = createEmptyBatchOutcome();
+ checkBatchSuccessOutcomeTransformation(FlowUtil.FLOW_UPDATE_TRANSFORM.apply(input));
+ }
+
+    // Shared assertion: a transformed success carries no failed flows and no errors.
+    private <T extends BatchFlowOutputListGrouping> void checkBatchSuccessOutcomeTransformation(final RpcResult<T> output) {
+        Assert.assertTrue(output.isSuccessful());
+        Assert.assertEquals(0, output.getResult().getBatchFailedFlowsOutput().size());
+        Assert.assertEquals(0, output.getErrors().size());
+    }
+
+ private RpcResult<List<BatchFailedFlowsOutput>> createEmptyBatchOutcome() {
+ return RpcResultBuilder
+ .<List<BatchFailedFlowsOutput>>success(Collections.<BatchFailedFlowsOutput>emptyList())
+ .build();
+ }
+
+    // Fixture: a failed batch outcome carrying one error and one failed flow (DUMMY_FLOW_ID).
+    private RpcResult<List<BatchFailedFlowsOutput>> createBatchOutcomeWithError() {
+        return RpcResultBuilder.<List<BatchFailedFlowsOutput>>failed()
+                .withError(RpcError.ErrorType.APPLICATION, "ut-flowAddFail")
+                .withResult(Collections.singletonList(new BatchFailedFlowsOutputBuilder()
+                        .setFlowId(DUMMY_FLOW_ID)
+                        .build()))
+                .build();
+    }
+
+    // Shared assertion: a transformed failure keeps the single failed flow and its error.
+    private <T extends BatchFlowOutputListGrouping> void checkBatchErrorOutcomeTransformation(final RpcResult<T> output) {
+        Assert.assertFalse(output.isSuccessful());
+        Assert.assertEquals(1, output.getResult().getBatchFailedFlowsOutput().size());
+        Assert.assertEquals(DUMMY_FLOW_ID, output.getResult().getBatchFailedFlowsOutput().get(0).getFlowId());
+
+        Assert.assertEquals(1, output.getErrors().size());
+    }
+
+ @Test
+ public void testCreateComposingFunction_success_success() throws Exception {
+ final Function<Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>>, RpcResult<AddFlowsBatchOutput>> compositeFunction =
+ FlowUtil.createComposingFunction();
+
+ final RpcResult<AddFlowsBatchOutput> addFlowBatchOutput = createAddFlowsBatchSuccessOutput();
+ final RpcResult<Void> barrierOutput = RpcResultBuilder.<Void>success().build();
+ final Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>> input = Pair.of(addFlowBatchOutput, barrierOutput);
+ final RpcResult<AddFlowsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertTrue(composite.isSuccessful());
+ Assert.assertEquals(0, composite.getErrors().size());
+ Assert.assertEquals(0, composite.getResult().getBatchFailedFlowsOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_failure_success() throws Exception {
+ final Function<Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>>, RpcResult<AddFlowsBatchOutput>> compositeFunction =
+ FlowUtil.createComposingFunction();
+
+ final RpcResult<AddFlowsBatchOutput> addFlowBatchOutput = createAddFlowsBatchFailureOutcome();
+ final RpcResult<Void> barrierOutput = RpcResultBuilder.<Void>success().build();
+ final Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>> input = Pair.of(addFlowBatchOutput, barrierOutput);
+ final RpcResult<AddFlowsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(1, composite.getErrors().size());
+ Assert.assertEquals(1, composite.getResult().getBatchFailedFlowsOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_success_failure() throws Exception {
+ final Function<Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>>, RpcResult<AddFlowsBatchOutput>> compositeFunction =
+ FlowUtil.createComposingFunction();
+
+ final RpcResult<AddFlowsBatchOutput> addFlowBatchOutput = createAddFlowsBatchSuccessOutput();
+ final RpcResult<Void> barrierOutput = createBarrierFailureOutcome();
+ final Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>> input = Pair.of(addFlowBatchOutput, barrierOutput);
+ final RpcResult<AddFlowsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(1, composite.getErrors().size());
+ Assert.assertEquals(0, composite.getResult().getBatchFailedFlowsOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_failure_failure() throws Exception {
+ final Function<Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>>, RpcResult<AddFlowsBatchOutput>> compositeFunction =
+ FlowUtil.createComposingFunction();
+
+ final RpcResult<AddFlowsBatchOutput> addFlowBatchOutput = createAddFlowsBatchFailureOutcome();
+ final RpcResult<Void> barrierOutput = createBarrierFailureOutcome();
+ final Pair<RpcResult<AddFlowsBatchOutput>, RpcResult<Void>> input = Pair.of(addFlowBatchOutput, barrierOutput);
+ final RpcResult<AddFlowsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(2, composite.getErrors().size());
+ Assert.assertEquals(1, composite.getResult().getBatchFailedFlowsOutput().size());
+ }
+
+ private RpcResult<Void> createBarrierFailureOutcome() {
+ return RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-barrier-error")
+ .build();
+ }
+
+ private RpcResult<AddFlowsBatchOutput> createAddFlowsBatchSuccessOutput() {
+ return RpcResultBuilder
+ .success(new AddFlowsBatchOutputBuilder()
+ .setBatchFailedFlowsOutput(Collections.<BatchFailedFlowsOutput>emptyList())
+ .build())
+ .build();
+ }
+
+ private RpcResult<AddFlowsBatchOutput> createAddFlowsBatchFailureOutcome() {
+ final RpcResult<List<BatchFailedFlowsOutput>> batchOutcomeWithError = createBatchOutcomeWithError();
+ return RpcResultBuilder.<AddFlowsBatchOutput>failed()
+ .withResult(new AddFlowsBatchOutputBuilder()
+ .setBatchFailedFlowsOutput(batchOutcomeWithError.getResult())
+ .build())
+ .withRpcErrors(batchOutcomeWithError.getErrors())
+ .build();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import java.util.Collections;
+import java.util.List;
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.AddGroupsBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.BatchGroupOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groups.service.rev160315.batch.group.output.list.grouping.BatchFailedGroupsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link GroupUtil}.
+ */
+public class GroupUtilTest {
+
+ public static final NodeId DUMMY_NODE_ID = new NodeId("dummyNodeId");
+ private static final GroupId DUMMY_GROUP_ID = new GroupId(42L);
+ private static final GroupId DUMMY_GROUP_ID_2 = new GroupId(43L);
+
+ @Test
+ public void testBuildGroupPath() throws Exception {
+ final InstanceIdentifier<Node> nodePath = InstanceIdentifier
+ .create(Nodes.class)
+ .child(Node.class, new NodeKey(DUMMY_NODE_ID));
+
+ final GroupRef groupRef = GroupUtil.buildGroupPath(nodePath, DUMMY_GROUP_ID);
+ final InstanceIdentifier<?> groupRefValue = groupRef.getValue();
+ Assert.assertEquals(DUMMY_NODE_ID, groupRefValue.firstKeyOf(Node.class).getId());
+ Assert.assertEquals(DUMMY_GROUP_ID, groupRefValue.firstKeyOf(Group.class).getGroupId());
+ }
+
+ @Test
+ public void testCreateCumulatingFunction() throws Exception {
+ final Function<List<RpcResult<String>>, RpcResult<List<BatchFailedGroupsOutput>>> function =
+ GroupUtil.createCumulatingFunction(Lists.newArrayList(createBatchGroup(DUMMY_GROUP_ID),
+ createBatchGroup(DUMMY_GROUP_ID_2)));
+
+ final RpcResult<List<BatchFailedGroupsOutput>> summary = function.apply(Lists.newArrayList(
+ RpcResultBuilder.success("a").build(),
+ RpcResultBuilder.<String>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "action-failed reason")
+ .build()));
+
+ Assert.assertFalse(summary.isSuccessful());
+ Assert.assertEquals(1, summary.getResult().size());
+ Assert.assertEquals(1, summary.getErrors().size());
+ Assert.assertEquals(DUMMY_GROUP_ID_2, summary.getResult().get(0).getGroupId());
+ Assert.assertEquals(1, summary.getResult().get(0).getBatchOrder().intValue());
+ }
+
+ protected Group createBatchGroup(final GroupId groupId) {
+ return new GroupBuilder().setGroupId(groupId).build();
+ }
+
+ @Test
+ public void testGROUP_ADD_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedGroupsOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(GroupUtil.GROUP_ADD_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testGROUP_ADD_TRANSFORM__success() throws Exception {
+ final RpcResult<List<BatchFailedGroupsOutput>> input = createEmptyBatchOutcome();
+ checkBatchSuccessOutcomeTransformation(GroupUtil.GROUP_ADD_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testGROUP_REMOVE_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedGroupsOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(GroupUtil.GROUP_REMOVE_TRANSFORM.apply(input));
+ }
+
+    @Test
+    public void testGROUP_REMOVE_TRANSFORM__success() throws Exception {
+        // renamed from testFLOW_REMOVE_TRANSFORM__success (copy-paste from FlowUtilTest):
+        // this exercises GroupUtil.GROUP_REMOVE_TRANSFORM
+        final RpcResult<List<BatchFailedGroupsOutput>> input = createEmptyBatchOutcome();
+        checkBatchSuccessOutcomeTransformation(GroupUtil.GROUP_REMOVE_TRANSFORM.apply(input));
+    }
+
+    @Test
+    public void testGROUP_UPDATE_TRANSFORM__failure() throws Exception {
+        // renamed from testFLOW_UPDATE_TRANSFORM__failure (copy-paste from FlowUtilTest):
+        // this exercises GroupUtil.GROUP_UPDATE_TRANSFORM
+        final RpcResult<List<BatchFailedGroupsOutput>> input = createBatchOutcomeWithError();
+        checkBatchErrorOutcomeTransformation(GroupUtil.GROUP_UPDATE_TRANSFORM.apply(input));
+    }
+
+    @Test
+    public void testGROUP_UPDATE_TRANSFORM__success() throws Exception {
+        // renamed from testFLOW_UPDATE_TRANSFORM__success (copy-paste from FlowUtilTest):
+        // this exercises GroupUtil.GROUP_UPDATE_TRANSFORM
+        final RpcResult<List<BatchFailedGroupsOutput>> input = createEmptyBatchOutcome();
+        checkBatchSuccessOutcomeTransformation(GroupUtil.GROUP_UPDATE_TRANSFORM.apply(input));
+    }
+
+ private <T extends BatchGroupOutputListGrouping> void checkBatchSuccessOutcomeTransformation(final RpcResult<T> output) {
+ Assert.assertTrue(output.isSuccessful());
+ Assert.assertEquals(0, output.getResult().getBatchFailedGroupsOutput().size());
+ Assert.assertEquals(0, output.getErrors().size());
+ }
+
+ private RpcResult<List<BatchFailedGroupsOutput>> createEmptyBatchOutcome() {
+ return RpcResultBuilder
+ .<List<BatchFailedGroupsOutput>>success(Collections.<BatchFailedGroupsOutput>emptyList())
+ .build();
+ }
+
+ private RpcResult<List<BatchFailedGroupsOutput>> createBatchOutcomeWithError() {
+ return RpcResultBuilder.<List<BatchFailedGroupsOutput>>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-flowAddFail")
+ .withResult(Collections.singletonList(new BatchFailedGroupsOutputBuilder()
+ .setGroupId(DUMMY_GROUP_ID)
+ .build()))
+ .build();
+ }
+
+ private <T extends BatchGroupOutputListGrouping> void checkBatchErrorOutcomeTransformation(final RpcResult<T> output) {
+ Assert.assertFalse(output.isSuccessful());
+ Assert.assertEquals(1, output.getResult().getBatchFailedGroupsOutput().size());
+ Assert.assertEquals(DUMMY_GROUP_ID, output.getResult().getBatchFailedGroupsOutput().get(0).getGroupId());
+
+ Assert.assertEquals(1, output.getErrors().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_success_success() throws Exception {
+ final Function<Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>>, RpcResult<AddGroupsBatchOutput>> compositeFunction =
+ GroupUtil.createComposingFunction();
+
+ final RpcResult<AddGroupsBatchOutput> addGroupBatchOutput = createAddGroupsBatchSuccessOutput();
+ final RpcResult<Void> barrierOutput = RpcResultBuilder.<Void>success().build();
+ final Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddGroupsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertTrue(composite.isSuccessful());
+ Assert.assertEquals(0, composite.getErrors().size());
+ Assert.assertEquals(0, composite.getResult().getBatchFailedGroupsOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_failure_success() throws Exception {
+ final Function<Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>>, RpcResult<AddGroupsBatchOutput>> compositeFunction =
+ GroupUtil.createComposingFunction();
+
+ final RpcResult<AddGroupsBatchOutput> addGroupBatchOutput = createAddGroupsBatchFailureOutcome();
+ final RpcResult<Void> barrierOutput = RpcResultBuilder.<Void>success().build();
+ final Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddGroupsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(1, composite.getErrors().size());
+ Assert.assertEquals(1, composite.getResult().getBatchFailedGroupsOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_success_failure() throws Exception {
+ final Function<Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>>, RpcResult<AddGroupsBatchOutput>> compositeFunction =
+ GroupUtil.createComposingFunction();
+
+ final RpcResult<AddGroupsBatchOutput> addGroupBatchOutput = createAddGroupsBatchSuccessOutput();
+ final RpcResult<Void> barrierOutput = createBarrierFailureOutcome();
+ final Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddGroupsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(1, composite.getErrors().size());
+ Assert.assertEquals(0, composite.getResult().getBatchFailedGroupsOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_failure_failure() throws Exception {
+ final Function<Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>>, RpcResult<AddGroupsBatchOutput>> compositeFunction =
+ GroupUtil.createComposingFunction();
+
+ final RpcResult<AddGroupsBatchOutput> addGroupBatchOutput = createAddGroupsBatchFailureOutcome();
+ final RpcResult<Void> barrierOutput = createBarrierFailureOutcome();
+ final Pair<RpcResult<AddGroupsBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddGroupsBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(2, composite.getErrors().size());
+ Assert.assertEquals(1, composite.getResult().getBatchFailedGroupsOutput().size());
+ }
+
+ private RpcResult<Void> createBarrierFailureOutcome() {
+ return RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-barrier-error")
+ .build();
+ }
+
+ private RpcResult<AddGroupsBatchOutput> createAddGroupsBatchSuccessOutput() {
+ return RpcResultBuilder
+ .success(new AddGroupsBatchOutputBuilder()
+ .setBatchFailedGroupsOutput(Collections.<BatchFailedGroupsOutput>emptyList())
+ .build())
+ .build();
+ }
+
+ private RpcResult<AddGroupsBatchOutput> createAddGroupsBatchFailureOutcome() {
+ final RpcResult<List<BatchFailedGroupsOutput>> batchOutcomeWithError = createBatchOutcomeWithError();
+ return RpcResultBuilder.<AddGroupsBatchOutput>failed()
+ .withResult(new AddGroupsBatchOutputBuilder()
+ .setBatchFailedGroupsOutput(batchOutcomeWithError.getResult())
+ .build())
+ .withRpcErrors(batchOutcomeWithError.getErrors())
+ .build();
+ }
+}
\ No newline at end of file
* Number of currently registrated services (can be changed) in {@link MdSalRegistrationUtils#registerServices
* (RpcContext, DeviceContext)}
*/
- private static final int NUMBER_OF_RPC_SERVICE_REGISTRATION = 11;
+ private static final int NUMBER_OF_RPC_SERVICE_REGISTRATION = 12;
@Test
public void registerServiceTest() {
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import java.util.Collections;
+import java.util.List;
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.AddMetersBatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.BatchMeterOutputListGrouping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meters.service.rev160316.batch.meter.output.list.grouping.BatchFailedMetersOutputBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * Test for {@link MeterUtil}.
+ */
+public class MeterUtilTest {
+
+ public static final NodeId DUMMY_NODE_ID = new NodeId("dummyNodeId");
+ private static final MeterId DUMMY_METER_ID = new MeterId(42L);
+ private static final MeterId DUMMY_METER_ID_2 = new MeterId(43L);
+
+    @Test
+    public void testBuildMeterPath() throws Exception {
+        // renamed from testBuildGroupPath (copy-paste from GroupUtilTest):
+        // this exercises MeterUtil.buildMeterPath
+        final InstanceIdentifier<Node> nodePath = InstanceIdentifier
+                .create(Nodes.class)
+                .child(Node.class, new NodeKey(DUMMY_NODE_ID));
+
+        final MeterRef meterRef = MeterUtil.buildMeterPath(nodePath, DUMMY_METER_ID);
+        final InstanceIdentifier<?> meterRefValue = meterRef.getValue();
+        Assert.assertEquals(DUMMY_NODE_ID, meterRefValue.firstKeyOf(Node.class).getId());
+        Assert.assertEquals(DUMMY_METER_ID, meterRefValue.firstKeyOf(Meter.class).getMeterId());
+    }
+
+ @Test
+ public void testCreateCumulatingFunction() throws Exception {
+ final Function<List<RpcResult<String>>, RpcResult<List<BatchFailedMetersOutput>>> function =
+ MeterUtil.createCumulativeFunction(Lists.newArrayList(
+ createBatchMeter(DUMMY_METER_ID),
+ createBatchMeter(DUMMY_METER_ID_2)));
+
+ final RpcResult<List<BatchFailedMetersOutput>> output = function.apply(Lists.newArrayList(
+ RpcResultBuilder.success("a").build(),
+ RpcResultBuilder.<String>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-meter-error")
+ .build()));
+
+ Assert.assertFalse(output.isSuccessful());
+ Assert.assertEquals(1, output.getResult().size());
+ Assert.assertEquals(DUMMY_METER_ID_2, output.getResult().get(0).getMeterId());
+ Assert.assertEquals(1, output.getResult().get(0).getBatchOrder().intValue());
+ }
+
+ private org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter createBatchMeter(final MeterId meterId) {
+ return new MeterBuilder()
+ .setMeterId(meterId)
+ .build();
+ }
+
+ @Test
+ public void testMETER_ADD_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedMetersOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(MeterUtil.METER_ADD_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testMETER_ADD_TRANSFORM__success() throws Exception {
+ final RpcResult<List<BatchFailedMetersOutput>> input = createEmptyBatchOutcome();
+ checkBatchSuccessOutcomeTransformation(MeterUtil.METER_ADD_TRANSFORM.apply(input));
+ }
+
+ @Test
+ public void testMETER_REMOVE_TRANSFORM__failure() throws Exception {
+ final RpcResult<List<BatchFailedMetersOutput>> input = createBatchOutcomeWithError();
+ checkBatchErrorOutcomeTransformation(MeterUtil.METER_REMOVE_TRANSFORM.apply(input));
+ }
+
+    @Test
+    public void testMETER_REMOVE_TRANSFORM__success() throws Exception {
+        // renamed from testFLOW_REMOVE_TRANSFORM__success (copy-paste from FlowUtilTest):
+        // this exercises MeterUtil.METER_REMOVE_TRANSFORM
+        final RpcResult<List<BatchFailedMetersOutput>> input = createEmptyBatchOutcome();
+        checkBatchSuccessOutcomeTransformation(MeterUtil.METER_REMOVE_TRANSFORM.apply(input));
+    }
+
+    @Test
+    public void testMETER_UPDATE_TRANSFORM__failure() throws Exception {
+        // renamed from testFLOW_UPDATE_TRANSFORM__failure (copy-paste from FlowUtilTest):
+        // this exercises MeterUtil.METER_UPDATE_TRANSFORM
+        final RpcResult<List<BatchFailedMetersOutput>> input = createBatchOutcomeWithError();
+        checkBatchErrorOutcomeTransformation(MeterUtil.METER_UPDATE_TRANSFORM.apply(input));
+    }
+
+    @Test
+    public void testMETER_UPDATE_TRANSFORM__success() throws Exception {
+        // renamed from testFLOW_UPDATE_TRANSFORM__success (copy-paste from FlowUtilTest):
+        // this exercises MeterUtil.METER_UPDATE_TRANSFORM
+        final RpcResult<List<BatchFailedMetersOutput>> input = createEmptyBatchOutcome();
+        checkBatchSuccessOutcomeTransformation(MeterUtil.METER_UPDATE_TRANSFORM.apply(input));
+    }
+
+ private <T extends BatchMeterOutputListGrouping> void checkBatchSuccessOutcomeTransformation(final RpcResult<T> output) {
+ Assert.assertTrue(output.isSuccessful());
+ Assert.assertEquals(0, output.getResult().getBatchFailedMetersOutput().size());
+ Assert.assertEquals(0, output.getErrors().size());
+ }
+
+ private RpcResult<List<BatchFailedMetersOutput>> createEmptyBatchOutcome() {
+ return RpcResultBuilder
+ .<List<BatchFailedMetersOutput>>success(Collections.<BatchFailedMetersOutput>emptyList())
+ .build();
+ }
+
+ private RpcResult<List<BatchFailedMetersOutput>> createBatchOutcomeWithError() {
+ return RpcResultBuilder.<List<BatchFailedMetersOutput>>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-flowAddFail")
+ .withResult(Collections.singletonList(new BatchFailedMetersOutputBuilder()
+ .setMeterId(DUMMY_METER_ID)
+ .build()))
+ .build();
+ }
+
+ private <T extends BatchMeterOutputListGrouping> void checkBatchErrorOutcomeTransformation(final RpcResult<T> output) {
+ Assert.assertFalse(output.isSuccessful());
+ Assert.assertEquals(1, output.getResult().getBatchFailedMetersOutput().size());
+ Assert.assertEquals(DUMMY_METER_ID, output.getResult().getBatchFailedMetersOutput().get(0).getMeterId());
+
+ Assert.assertEquals(1, output.getErrors().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_success_success() throws Exception {
+ final Function<Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>>, RpcResult<AddMetersBatchOutput>> compositeFunction =
+ MeterUtil.createComposingFunction();
+
+ final RpcResult<AddMetersBatchOutput> addGroupBatchOutput = createAddMetersBatchSuccessOutput();
+ final RpcResult<Void> barrierOutput = RpcResultBuilder.<Void>success().build();
+ final Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddMetersBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertTrue(composite.isSuccessful());
+ Assert.assertEquals(0, composite.getErrors().size());
+ Assert.assertEquals(0, composite.getResult().getBatchFailedMetersOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_failure_success() throws Exception {
+ final Function<Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>>, RpcResult<AddMetersBatchOutput>> compositeFunction =
+ MeterUtil.createComposingFunction();
+
+ final RpcResult<AddMetersBatchOutput> addGroupBatchOutput = createAddMetersBatchFailureOutcome();
+ final RpcResult<Void> barrierOutput = RpcResultBuilder.<Void>success().build();
+ final Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddMetersBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(1, composite.getErrors().size());
+ Assert.assertEquals(1, composite.getResult().getBatchFailedMetersOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_success_failure() throws Exception {
+ final Function<Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>>, RpcResult<AddMetersBatchOutput>> compositeFunction =
+ MeterUtil.createComposingFunction();
+
+ final RpcResult<AddMetersBatchOutput> addGroupBatchOutput = createAddMetersBatchSuccessOutput();
+ final RpcResult<Void> barrierOutput = createBarrierFailureOutcome();
+ final Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddMetersBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(1, composite.getErrors().size());
+ Assert.assertEquals(0, composite.getResult().getBatchFailedMetersOutput().size());
+ }
+
+ @Test
+ public void testCreateComposingFunction_failure_failure() throws Exception {
+ final Function<Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>>, RpcResult<AddMetersBatchOutput>> compositeFunction =
+ MeterUtil.createComposingFunction();
+
+ final RpcResult<AddMetersBatchOutput> addGroupBatchOutput = createAddMetersBatchFailureOutcome();
+ final RpcResult<Void> barrierOutput = createBarrierFailureOutcome();
+ final Pair<RpcResult<AddMetersBatchOutput>, RpcResult<Void>> input = Pair.of(addGroupBatchOutput, barrierOutput);
+ final RpcResult<AddMetersBatchOutput> composite = compositeFunction.apply(input);
+
+ Assert.assertFalse(composite.isSuccessful());
+ Assert.assertEquals(2, composite.getErrors().size());
+ Assert.assertEquals(1, composite.getResult().getBatchFailedMetersOutput().size());
+ }
+
+ private RpcResult<Void> createBarrierFailureOutcome() {
+ return RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "ut-barrier-error")
+ .build();
+ }
+
+ private RpcResult<AddMetersBatchOutput> createAddMetersBatchSuccessOutput() {
+ return RpcResultBuilder
+ .success(new AddMetersBatchOutputBuilder()
+ .setBatchFailedMetersOutput(Collections.<BatchFailedMetersOutput>emptyList())
+ .build())
+ .build();
+ }
+
+ private RpcResult<AddMetersBatchOutput> createAddMetersBatchFailureOutcome() {
+ final RpcResult<List<BatchFailedMetersOutput>> batchOutcomeWithError = createBatchOutcomeWithError();
+ return RpcResultBuilder.<AddMetersBatchOutput>failed()
+ .withResult(new AddMetersBatchOutputBuilder()
+ .setBatchFailedMetersOutput(batchOutcomeWithError.getResult())
+ .build())
+ .withRpcErrors(batchOutcomeWithError.getErrors())
+ .build();
+ }
+}
\ No newline at end of file
package org.opendaylight.openflowplugin.impl.util;
+import com.google.common.collect.Lists;
import junit.framework.TestCase;
import org.junit.Before;
-import org.junit.Rule;
import org.junit.Test;
-import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.PacketInReason;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.PortNumber;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.TableId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.InPort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.OpenflowBasicClass;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entries.grouping.MatchEntryBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entry.value.grouping.match.entry.value.InPortCaseBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entry.value.grouping.match.entry.value.in.port._case.InPortBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.grouping.MatchBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.PacketIn;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.PacketInMessageBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.features.reply.PhyPort;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.features.reply.PhyPortBuilder;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import static org.mockito.Mockito.*;
@RunWith(MockitoJUnitRunner.class)
public class NodeConnectorRefToPortTranslatorTest extends TestCase {
- @Mock
- DeviceState deviceState;
- @Mock
- KeyedInstanceIdentifier<Node, NodeKey> nodeInstanceIdentifier;
- @Mock
- GetFeaturesOutput getFeaturesOutput;
- @Mock
- List<PhyPort> phyPorts;
- @Mock
- Iterator<PhyPort> phyPortsIterator;
- @Mock
- PhyPort phyPort;
- @Mock
- PhyPort phyPort2;
-
-
- @Mock
- DeviceState secondDeviceState;
- @Mock
- GetFeaturesOutput secondGetFeaturesOutput;
- @Mock
- PhyPort secondPhyPort;
-
+ static final String PACKET_DATA = "Test_Data";
static final Long PORT_NO = 5l;
static final Long SECOND_PORT_NO = 6l;
- static final String ID_VALUE = "openflow:10";
+ static final BigInteger DATA_PATH_ID = BigInteger.TEN;
+ static final short OF_VERSION = OFConstants.OFP_VERSION_1_3;
+ static final String ID_VALUE = "openflow:" + DATA_PATH_ID;
+ static final Long TABLE_ID = 42L;
+
+    private static PacketIn createPacketIn(long portNo) {
+        // Builds a minimal packet-in message carrying an OXM in-port match on the given port.
+        InPortBuilder inPortBuilder = new InPortBuilder()
+            .setPortNumber(new PortNumber(portNo));
+
+        InPortCaseBuilder caseBuilder = new InPortCaseBuilder()
+            .setInPort(inPortBuilder.build());
+
+        MatchEntryBuilder matchEntryBuilder = new MatchEntryBuilder()
+            .setOxmClass(OpenflowBasicClass.class)
+            .setOxmMatchField(InPort.class)
+            .setHasMask(false)
+            .setMatchEntryValue(caseBuilder.build());
+
+        MatchBuilder matchBuilder = new MatchBuilder()
+            .setMatchEntry(Lists.newArrayList(matchEntryBuilder.build()));
+
+        return new PacketInMessageBuilder()
+            // single setVersion call: the original chained setVersion(OFP_VERSION_1_0)
+            // and then overwrote it with setVersion(OFP_VERSION_1_3) - the first call was dead code
+            .setVersion(OFConstants.OFP_VERSION_1_3)
+            .setData(PACKET_DATA.getBytes())
+            .setReason(PacketInReason.OFPRACTION)
+            .setMatch(matchBuilder.build())
+            .setCookie(BigInteger.ZERO)
+            .setTableId(new TableId(TABLE_ID))
+            .build();
+    }
@Before
public void setUp() throws Exception {
- // Create nodePath (we cannot mock it in regular way because KeyedInstanceIdentifier.getKey() is final)
- final KeyedInstanceIdentifier<Node, NodeKey> nodePath = KeyedInstanceIdentifier
- .create(Nodes.class)
- .child(Node.class, new NodeKey(new NodeId(ID_VALUE)));
-
- // Mock first device state
- final List<PhyPort> phyPorts = Arrays.asList(null, phyPort2, phyPort);
- when(deviceState.getVersion()).thenReturn(OFConstants.OFP_VERSION_1_3);
- when(deviceState.getNodeInstanceIdentifier()).thenReturn(nodePath);
- when(deviceState.getFeatures()).thenReturn(getFeaturesOutput);
- when(getFeaturesOutput.getDatapathId()).thenReturn(BigInteger.TEN);
- when(getFeaturesOutput.getPhyPort()).thenReturn(phyPorts);
- when(phyPort.getPortNo()).thenReturn(PORT_NO);
- when(phyPort2.getPortNo()).thenReturn(null);
-
- // Mock second device state
- final List<PhyPort> secondPhyPorts = Arrays.asList(phyPort);
- when(secondDeviceState.getVersion()).thenReturn(OFConstants.OFP_VERSION_1_3);
- when(secondDeviceState.getNodeInstanceIdentifier()).thenReturn(nodePath);
- when(secondDeviceState.getFeatures()).thenReturn(secondGetFeaturesOutput);
- when(secondGetFeaturesOutput.getDatapathId()).thenReturn(BigInteger.TEN);
- when(secondGetFeaturesOutput.getPhyPort()).thenReturn(secondPhyPorts);
- when(secondPhyPort.getPortNo()).thenReturn(SECOND_PORT_NO);
-
-
+ // Initialize the OpenFlow version/port map
OpenflowPortsUtil.init();
}
@Test(expected = NullPointerException.class)
- public void testForNotNullableDeviceStateInGetPortNo() throws Exception {
- NodeConnectorRefToPortTranslator.getPortNoFromDeviceState(null);
+ public void testForNotNullablePacketInInGetPortNo() throws Exception {
+ NodeConnectorRefToPortTranslator.getPortNoFromPacketIn(null);
}
@Test(expected = NullPointerException.class)
- public void testForNotNullableDeviceStateInToNodeConnectorRef() throws Exception {
- NodeConnectorRefToPortTranslator.toNodeConnectorRef(null);
+ public void testForNotNullablePacketInInToNodeConnectorRef() throws Exception {
+ NodeConnectorRefToPortTranslator.toNodeConnectorRef(null, DATA_PATH_ID);
}
@Test(expected = NullPointerException.class)
- public void testForNotNullableDeviceStateInFromNodeConnectorRef() throws Exception {
- NodeConnectorRefToPortTranslator.fromNodeConnectorRef(null, null);
+ public void testForNotNullableNodeConnectorRefInFromNodeConnectorRef() throws Exception {
+ NodeConnectorRefToPortTranslator.fromNodeConnectorRef(null, OF_VERSION);
}
@Test
- public void testGetPortNoFromDeviceState() throws Exception {
- Long portNo = NodeConnectorRefToPortTranslator.getPortNoFromDeviceState(deviceState);
+ public void testGetPortNoFromPacketIn() throws Exception {
+ PacketIn packetIn = createPacketIn(PORT_NO);
+ Long portNo = NodeConnectorRefToPortTranslator.getPortNoFromPacketIn(packetIn);
assertEquals(portNo, PORT_NO);
}
@Test
public void testNodeConnectorConversion() throws Exception {
- // Convert DeviceState to NodeConnectorRef
- NodeConnectorRef ref = NodeConnectorRefToPortTranslator.toNodeConnectorRef(deviceState);
+ // Mock the packet in message
+ PacketIn packetIn = createPacketIn(PORT_NO);
- // Test getting port from NodeConnectorRef
- Long refPort = NodeConnectorRefToPortTranslator.fromNodeConnectorRef(deviceState, ref);
- assertEquals(PORT_NO, refPort);
+ // Convert PacketIn to NodeConnectorRef
+ NodeConnectorRef ref = NodeConnectorRefToPortTranslator.toNodeConnectorRef(packetIn, DATA_PATH_ID);
- // Test for getting same port, even when we used different DeviceState as fallback
- Long secondPort = NodeConnectorRefToPortTranslator.fromNodeConnectorRef(secondDeviceState, ref);
- assertEquals(refPort, secondPort);
+ // Get port number from created NodeConnectorRef
+ Long refPort = NodeConnectorRefToPortTranslator.fromNodeConnectorRef(ref, OF_VERSION);
- // Test fallback to device state if there is any problem with NodeConnectorRef
- refPort = NodeConnectorRefToPortTranslator.fromNodeConnectorRef(deviceState, null);
+ // Check if we got the correct port number
assertEquals(PORT_NO, refPort);
- // Check if 2 NodeConnectorRef created from same DeviceState have same value
- assertEquals(ref, NodeConnectorRefToPortTranslator.toNodeConnectorRef(deviceState));
+ // Check that two NodeConnectorRefs created from the same PacketIn are equal in value
+ assertEquals(ref, NodeConnectorRefToPortTranslator.toNodeConnectorRef(packetIn, DATA_PATH_ID));
+
+ // Check that two NodeConnectorRefs created from the same PacketIn but different datapath IDs differ
+ // NOTE(review): assertNotSame below compares reference identity, not value inequality — the two
+ // refs would differ even for equal values; consider assertNotEquals if value inequality is intended
+ assertNotSame(ref, NodeConnectorRefToPortTranslator.toNodeConnectorRef(packetIn, BigInteger.ONE));
}
}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Test for {@link PathUtil}.
+ */
+public class PathUtilTest {
+
+ public static final NodeId NODE_ID = new NodeId("ut-dummy-node");
+ public static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
+ public static final NodeRef NODE_REF = new NodeRef(InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY));
+
+ @Test
+ public void testExtractNodeId() throws Exception {
+ Assert.assertEquals(NODE_ID, PathUtil.extractNodeId(NODE_REF));
+ }
+}
\ No newline at end of file
.put(OutputPortValues.ALL.toString(), Long.valueOf(PortNumberValuesV10.ALL.getIntValue())) //0xfffc
.put(OutputPortValues.CONTROLLER.toString(), Long.valueOf(PortNumberValuesV10.CONTROLLER.getIntValue())) //0xfffd
.put(OutputPortValues.LOCAL.toString(), Long.valueOf(PortNumberValuesV10.LOCAL.getIntValue())) //0xfffe
- .put(OutputPortValues.NONE.toString(), Long.valueOf(PortNumberValuesV10.NONE.getIntValue())) //0xfffe
+ .put(OutputPortValues.NONE.toString(), Long.valueOf(PortNumberValuesV10.NONE.getIntValue())) //0xffff
.build();
// openflow 1.3 reserved ports.
<properties>
<project.build.sourceEncoding>utf-8</project.build.sourceEncoding>
- <nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
<openflowjava.version>0.8.0-SNAPSHOT</openflowjava.version>
<openflowplugin.version>0.3.0-SNAPSHOT</openflowplugin.version>
<sal.api.version>0.11.0-SNAPSHOT</sal.api.version>
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.feature.prop.type.table.feature.prop.type.wildcards.WildcardSetfieldBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.table.features.TablePropertiesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.table.features.table.properties.TableFeatureProperties;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.table.features.table.properties.TableFeaturePropertiesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.table.features.table.properties.TableFeaturePropertiesKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.osgi.framework.BundleContext;
private DataBroker dataBroker;
private ProviderContext pc;
private final BundleContext ctx;
- private Table testTable;
+ private TableFeatures testTable;
private Node testNode;
private final String originalTableName = "Foo";
private final String updatedTableName = "Bar";
dataBroker = session.getSALService(DataBroker.class);
ctx.registerService(CommandProvider.class.getName(), this, null);
// createTestNode();
- // createTestTable();
+ // createTestTableFeatures();
}
private void createUserNode(String nodeRef) {
return InstanceIdentifier.create(Nodes.class).child(Node.class, node.getKey());
}
- private TableBuilder createTestTable(String tableFeatureTypeArg) {
+ private TableFeaturesBuilder createTestTableFeatures(String tableFeatureTypeArg) {
String tableFeatureType = tableFeatureTypeArg;
if (tableFeatureType == null) {
tableFeatureType = "t1";
}
- // Sample data , committing to DataStore
- short id = 12;
- TableKey key = new TableKey(id);
-
- TableBuilder table = new TableBuilder();
- table.setId((short) 12);
- table.setId(id);
- table.setKey(key);
-
- List<TableFeatures> ofTablefeatures = new ArrayList<TableFeatures>();
-
- // Skip this to send empty table features
+ final TableFeaturesBuilder tableFeature = new TableFeaturesBuilder();
+ // Sample data, committed to the DataStore
if (!tableFeatureType.equals("t1")) {
- TableFeaturesBuilder tableFeature1 = new TableFeaturesBuilder();
- tableFeature1.setTableId((short) 0);
- tableFeature1.setName("Table 0");
+ tableFeature.setTableId((short) 0);
+ tableFeature.setName("Table 0");
- tableFeature1.setMetadataMatch(BigInteger.valueOf(10));
- tableFeature1.setMetadataWrite(BigInteger.valueOf(10));
- tableFeature1.setMaxEntries(10000L);
+ tableFeature.setMetadataMatch(BigInteger.valueOf(10));
+ tableFeature.setMetadataWrite(BigInteger.valueOf(10));
+ tableFeature.setMaxEntries(10000L);
- tableFeature1.setConfig(new TableConfig(false));
+ tableFeature.setConfig(new TableConfig(false));
List<TableFeatureProperties> properties = new ArrayList<TableFeatureProperties>();
TablePropertiesBuilder propertyBld = new TablePropertiesBuilder();
propertyBld.setTableFeatureProperties(properties);
- tableFeature1.setTableProperties(propertyBld.build());
-
-
- ofTablefeatures.add(tableFeature1.build());
-
+ tableFeature.setTableProperties(propertyBld.build());
}
- table.setTableFeatures(ofTablefeatures);
-
- testTable = table.build();
- return table;
+ testTable = tableFeature.build();
+ return tableFeature;
}
private TableFeaturePropertiesBuilder createApplyActionsMissTblFeatureProp() {
}
- private void writeTable(final CommandInterpreter ci, Table table) {
+ private void writeTableFeatures(final CommandInterpreter ci, TableFeatures tableFeatures) {
ReadWriteTransaction modification = dataBroker.newReadWriteTransaction();
- InstanceIdentifier<Table> path1 = InstanceIdentifier.create(Nodes.class)
+ KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> path1 = InstanceIdentifier.create(Nodes.class)
.child(Node.class, testNode.getKey()).augmentation(FlowCapableNode.class).
- child(Table.class, new TableKey(table.getId()));
+ child(TableFeatures.class, new TableFeaturesKey(tableFeatures.getTableId()));
modification.merge(LogicalDatastoreType.OPERATIONAL, nodeToInstanceId(testNode), testNode, true);
- modification.merge(LogicalDatastoreType.OPERATIONAL, path1, table, true);
+ modification.merge(LogicalDatastoreType.OPERATIONAL, path1, tableFeatures, true);
modification.merge(LogicalDatastoreType.CONFIGURATION, nodeToInstanceId(testNode), testNode, true);
- modification.merge(LogicalDatastoreType.CONFIGURATION, path1, table, true);
+ modification.merge(LogicalDatastoreType.CONFIGURATION, path1, tableFeatures, true);
CheckedFuture<Void, TransactionCommitFailedException> commitFuture = modification.submit();
Futures.addCallback(commitFuture, new FutureCallback<Void>() {
@Override
createUserNode(nref);
}
String tableFeatureType = ci.nextArgument();
- TableBuilder table = createTestTable(tableFeatureType);
+ TableFeaturesBuilder tableFeaturesBld = createTestTableFeatures(tableFeatureType);
- writeTable(ci, table.build());
+ writeTableFeatures(ci, tableFeaturesBld.build());
}
@Override