<?xml version="1.0"?>
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
- xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.openflowplugin</groupId>
- <artifactId>applications</artifactId>
- <version>0.5.0-SNAPSHOT</version>
- </parent>
- <groupId>org.opendaylight.openflowplugin.applications</groupId>
- <artifactId>bulk-o-matic</artifactId>
- <packaging>bundle</packaging>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.openflowplugin</groupId>
- <artifactId>openflowplugin-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-broker-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.openflowplugin.model</groupId>
- <artifactId>model-flow-base</artifactId>
- </dependency>
+<project
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+ xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>applications</artifactId>
+ <version>0.5.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.opendaylight.openflowplugin.applications</groupId>
+ <artifactId>bulk-o-matic</artifactId>
+ <packaging>bundle</packaging>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>openflowplugin-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-broker-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-inventory</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin.model</groupId>
+ <artifactId>model-flow-base</artifactId>
+ </dependency>
<!-- Test dependencies -->
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-core</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-maven-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <configuration>
+ <propertyExpansion>checkstyle.violationSeverity=warning</propertyExpansion>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
</project>
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private BulkOMaticUtils() {
}
- public static String ipIntToStr (int k) {
- return new StringBuilder().append(k >> 24 & 0xFF).append(".")
- .append(k >> 16 & 0xFF).append(".")
- .append(k >> 8 & 0xFF).append(".")
- .append(k & 0xFF).append("/32").toString();
+ public static String ipIntToStr(int number) {
+ return new StringBuilder().append(number >> 24 & 0xFF).append(".")
+ .append(number >> 16 & 0xFF).append(".")
+ .append(number >> 8 & 0xFF).append(".")
+ .append(number & 0xFF).append("/32").toString();
}
- public static Match getMatch(final Integer sourceIp){
- Ipv4Match ipv4Match = new Ipv4MatchBuilder().setIpv4Source(
- new Ipv4Prefix(ipIntToStr(sourceIp))).build();
+ public static Match getMatch(final Integer sourceIp) {
+ Ipv4Match ipv4Match = new Ipv4MatchBuilder().setIpv4Source(new Ipv4Prefix(ipIntToStr(sourceIp))).build();
MatchBuilder matchBuilder = new MatchBuilder();
matchBuilder.setLayer3Match(ipv4Match);
EthernetTypeBuilder ethTypeBuilder = new EthernetTypeBuilder();
return matchBuilder.build();
}
- public static Flow buildFlow(Short tableId, String flowId, Match match){
+ public static Flow buildFlow(Short tableId, String flowId, Match match) {
FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setKey(new FlowKey(new FlowId(flowId)));
flowBuilder.setTableId(tableId);
}
public static InstanceIdentifier<Flow> getFlowInstanceIdentifier(Short tableId, String flowId, String dpId) {
- return InstanceIdentifier.create(Nodes.class).child(Node.class,
- new NodeKey(new NodeId(dpId)))
- .augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(tableId))
+ return InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(new NodeId(dpId)))
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId))
.child(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow.class,
new FlowKey(new FlowId(flowId)));
}
- public static InstanceIdentifier<Node> getFlowCapableNodeId(String dpId){
- return InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, new NodeKey(new NodeId(dpId)))
- .build();
+ public static InstanceIdentifier<Node> getFlowCapableNodeId(String dpId) {
+ return InstanceIdentifier.builder(Nodes.class).child(Node.class, new NodeKey(new NodeId(dpId))).build();
}
public static InstanceIdentifier<Table> getTableId(Short tableId, String dpId) {
- return InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, new NodeKey(new NodeId(dpId)))
- .augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(tableId))
- .build();
+ return InstanceIdentifier.builder(Nodes.class).child(Node.class, new NodeKey(new NodeId(dpId)))
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId)).build();
}
public static InstanceIdentifier<Flow> getFlowId(final InstanceIdentifier<Table> tablePath, final String flowId) {
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private FlowCounterMBean writer;
public enum OperationStatus {
- INIT (0),
- SUCCESS (2),
- FAILURE (-1),
- IN_PROGRESS (1);
+ INIT(0), SUCCESS(2), FAILURE(-1), IN_PROGRESS(1);
private final int status;
@Override
public long getFlowCount() {
- if(reader != null) {
+ if (reader != null) {
return reader.getFlowCount();
}
return BulkOMaticUtils.DEFAULT_FLOW_COUNT;
@Override
public int getReadOpStatus() {
- if(reader != null) {
+ if (reader != null) {
return reader.getReadOpStatus();
}
return OperationStatus.INIT.status();
@Override
public int getWriteOpStatus() {
- if(writer != null) {
+ if (writer != null) {
return writer.getWriteOpStatus();
}
return OperationStatus.INIT.status();
@Override
public long getTaskCompletionTime() {
- if(writer != null) {
+ if (writer != null) {
return writer.getTaskCompletionTime();
}
return BulkOMaticUtils.DEFAULT_COMPLETION_TIME;
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
public interface FlowCounterMBean {
- default public long getFlowCount() {
+ default long getFlowCount() {
return BulkOMaticUtils.DEFAULT_FLOW_COUNT;
}
- default public int getReadOpStatus() {
+ default int getReadOpStatus() {
return BulkOMaticUtils.DEFUALT_STATUS;
}
- default public int getWriteOpStatus() {
+ default int getWriteOpStatus() {
return BulkOMaticUtils.DEFUALT_STATUS;
}
- default public long getTaskCompletionTime() {
+ default long getTaskCompletionTime() {
return BulkOMaticUtils.DEFAULT_COMPLETION_TIME;
}
- default public String getUnits() {
+ default String getUnits() {
return BulkOMaticUtils.DEFAULT_UNITS;
}
- default public long getTableCount() {
+ default long getTableCount() {
return BulkOMaticUtils.DEFAULT_TABLE_COUNT;
}
}
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private final short startTableId;
private final short endTableId;
private final boolean isConfigDs;
- private AtomicLong flowCount = new AtomicLong();
- private AtomicInteger readOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
+ private final AtomicLong flowCount = new AtomicLong();
+ private final AtomicInteger readOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
- private FlowReader(final DataBroker dataBroker,
- final Integer dpnCount,
- final int flowsPerDpn,
- final boolean verbose,
- final boolean isConfigDs,
- final short startTableId,
- final short endTableId) {
+ private FlowReader(final DataBroker dataBroker, final Integer dpnCount, final int flowsPerDpn,
+ final boolean verbose, final boolean isConfigDs, final short startTableId, final short endTableId) {
this.dataBroker = dataBroker;
this.dpnCount = dpnCount;
this.verbose = verbose;
}
public static FlowReader getNewInstance(final DataBroker dataBroker,
- final Integer dpnCount,
- final int flowsPerDpn,
- final boolean verbose,
- final boolean isConfigDs,
- final short startTableId,
- final short endTableId) {
- return new FlowReader(dataBroker, dpnCount, flowsPerDpn, verbose,
- isConfigDs, startTableId, endTableId);
+ final Integer dpnCount, final int flowsPerDpn,
+ final boolean verbose, final boolean isConfigDs,
+ final short startTableId, final short endTableId) {
+ return new FlowReader(dataBroker, dpnCount,
+ flowsPerDpn, verbose,
+ isConfigDs, startTableId,
+ endTableId);
}
@Override
for (int i = 1; i <= dpnCount; i++) {
String dpId = BulkOMaticUtils.DEVICE_TYPE_PREFIX + i;
for (int j = 0; j < flowsPerDPN; j++) {
- short tableRollover = (short)(endTableId - startTableId + 1);
- short tableId = (short) (((j) % tableRollover) + startTableId);
+ short tableRollover = (short) (endTableId - startTableId + 1);
+ short tableId = (short) (j % tableRollover + startTableId);
Integer sourceIp = j + 1;
ReadOnlyTransaction readOnlyTransaction = dataBroker.newReadOnlyTransaction();
try {
Optional<Flow> flowOptional;
- if(isConfigDs) {
- flowOptional = readOnlyTransaction.read(LogicalDatastoreType.CONFIGURATION, flowIid).checkedGet();
+ if (isConfigDs) {
+ flowOptional = readOnlyTransaction.read(LogicalDatastoreType.CONFIGURATION, flowIid)
+ .checkedGet();
} else {
flowOptional = readOnlyTransaction.read(LogicalDatastoreType.OPERATIONAL, flowIid).checkedGet();
}
}
}
}
- if(readOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
+ if (readOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
readOpStatus.set(FlowCounter.OperationStatus.SUCCESS.status());
}
LOG.info("Total Flows read: {}", flowCount);
}
- private InstanceIdentifier<Flow> getFlowInstanceIdentifier(String dpId, Short tableId, String flowId){
+ private InstanceIdentifier<Flow> getFlowInstanceIdentifier(String dpId, Short tableId, String flowId) {
return InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(new NodeId(dpId)))
- .augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(tableId))
- .child(Flow.class,
- new FlowKey(new FlowId(flowId)));
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId))
+ .child(Flow.class, new FlowKey(new FlowId(flowId)));
}
@Override
public int getReadOpStatus() {
return readOpStatus.get();
}
-}
\ No newline at end of file
+}
public class FlowWriterConcurrent implements FlowCounterMBean {
private static final Logger LOG = LoggerFactory.getLogger(FlowWriterConcurrent.class);
- public static final String USING_CONCURRENT_IMPLEMENTATION_OF_FLOW_WRITER = "Using Concurrent implementation of Flow Writer.";
+ public static final String USING_CONCURRENT_IMPLEMENTATION_OF_FLOW_WRITER =
+ "Using Concurrent implementation of Flow Writer.";
private final DataBroker dataBroker;
private final ExecutorService flowPusher;
private long startTime;
- private AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
- private AtomicInteger countDpnWriteCompletion = new AtomicInteger();
- private AtomicLong taskCompletionTime = new AtomicLong();
+ private final AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
+ private final AtomicInteger countDpnWriteCompletion = new AtomicInteger();
+ private final AtomicLong taskCompletionTime = new AtomicLong();
public FlowWriterConcurrent(final DataBroker dataBroker, ExecutorService flowPusher) {
this.dataBroker = dataBroker;
LOG.info(USING_CONCURRENT_IMPLEMENTATION_OF_FLOW_WRITER);
}
- public void addFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize,
- int sleepMillis, int sleepAfter, short startTableId, short endTableId,
- boolean isCreateParents) {
+ public void addFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, int sleepMillis, int sleepAfter,
+ short startTableId, short endTableId, boolean isCreateParents) {
LOG.info(USING_CONCURRENT_IMPLEMENTATION_OF_FLOW_WRITER);
countDpnWriteCompletion.set(dpnCount);
startTime = System.nanoTime();
for (int i = 1; i <= dpnCount; i++) {
- FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i),
- flowsPerDPN, true, batchSize, sleepMillis, sleepAfter, startTableId, endTableId, isCreateParents);
+ FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, true, batchSize, sleepMillis,
+ sleepAfter, startTableId, endTableId, isCreateParents);
flowPusher.execute(task);
}
}
- public void deleteFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize,
- short startTableId, short endTableId) {
+ public void deleteFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, short startTableId,
+ short endTableId) {
LOG.info(USING_CONCURRENT_IMPLEMENTATION_OF_FLOW_WRITER);
countDpnWriteCompletion.set(dpnCount);
for (int i = 1; i <= dpnCount; i++) {
- FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, false, batchSize,
- 0, 1, startTableId, endTableId, false);
+ FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, false, batchSize, 0, 1,
+ startTableId, endTableId, false);
flowPusher.execute(task);
}
}
private final int sleepMillis;
private final short startTableId;
private final short endTableId;
- private AtomicInteger remainingTxReturn = new AtomicInteger(0);
+ private final AtomicInteger remainingTxReturn = new AtomicInteger(0);
private final boolean isCreateParents;
- public FlowHandlerTask(final String dpId,
- final int flowsPerDpn,
- final boolean add,
- final int batchSize,
- final int sleepMillis,
- final int sleepAfter,
- final short startTableId,
- final short endTableId,
- final boolean isCreateParents){
+ FlowHandlerTask(final String dpId, final int flowsPerDpn,
+ final boolean add, final int batchSize,
+ final int sleepMillis, final int sleepAfter,
+ final short startTableId, final short endTableId,
+ final boolean isCreateParents) {
this.dpId = BulkOMaticUtils.DEVICE_TYPE_PREFIX + dpId;
this.add = add;
this.flowsPerDpn = flowsPerDpn;
this.startTableId = startTableId;
this.endTableId = endTableId;
this.isCreateParents = isCreateParents;
- remainingTxReturn.set(flowsPerDpn/batchSize);
+ remainingTxReturn.set(flowsPerDpn / batchSize);
}
@Override
public void run() {
- LOG.info("Starting flow writer task for dpid: {}. Number of transactions: {}", dpId, flowsPerDpn/batchSize);
+ LOG.info("Starting flow writer task for dpid: {}. Number of transactions: {}", dpId,
+ flowsPerDpn / batchSize);
writeOpStatus.set(FlowCounter.OperationStatus.IN_PROGRESS.status());
short tableId = startTableId;
- int numSubmits = flowsPerDpn/batchSize;
+ int numSubmits = flowsPerDpn / batchSize;
int sourceIp = 1;
int newBatchSize = batchSize;
for (int i = 1; i <= numSubmits; i++) {
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- short k = tableId;
+ short calculatedTableId = tableId;
for (; sourceIp <= newBatchSize; sourceIp++) {
- String flowId = "Flow-" + dpId + "." + k + "." + sourceIp;
+ String flowId = "Flow-" + dpId + "." + calculatedTableId + "." + sourceIp;
Flow flow = null;
if (add) {
Match match = BulkOMaticUtils.getMatch(sourceIp);
- flow = BulkOMaticUtils.buildFlow(k, flowId, match);
+ flow = BulkOMaticUtils.buildFlow(calculatedTableId, flowId, match);
}
addFlowToTx(writeTransaction, flowId,
- BulkOMaticUtils.getFlowInstanceIdentifier(k, flowId, dpId), flow, sourceIp, k);
+ BulkOMaticUtils.getFlowInstanceIdentifier(calculatedTableId, flowId, dpId), flow, sourceIp,
+ calculatedTableId);
if (sourceIp < newBatchSize) {
- short a = 1;
- short b = (short)(endTableId - startTableId + 1);
- k = (short) (((k + a) % b) + startTableId);
+ short numberA = 1;
+ short numberB = (short) (endTableId - startTableId + 1);
+ calculatedTableId = (short) ((calculatedTableId + numberA) % numberB + startTableId);
}
}
- Futures.addCallback(writeTransaction.submit(), new DsCallBack(dpId, tableId, k, sourceIp));
+ Futures.addCallback(writeTransaction.submit(),
+ new DsCallBack(dpId, tableId, calculatedTableId, sourceIp));
// Wrap around
- tableId = (short)(((k + 1)%((short)(endTableId - startTableId + 1))) + startTableId);
+ tableId = (short) ((calculatedTableId + 1) % (short) (endTableId - startTableId + 1) + startTableId);
newBatchSize += batchSize;
- if (((i%sleepAfter) == 0) && (sleepMillis > 0)) {
+ if (i % sleepAfter == 0 && sleepMillis > 0) {
try {
Thread.sleep(sleepMillis);
} catch (InterruptedException e) {
}
}
- private void addFlowToTx(WriteTransaction writeTransaction, String flowId, InstanceIdentifier<Flow> flowIid, Flow flow, Integer sourceIp, Short tableId){
+ private void addFlowToTx(WriteTransaction writeTransaction, String flowId, InstanceIdentifier<Flow> flowIid,
+ Flow flow, Integer sourceIp, Short tableId) {
if (add) {
LOG.trace("Adding flow for flowId: {}, flowIid: {}", flowId, flowIid);
writeTransaction.put(LogicalDatastoreType.CONFIGURATION, flowIid, flow, isCreateParents);
}
private class DsCallBack implements FutureCallback {
- private String dpId;
- private int sourceIp;
- private short endTableId;
- private short beginTableId;
+ private final String dpId;
+ private final int sourceIp;
+ private final short endTableId;
+ private final short beginTableId;
- public DsCallBack(String dpId, Short beginTableId, Short endTableId, Integer sourceIp) {
+ DsCallBack(String dpId, Short beginTableId, Short endTableId, Integer sourceIp) {
this.dpId = dpId;
this.sourceIp = sourceIp;
this.endTableId = endTableId;
}
@Override
- public void onSuccess(Object o) {
+ public void onSuccess(Object object) {
if (remainingTxReturn.decrementAndGet() <= 0) {
long dur = System.nanoTime() - startTime;
- LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId,
- dur);
- if(0 == countDpnWriteCompletion.decrementAndGet() &&
- writeOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
+ LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId, dur);
+ if (0 == countDpnWriteCompletion.decrementAndGet()
+ && writeOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
writeOpStatus.set(FlowCounter.OperationStatus.SUCCESS.status());
taskCompletionTime.set(dur);
}
}
}
+ @Override
public void onFailure(Throwable error) {
if (remainingTxReturn.decrementAndGet() <= 0) {
long dur = System.nanoTime() - startTime;
- LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId,
- dur);
+ LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId, dur);
}
- LOG.error("Error: {} in Datastore write operation: dpid: {}, begin tableId: {}, " +
- "end tableId: {}, sourceIp: {} ", error, dpId, beginTableId, endTableId, sourceIp);
+ LOG.error("Error: {} in Datastore write operation: dpid: {}, begin tableId: {}, "
+ + "end tableId: {}, sourceIp: {} ", error, dpId, beginTableId, endTableId, sourceIp);
writeOpStatus.set(FlowCounter.OperationStatus.FAILURE.status());
}
}
}
-}
\ No newline at end of file
+}
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private final ExecutorService flowPusher;
private static final long PAUSE_BETWEEN_BATCH_MILLIS = 40;
- public FlowWriterDirectOFRpc(final DataBroker dataBroker,
- final SalFlowService salFlowService,
- final ExecutorService flowPusher) {
+ public FlowWriterDirectOFRpc(final DataBroker dataBroker, final SalFlowService salFlowService,
+ final ExecutorService flowPusher) {
this.dataBroker = dataBroker;
this.flowService = salFlowService;
this.flowPusher = flowPusher;
}
-
- public void rpcFlowAdd(String dpId, int flowsPerDpn, int batchSize){
+ public void rpcFlowAdd(String dpId, int flowsPerDpn, int batchSize) {
if (!getAllNodes().isEmpty() && getAllNodes().contains(dpId)) {
FlowRPCHandlerTask addFlowRpcTask = new FlowRPCHandlerTask(dpId, flowsPerDpn, batchSize);
flowPusher.execute(addFlowRpcTask);
}
}
- public void rpcFlowAddAll(int flowsPerDpn, int batchSize){
+ public void rpcFlowAddAll(int flowsPerDpn, int batchSize) {
Set<String> nodeIdSet = getAllNodes();
- if (nodeIdSet.isEmpty()){
+ if (nodeIdSet.isEmpty()) {
LOG.warn("No nodes seen on OPERATIONAL DS. Aborting !!!!");
- }
- else{
- for (String dpId : nodeIdSet){
+ } else {
+ for (String dpId : nodeIdSet) {
LOG.info("Starting FlowRPCTaskHandler for switch id {}", dpId);
FlowRPCHandlerTask addFlowRpcTask = new FlowRPCHandlerTask(dpId, flowsPerDpn, batchSize);
flowPusher.execute(addFlowRpcTask);
}
}
- private Set<String> getAllNodes(){
+ private Set<String> getAllNodes() {
Set<String> nodeIds = new HashSet<>();
InstanceIdentifier<Nodes> nodes = InstanceIdentifier.create(Nodes.class);
- ReadOnlyTransaction rTx = dataBroker.newReadOnlyTransaction();
+ ReadOnlyTransaction readOnlyTransaction = dataBroker.newReadOnlyTransaction();
try {
- Optional<Nodes> nodesDataNode = rTx.read(LogicalDatastoreType.OPERATIONAL, nodes).checkedGet();
- if (nodesDataNode.isPresent()){
+ Optional<Nodes> nodesDataNode = readOnlyTransaction.read(LogicalDatastoreType.OPERATIONAL, nodes)
+ .checkedGet();
+ if (nodesDataNode.isPresent()) {
List<Node> nodesCollection = nodesDataNode.get().getNode();
if (nodesCollection != null && !nodesCollection.isEmpty()) {
for (Node node : nodesCollection) {
LOG.info("Switch with ID {} discovered !!", node.getId().getValue());
nodeIds.add(node.getId().getValue());
}
- }
- else{
+ } else {
return Collections.emptySet();
}
- }
- else{
+ } else {
return Collections.emptySet();
}
- }
- catch(ReadFailedException rdFailedException){
+ } catch (ReadFailedException rdFailedException) {
LOG.error("Failed to read connected nodes {}", rdFailedException);
}
return nodeIds;
private final int flowsPerDpn;
private final int batchSize;
- public FlowRPCHandlerTask(final String dpId,
- final int flowsPerDpn,
- final int batchSize){
+ public FlowRPCHandlerTask(final String dpId, final int flowsPerDpn, final int batchSize) {
this.dpId = dpId;
this.flowsPerDpn = flowsPerDpn;
this.batchSize = batchSize;
@Override
public void run() {
- short tableId = (short)1;
+ short tableId = (short) 1;
int initFlowId = 500;
- for (int i=1; i<= flowsPerDpn; i++){
+ for (int i = 1; i <= flowsPerDpn; i++) {
String flowId = Integer.toString(initFlowId + i);
AddFlowInput addFlowInput = builder.build();
- LOG.debug("RPC invocation for adding flow-id {} with input {}", flowId,
- addFlowInput.toString());
+ LOG.debug("RPC invocation for adding flow-id {} with input {}", flowId, addFlowInput.toString());
flowService.addFlow(addFlowInput);
if (i % batchSize == 0) {
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private final ExecutorService flowPusher;
protected int dpnCount;
private long startTime;
- private AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
- private AtomicInteger countDpnWriteCompletion = new AtomicInteger();
- private AtomicLong taskCompletionTime = new AtomicLong();
+ private final AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
+ private final AtomicInteger countDpnWriteCompletion = new AtomicInteger();
+ private final AtomicLong taskCompletionTime = new AtomicLong();
public FlowWriterSequential(final DataBroker dataBroker, ExecutorService flowPusher) {
this.dataBroker = dataBroker;
LOG.info("Using Sequential implementation of Flow Writer.");
}
- public void addFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, int sleepMillis,
- short startTableId, short endTableId, boolean isCreateParents) {
+ public void addFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, int sleepMillis, short startTableId,
+ short endTableId, boolean isCreateParents) {
LOG.info("Using Sequential implementation of Flow Writer.");
this.dpnCount = dpnCount;
countDpnWriteCompletion.set(dpnCount);
startTime = System.nanoTime();
for (int i = 1; i <= dpnCount; i++) {
- FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, true, batchSize,
- sleepMillis, startTableId, endTableId, isCreateParents);
+ FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, true, batchSize, sleepMillis,
+ startTableId, endTableId, isCreateParents);
flowPusher.execute(task);
}
}
public void deleteFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, short startTableId,
- short endTableId) {
+ short endTableId) {
LOG.info("Using Sequential implementation of Flow Writer.");
countDpnWriteCompletion.set(dpnCount);
for (int i = 1; i <= dpnCount; i++) {
private final short endTableId;
private final boolean isCreateParents;
- public FlowHandlerTask(final String dpId,
- final int flowsPerDpn,
- final boolean add,
- final int batchSize,
- int sleepMillis,
- final short startTableId,
- final short endTableId,
- final boolean isCreateParents){
+ FlowHandlerTask(final String dpId,
+ final int flowsPerDpn,
+ final boolean add,
+ final int batchSize,
+ int sleepMillis,
+ final short startTableId,
+ final short endTableId,
+ final boolean isCreateParents) {
this.dpId = BulkOMaticUtils.DEVICE_TYPE_PREFIX + dpId;
this.add = add;
this.flowsPerDpn = flowsPerDpn;
@Override
public void run() {
- LOG.info("Starting flow writer task for dpid: {}. Number of transactions: {}", dpId, flowsPerDpn/batchSize);
+ LOG.info("Starting flow writer task for dpid: {}. Number of transactions: {}", dpId,
+ flowsPerDpn / batchSize);
writeOpStatus.set(FlowCounter.OperationStatus.IN_PROGRESS.status());
Short tableId = startTableId;
Integer sourceIp = 1;
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- short k = tableId;
+ short calculatedTableId = tableId;
for (; sourceIp <= batchSize; sourceIp++) {
- String flowId = "Flow-" + dpId + "." + k + "." + sourceIp;
+ String flowId = "Flow-" + dpId + "." + calculatedTableId + "." + sourceIp;
LOG.debug("Adding flow with id: {}", flowId);
Flow flow = null;
if (add) {
Match match = BulkOMaticUtils.getMatch(sourceIp);
- flow = BulkOMaticUtils.buildFlow(k, flowId, match);
+ flow = BulkOMaticUtils.buildFlow(calculatedTableId, flowId, match);
}
addFlowToTx(writeTransaction, flowId,
- BulkOMaticUtils.getFlowInstanceIdentifier(k, flowId, dpId), flow);
+ BulkOMaticUtils.getFlowInstanceIdentifier(calculatedTableId, flowId, dpId), flow);
if (sourceIp < batchSize) {
- short a = 1;
- short b = (short)(endTableId - startTableId + 1);
- k = (short) (((k + a) % b) + startTableId);
+ short numberA = 1;
+ short numberB = (short) (endTableId - startTableId + 1);
+ calculatedTableId = (short) ((calculatedTableId + numberA) % numberB + startTableId);
}
}
- LOG.debug("Submitting Txn for dpId: {}, begin tableId: {}, end tableId: {}, sourceIp: {}", dpId, tableId, k, sourceIp);
+ LOG.debug("Submitting Txn for dpId: {}, begin tableId: {}, end tableId: {}, sourceIp: {}", dpId, tableId,
+ calculatedTableId, sourceIp);
- Futures.addCallback(writeTransaction.submit(), new DsCallBack(dpId, sourceIp, k));
+ Futures.addCallback(writeTransaction.submit(), new DsCallBack(dpId, sourceIp, calculatedTableId));
}
private void addFlowToTx(WriteTransaction writeTransaction, String flowId, InstanceIdentifier<Flow> flowIid,
- Flow flow) {
+ Flow flow) {
if (add) {
LOG.trace("Adding flow for flowId: {}, flowIid: {}", flowId, flowIid);
writeTransaction.put(LogicalDatastoreType.CONFIGURATION, flowIid, flow, isCreateParents);
}
private class DsCallBack implements FutureCallback {
- private String dpId;
+ private final String dpId;
private Integer sourceIp;
- private Short tableId;
+ private final Short tableId;
- public DsCallBack(String dpId, Integer sourceIp, Short tableId) {
+ DsCallBack(String dpId, Integer sourceIp, Short tableId) {
this.dpId = dpId;
this.sourceIp = sourceIp;
- short a = 1;
- short b = (short)(endTableId - startTableId + 1);
- this.tableId = (short) (((tableId + a) % b) + startTableId);
+ short numberA = 1;
+ short numberB = (short) (endTableId - startTableId + 1);
+ this.tableId = (short) ((tableId + numberA) % numberB + startTableId);
}
@Override
- public void onSuccess(Object o) {
+ public void onSuccess(Object object) {
if (sourceIp > flowsPerDpn) {
long dur = System.nanoTime() - startTime;
LOG.info("Completed all flows installation for: dpid: {}, tableId: {}, sourceIp: {} in {}ns", dpId,
tableId, sourceIp, dur);
- if(0 == countDpnWriteCompletion.decrementAndGet() &&
- writeOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
+ if (0 == countDpnWriteCompletion.decrementAndGet()
+ && writeOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
writeOpStatus.set(FlowCounter.OperationStatus.SUCCESS.status());
taskCompletionTime.set(dur);
}
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
int newBatchSize = sourceIp + batchSize - 1;
- short k = tableId;
+ short calculatedTableId = tableId;
for (; sourceIp <= newBatchSize; sourceIp++) {
- String flowId = "Flow-" + dpId + "." + k + "." + sourceIp;
+ String flowId = "Flow-" + dpId + "." + calculatedTableId + "." + sourceIp;
Flow flow = null;
if (add) {
Match match = BulkOMaticUtils.getMatch(sourceIp);
- flow = BulkOMaticUtils.buildFlow(k, flowId, match);
+ flow = BulkOMaticUtils.buildFlow(calculatedTableId, flowId, match);
}
LOG.debug("Adding flow with id: {}", flowId);
addFlowToTx(writeTransaction, flowId,
- BulkOMaticUtils.getFlowInstanceIdentifier(k, flowId, dpId), flow);
+ BulkOMaticUtils.getFlowInstanceIdentifier(calculatedTableId, flowId, dpId),
+ flow);
if (sourceIp < newBatchSize) {
- short a = 1;
- short b = (short)(endTableId - startTableId + 1);
- k = (short) (((k + a) % b) + startTableId);
+ short numberA = 1;
+ short numberB = (short) (endTableId - startTableId + 1);
+ calculatedTableId = (short) ((calculatedTableId + numberA) % numberB + startTableId);
}
}
LOG.debug("OnSuccess: Submitting Txn for dpId: {}, begin tableId: {}, end tableId: {}, sourceIp: {}",
- dpId, tableId, k, sourceIp);
- Futures.addCallback(writeTransaction.submit(), new DsCallBack(dpId, sourceIp, k));
+ dpId, tableId, calculatedTableId, sourceIp);
+ Futures.addCallback(writeTransaction.submit(), new DsCallBack(dpId, sourceIp, calculatedTableId));
}
+ @Override
public void onFailure(Throwable error) {
- LOG.error("Error: {} in Datastore write operation: dpid: {}, tableId: {}, sourceIp: {}",
- error, dpId, tableId, sourceIp);
+ LOG.error("Error in Datastore write operation: dpid: {}, tableId: {}, sourceIp: {}", dpId,
+ tableId, sourceIp, error);
writeOpStatus.set(FlowCounter.OperationStatus.FAILURE.status());
}
}
}
-}
\ No newline at end of file
+}
/*
- * Copyright (c) 2016 Ericsson Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Ericsson Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private final DataBroker dataBroker;
private final ExecutorService flowPusher;
private long startTime;
- private AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
- private AtomicInteger countDpnWriteCompletion = new AtomicInteger();
- private AtomicLong taskCompletionTime = new AtomicLong();
+ private final AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
+ private final AtomicInteger countDpnWriteCompletion = new AtomicInteger();
+ private final AtomicLong taskCompletionTime = new AtomicLong();
- public FlowWriterTxChain(final DataBroker dataBroker, ExecutorService flowPusher){
+ public FlowWriterTxChain(final DataBroker dataBroker, ExecutorService flowPusher) {
this.dataBroker = dataBroker;
this.flowPusher = flowPusher;
LOG.info("Using Ping Pong Flow Tester Impl");
}
- public void addFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize,
- int sleepMillis, int sleepAfter, short startTableId, short endTableId,
- boolean isCreateParents) {
+ public void addFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, int sleepMillis, int sleepAfter,
+ short startTableId, short endTableId, boolean isCreateParents) {
LOG.info("Using Transaction Chain Flow Writer Impl");
countDpnWriteCompletion.set(dpnCount);
startTime = System.nanoTime();
for (int i = 1; i <= dpnCount; i++) {
- FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i),
- flowsPerDPN, true, batchSize, sleepMillis, sleepAfter, startTableId, endTableId, isCreateParents);
+ FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, true, batchSize, sleepMillis,
+ sleepAfter, startTableId, endTableId, isCreateParents);
flowPusher.execute(task);
}
}
- public void deleteFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize,
- short startTableId, short endTableId) {
+ public void deleteFlows(Integer dpnCount, Integer flowsPerDPN, int batchSize, short startTableId,
+ short endTableId) {
LOG.info("Using Transaction Chain Flow Writer Impl");
countDpnWriteCompletion.set(dpnCount);
for (int i = 1; i <= dpnCount; i++) {
- FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, false, batchSize,
- 0, 1, startTableId, endTableId, false);
+ FlowHandlerTask task = new FlowHandlerTask(Integer.toString(i), flowsPerDPN, false, batchSize, 0, 1,
+ startTableId, endTableId, false);
flowPusher.execute(task);
}
}
private final short startTableId;
private final short endTableId;
private final boolean isCreateParents;
- private AtomicInteger remainingTxReturn = new AtomicInteger(0);
+ private final AtomicInteger remainingTxReturn = new AtomicInteger(0);
BindingTransactionChain txChain;
- public FlowHandlerTask(final String dpId,
- final int flowsPerDpn,
- final boolean add,
- final int batchSize,
- final int sleepMillis,
- final int sleepAfter,
- final short startTableId,
- final short endTableId,
- final boolean isCreateParents){
+ FlowHandlerTask(final String dpId,
+ final int flowsPerDpn,
+ final boolean add,
+ final int batchSize,
+ final int sleepMillis,
+ final int sleepAfter,
+ final short startTableId,
+ final short endTableId,
+ final boolean isCreateParents) {
this.dpId = BulkOMaticUtils.DEVICE_TYPE_PREFIX + dpId;
this.add = add;
this.flowsPerDpn = flowsPerDpn;
this.startTableId = startTableId;
this.endTableId = endTableId;
this.isCreateParents = isCreateParents;
- remainingTxReturn.set(flowsPerDpn/batchSize);
+ remainingTxReturn.set(flowsPerDpn / batchSize);
}
@Override
public void run() {
writeOpStatus.set(FlowCounter.OperationStatus.IN_PROGRESS.status());
short tableId = startTableId;
- int numSubmits = flowsPerDpn/batchSize;
+ int numSubmits = flowsPerDpn / batchSize;
int sourceIp = 1;
int newBatchSize = batchSize;
LOG.info("Number of Txn for dpId: {} is: {}", dpId, numSubmits);
LOG.info("Creating new txChain: {} for dpid: {}", txChain, dpId);
for (int i = 1; i <= numSubmits; i++) {
- WriteTransaction writeTransaction;
- try {
- writeTransaction = txChain.newWriteOnlyTransaction();
- } catch (Exception e) {
- LOG.error("Transaction creation failed in txChain: {}, due to: {}", txChain, e);
- break;
- }
- short k = tableId;
+ WriteTransaction writeTransaction = txChain.newWriteOnlyTransaction();
+ short calculatedTableId = tableId;
for (; sourceIp <= newBatchSize; sourceIp++) {
- String flowId = "Flow-" + dpId + "." + k + "." + sourceIp;
+ String flowId = "Flow-" + dpId + "." + calculatedTableId + "." + sourceIp;
Flow flow = null;
if (add) {
Match match = BulkOMaticUtils.getMatch(sourceIp);
- flow = BulkOMaticUtils.buildFlow(k, flowId, match);
+ flow = BulkOMaticUtils.buildFlow(calculatedTableId, flowId, match);
}
writeTxToDs(writeTransaction, flowId,
- BulkOMaticUtils.getFlowInstanceIdentifier(k, flowId, dpId), flow, sourceIp, k);
+ BulkOMaticUtils.getFlowInstanceIdentifier(calculatedTableId, flowId, dpId),
+ flow, sourceIp, calculatedTableId);
if (sourceIp < newBatchSize) {
- short a = 1;
- short b = (short) (endTableId - startTableId + 1);
- k = (short) (((k + a) % b) + startTableId);
+ short numberA = 1;
+ short numberB = (short) (endTableId - startTableId + 1);
+ calculatedTableId = (short) ((calculatedTableId + numberA) % numberB + startTableId);
}
}
- LOG.debug("Submitting Txn for dpId: {}, begin tableId: {}, end tableId: {}, sourceIp: {}", dpId, tableId, k, sourceIp - 1);
- Futures.addCallback(writeTransaction.submit(), new DsCallBack(dpId, tableId, k, sourceIp));
+ LOG.debug("Submitting Txn for dpId: {}, begin tableId: {}, end tableId: {}, sourceIp: {}", dpId,
+ tableId, calculatedTableId, sourceIp - 1);
+ Futures.addCallback(writeTransaction.submit(),
+ new DsCallBack(dpId, tableId, calculatedTableId, sourceIp));
// Wrap around
- tableId = (short) (((k + 1) % ((short) (endTableId - startTableId + 1))) + startTableId);
+ tableId = (short) ((calculatedTableId + 1) % (short) (endTableId - startTableId + 1) + startTableId);
newBatchSize += batchSize;
- if (((i % sleepAfter) == 0) && (sleepMillis > 0)) {
+ if (i % sleepAfter == 0 && sleepMillis > 0) {
try {
Thread.sleep(sleepMillis);
} catch (InterruptedException e) {
}
@Override
- public void onTransactionChainFailed(TransactionChain<?, ?> transactionChain, AsyncTransaction<?, ?> asyncTransaction, Throwable throwable) {
+ public void onTransactionChainFailed(TransactionChain<?, ?> transactionChain,
+ AsyncTransaction<?, ?> asyncTransaction, Throwable throwable) {
LOG.error("Transaction chain: {} FAILED at asyncTransaction: {} due to: {}", transactionChain,
asyncTransaction.getIdentifier(), throwable);
transactionChain.close();
LOG.info("Transaction chain: {} closed successfully.", transactionChain);
}
- private void writeTxToDs(WriteTransaction writeTransaction, String flowId, InstanceIdentifier<Flow> flowIid, Flow flow, Integer sourceIp, Short tableId){
+ private void writeTxToDs(WriteTransaction writeTransaction, String flowId, InstanceIdentifier<Flow> flowIid,
+ Flow flow, Integer sourceIp, Short tableId) {
if (add) {
LOG.trace("Adding flow for flowId: {}, flowIid: {}", flowId, flowIid);
writeTransaction.put(LogicalDatastoreType.CONFIGURATION, flowIid, flow, isCreateParents);
}
private class DsCallBack implements FutureCallback {
- private String dpId;
- private int sourceIp;
- private short endTableId;
- private short beginTableId;
+ private final String dpId;
+ private final int sourceIp;
+ private final short endTableId;
+ private final short beginTableId;
- public DsCallBack(String dpId, Short beginTableId, Short endTableId, Integer sourceIp) {
+ DsCallBack(String dpId, Short beginTableId, Short endTableId, Integer sourceIp) {
this.dpId = dpId;
this.sourceIp = sourceIp;
this.endTableId = endTableId;
}
@Override
- public void onSuccess(Object o) {
+ public void onSuccess(Object object) {
if (remainingTxReturn.decrementAndGet() <= 0) {
long dur = System.nanoTime() - startTime;
- LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId,
- dur);
- if(0 == countDpnWriteCompletion.decrementAndGet() &&
- writeOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
+ LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId, dur);
+ if (0 == countDpnWriteCompletion.decrementAndGet()
+ && writeOpStatus.get() != FlowCounter.OperationStatus.FAILURE.status()) {
writeOpStatus.set(FlowCounter.OperationStatus.SUCCESS.status());
taskCompletionTime.set(dur);
}
}
}
+ @Override
public void onFailure(Throwable error) {
if (remainingTxReturn.decrementAndGet() <= 0) {
long dur = System.nanoTime() - startTime;
- LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId,
- dur);
+ LOG.info("Completed all flows installation for: dpid: {} in {}ns", dpId, dur);
}
- LOG.error("Error: {} in Datastore write operation: dpid: {}, begin tableId: {}, " +
- "end tableId: {}, sourceIp: {} ", error, dpId, beginTableId, endTableId, sourceIp);
+ LOG.error("Error in Datastore write operation: dpid: {}, begin tableId: {}, "
+ + "end tableId: {}, sourceIp: {} ", dpId, beginTableId, endTableId, sourceIp, error);
writeOpStatus.set(FlowCounter.OperationStatus.FAILURE.status());
}
}
}
-}
\ No newline at end of file
+}
/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.yang.gen.v1.urn.opendaylight.bulk.flow.service.rev150608.RemoveFlowsRpcInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.bulk.flow.service.rev150608.SalBulkFlowService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.bulk.flow.service.rev150608.TableTestInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.bulk.flow.service.rev150608.TableTestInput.Operation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.bulk.flow.service.rev150608.bulk.flow.ds.list.grouping.BulkFlowDsItem;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
private final DataBroker dataBroker;
private final FlowCounter flowCounterBeanImpl = new FlowCounter();
private final ExecutorService fjService = new ForkJoinPool();
+
public SalBulkFlowServiceImpl(SalFlowService flowService, DataBroker dataBroker) {
this.flowService = Preconditions.checkNotNull(flowService);
this.dataBroker = Preconditions.checkNotNull(dataBroker);
private InstanceIdentifier<Flow> getFlowInstanceIdentifier(BulkFlowDsItem bulkFlow) {
final NodeRef nodeRef = bulkFlow.getNode();
- return ((InstanceIdentifier<Node>) nodeRef.getValue())
- .augmentation(FlowCapableNode.class)
+ return ((InstanceIdentifier<Node>) nodeRef.getValue()).augmentation(FlowCapableNode.class)
.child(Table.class, new TableKey(bulkFlow.getTableId()))
- .child(Flow.class,
- new FlowKey(new FlowId(bulkFlow.getFlowId())));
+ .child(Flow.class, new FlowKey(new FlowId(bulkFlow.getFlowId())));
}
@Override
return handleResultFuture(submitFuture);
}
- private ListenableFuture<RpcResult<Void>> handleResultFuture(CheckedFuture<Void,
- TransactionCommitFailedException> submitFuture) {
+ private ListenableFuture<RpcResult<Void>> handleResultFuture(
+ CheckedFuture<Void, TransactionCommitFailedException> submitFuture) {
final SettableFuture<RpcResult<Void>> rpcResult = SettableFuture.create();
Futures.addCallback(submitFuture, new FutureCallback<Void>() {
@Override
}
@Override
- public void onFailure(Throwable t) {
+ public void onFailure(Throwable throwable) {
RpcResultBuilder<Void> rpcResultBld = RpcResultBuilder.<Void>failed()
- .withRpcErrors(Collections.singleton(
- RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, null, t.getMessage())
- ));
+ .withRpcErrors(Collections.singleton(RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
+ null, throwable.getMessage())));
rpcResult.set(rpcResultBld.build());
}
});
}
@Override
- public void onFailure(Throwable t) {
+ public void onFailure(Throwable throwable) {
RpcResultBuilder<Void> rpcResultBld = RpcResultBuilder.<Void>failed()
- .withRpcErrors(Collections.singleton(
- RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, null, t.getMessage())
- ));
+ .withRpcErrors(Collections.singleton(RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
+ null, throwable.getMessage())));
rpcResult.set(rpcResultBld.build());
}
});
List<ListenableFuture<RpcResult<AddFlowOutput>>> bulkResults = new ArrayList<>();
for (BulkFlowBaseContentGrouping bulkFlow : input.getBulkFlowItem()) {
- AddFlowInputBuilder flowInputBuilder = new AddFlowInputBuilder((org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow) bulkFlow);
+ AddFlowInputBuilder flowInputBuilder = new AddFlowInputBuilder(
+ (org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow) bulkFlow);
final NodeRef nodeRef = bulkFlow.getNode();
flowInputBuilder.setNode(nodeRef);
flowInputBuilder.setTableId(bulkFlow.getTableId());
@Override
public Future<RpcResult<Void>> readFlowTest(ReadFlowTestInput input) {
- FlowReader flowReader = FlowReader.getNewInstance(dataBroker,
- input.getDpnCount().intValue(),
- input.getFlowsPerDpn().intValue(), input.isVerbose(),
- input.isIsConfigDs(),input.getStartTableId().shortValue(),
- input.getEndTableId().shortValue());
+ FlowReader flowReader = FlowReader.getNewInstance(dataBroker, input.getDpnCount().intValue(),
+ input.getFlowsPerDpn().intValue(), input.isVerbose(), input.isIsConfigDs(),
+ input.getStartTableId().shortValue(), input.getEndTableId().shortValue());
flowCounterBeanImpl.setReader(flowReader);
fjService.execute(flowReader);
RpcResultBuilder<Void> rpcResultBuilder = RpcResultBuilder.success();
@Override
public Future<RpcResult<Void>> flowRpcAddTest(FlowRpcAddTestInput input) {
FlowWriterDirectOFRpc flowAddRpcTestImpl = new FlowWriterDirectOFRpc(dataBroker, flowService, fjService);
- flowAddRpcTestImpl.rpcFlowAdd(
- input.getDpnId(),
- input.getFlowCount().intValue(),
+ flowAddRpcTestImpl.rpcFlowAdd(input.getDpnId(), input.getFlowCount().intValue(),
input.getRpcBatchSize().intValue());
-
RpcResultBuilder<Void> rpcResultBuilder = RpcResultBuilder.success();
return Futures.immediateFuture(rpcResultBuilder.build());
}
public Future<RpcResult<Void>> register() {
RpcResultBuilder<Void> rpcResultBuilder = RpcResultBuilder.success();
try {
- MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
- String pathToMBean = String.format("%s:type=%s",
- FlowCounter.class.getPackage().getName(),
- FlowCounter.class.getSimpleName());
- ObjectName name = null;
-
- name = new ObjectName(pathToMBean);
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ String pathToMBean = String.format("%s:type=%s", FlowCounter.class.getPackage().getName(),
+ FlowCounter.class.getSimpleName());
+ ObjectName name = new ObjectName(pathToMBean);
mbs.registerMBean(flowCounterBeanImpl, name);
- } catch (MalformedObjectNameException | InstanceAlreadyExistsException
- | MBeanRegistrationException | NotCompliantMBeanException e) {
+ } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException
+ | NotCompliantMBeanException e) {
rpcResultBuilder = RpcResultBuilder.failed();
LOG.warn("Exception occurred: {} ", e.getMessage(), e);
}
List<ListenableFuture<RpcResult<RemoveFlowOutput>>> bulkResults = new ArrayList<>();
for (BulkFlowBaseContentGrouping bulkFlow : input.getBulkFlowItem()) {
- RemoveFlowInputBuilder flowInputBuilder = new RemoveFlowInputBuilder((org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow) bulkFlow);
+ RemoveFlowInputBuilder flowInputBuilder = new RemoveFlowInputBuilder(
+ (org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow) bulkFlow);
final NodeRef nodeRef = bulkFlow.getNode();
flowInputBuilder.setNode(nodeRef);
flowInputBuilder.setTableId(bulkFlow.getTableId());
if (input.isTxChain()) {
FlowWriterTxChain flowTester = new FlowWriterTxChain(dataBroker, fjService);
flowCounterBeanImpl.setWriter(flowTester);
- if (input.isIsAdd()){
+ if (input.isIsAdd()) {
flowTester.addFlows(input.getDpnCount().intValue(), input.getFlowsPerDpn().intValue(),
input.getBatchSize().intValue(), input.getSleepFor().intValue(),
input.getSleepAfter().intValue(), input.getStartTableId().shortValue(),
if (input.isSeq()) {
FlowWriterSequential flowTester = new FlowWriterSequential(dataBroker, fjService);
flowCounterBeanImpl.setWriter(flowTester);
- if (input.isIsAdd()){
+ if (input.isIsAdd()) {
flowTester.addFlows(input.getDpnCount().intValue(), input.getFlowsPerDpn().intValue(),
input.getBatchSize().intValue(), input.getSleepFor().intValue(),
input.getStartTableId().shortValue(), input.getEndTableId().shortValue(),
} else {
FlowWriterConcurrent flowTester = new FlowWriterConcurrent(dataBroker, fjService);
flowCounterBeanImpl.setWriter(flowTester);
- if (input.isIsAdd()){
+ if (input.isIsAdd()) {
flowTester.addFlows(input.getDpnCount().intValue(), input.getFlowsPerDpn().intValue(),
input.getBatchSize().intValue(), input.getSleepFor().intValue(),
input.getSleepAfter().intValue(), input.getStartTableId().shortValue(),
flowCounterBeanImpl.setWriter(writer);
switch (input.getOperation()) {
case Add:
- writer.addTables(input.getDpnCount().intValue(),
- input.getStartTableId().shortValue(), input.getEndTableId().shortValue());
+ writer.addTables(input.getDpnCount().intValue(), input.getStartTableId().shortValue(),
+ input.getEndTableId().shortValue());
break;
case Delete:
- writer.deleteTables(input.getDpnCount().intValue(),
- input.getStartTableId().shortValue(), input.getEndTableId().shortValue());
+ writer.deleteTables(input.getDpnCount().intValue(), input.getStartTableId().shortValue(),
+ input.getEndTableId().shortValue());
break;
default:
RpcResultBuilder<Void> rpcResultBuilder = RpcResultBuilder.failed();
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.Nullable;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.slf4j.LoggerFactory;
public class TableWriter implements FlowCounterMBean {
- private final Logger LOG = LoggerFactory.getLogger(TableWriter.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TableWriter.class);
private final AtomicInteger writeOpStatus = new AtomicInteger(FlowCounter.OperationStatus.INIT.status());
private final AtomicLong taskCompletionTime = new AtomicLong(BulkOMaticUtils.DEFAULT_COMPLETION_TIME);
private class TableHandlerTask implements Runnable {
- private short startTableId;
- private short endTableId;
- private int dpnCount;
- private boolean isAdd;
+ private final short startTableId;
+ private final short endTableId;
+ private final int dpnCount;
+ private final boolean isAdd;
- public TableHandlerTask(int dpnCount, short startTableId, short endTableId, boolean isAdd) {
+ TableHandlerTask(int dpnCount, short startTableId, short endTableId, boolean isAdd) {
this.dpnCount = dpnCount;
this.startTableId = startTableId;
this.endTableId = endTableId;
String dpId = BulkOMaticUtils.DEVICE_TYPE_PREFIX + String.valueOf(dpn);
for (short tableId = startTableId; tableId <= endTableId; tableId++) {
WriteTransaction wtx = dataBroker.newWriteOnlyTransaction();
- Table table = new TableBuilder().setKey(new TableKey(tableId))
- .setId(tableId)
- .build();
+ Table table = new TableBuilder().setKey(new TableKey(tableId)).setId(tableId).build();
InstanceIdentifier<Table> tableIId = BulkOMaticUtils.getTableId(tableId, dpId);
if (isAdd) {
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
- public void onSuccess(@Nullable Void v) {
+ public void onSuccess(Void voidParameter) {
if (successfulWrites.incrementAndGet() == totalTables) {
if (failedWrites.get() > 0) {
writeOpStatus.set(FlowCounter.OperationStatus.FAILURE.status());
/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
@Mock
private DataBroker mockDataBroker;
@Mock
- private ReadOnlyTransaction rTx;
+ private ReadOnlyTransaction readOnlyTransaction;
@Mock
private Node node;
@Before
public void setUp() throws Exception {
- when(rTx.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Node>>any()))
+ when(readOnlyTransaction.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Node>>any()))
.thenReturn(Futures.immediateCheckedFuture(Optional.of(node)));
- when(mockDataBroker.newReadOnlyTransaction()).thenReturn(rTx);
- flowReader = FlowReader.getNewInstance(mockDataBroker, 2, 5, true, false, (short)1, (short)2 );
+ when(mockDataBroker.newReadOnlyTransaction()).thenReturn(readOnlyTransaction);
+ flowReader = FlowReader.getNewInstance(mockDataBroker, 2, 5, true, false, (short) 1, (short) 2);
}
@Test
Assert.assertEquals(10, flowReader.getFlowCount());
Assert.assertEquals(FlowCounter.OperationStatus.SUCCESS.status(), flowReader.getReadOpStatus());
}
-}
\ No newline at end of file
+}
/**
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@Mock
private ExecutorService mockFlowPusher;
@Mock
- private WriteTransaction wTx;
+ private WriteTransaction writeTransaction;
@Mock
private Nodes mockNodes;
@Before
public void setUp() throws Exception {
- doReturn(wTx).when(mockDataBroker).newWriteOnlyTransaction();
- Mockito.when(wTx.submit()).thenReturn(Futures.immediateCheckedFuture(null));
+ doReturn(writeTransaction).when(mockDataBroker).newWriteOnlyTransaction();
+ Mockito.when(writeTransaction.submit()).thenReturn(Futures.immediateCheckedFuture(null));
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- ((Runnable)invocation.getArguments()[0]).run();
- return null;
- }
+ Mockito.doAnswer(invocation -> {
+ ((Runnable) invocation.getArguments()[0]).run();
+ return null;
}).when(mockFlowPusher).execute(Matchers.<Runnable>any());
flowWriterConcurrent = new FlowWriterConcurrent(mockDataBroker, mockFlowPusher);
}
+
@Test
public void testAddFlows() throws Exception {
- flowWriterConcurrent.addFlows(1, FLOWS_PER_DPN, 10, 10, 10, (short)0, (short)1, true);
- Mockito.verify(wTx, Mockito.times(FLOWS_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
+ flowWriterConcurrent.addFlows(1, FLOWS_PER_DPN, 10, 10, 10, (short) 0, (short) 1, true);
+ Mockito.verify(writeTransaction, Mockito.times(FLOWS_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
}
@Test
public void testDeleteFlows() throws Exception {
- flowWriterConcurrent.deleteFlows(1, FLOWS_PER_DPN, 10, (short)0, (short)1);
- Mockito.verify(wTx, Mockito.times(FLOWS_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any());
+ flowWriterConcurrent.deleteFlows(1, FLOWS_PER_DPN, 10, (short) 0, (short) 1);
+ Mockito.verify(writeTransaction, Mockito.times(FLOWS_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any());
}
-
-}
\ No newline at end of file
+}
/**
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@Mock
private ExecutorService mockFlowPusher;
@Mock
- private ReadOnlyTransaction rTx;
+ private ReadOnlyTransaction readOnlyTransaction;
@Mock
private Nodes mockNodes;
@Before
public void setUp() throws Exception {
- when(mockDataBroker.newReadOnlyTransaction()).thenReturn(rTx);
+ when(mockDataBroker.newReadOnlyTransaction()).thenReturn(readOnlyTransaction);
NodeBuilder nodeBuilder = new NodeBuilder()
.setId(new NodeId("1"));
when(mockNodes.getNode()).thenReturn(nodes);
- when(rTx.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Nodes>>any()))
- .thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNodes)));
+ when(readOnlyTransaction.read(Mockito.any(LogicalDatastoreType.class),
+ Mockito.<InstanceIdentifier<Nodes>>any()))
+ .thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNodes)));
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- ((Runnable)invocation.getArguments()[0]).run();
- return null;
- }
+ Mockito.doAnswer(invocation -> {
+ ((Runnable)invocation.getArguments()[0]).run();
+ return null;
}).when(mockFlowPusher).execute(Matchers.<Runnable>any());
flowWriterDirectOFRpc = new FlowWriterDirectOFRpc(mockDataBroker, mockSalFlowService, mockFlowPusher);
flowWriterDirectOFRpc.rpcFlowAddAll(FLOWS_PER_DPN, 10);
Mockito.verify(mockSalFlowService, Mockito.times(FLOWS_PER_DPN)).addFlow(Mockito.<AddFlowInput>any());
}
-
-}
\ No newline at end of file
+}
/**
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@Mock
private ExecutorService mockFlowPusher;
@Mock
- private WriteTransaction wTx;
+ private WriteTransaction writeTransaction;
private FlowWriterSequential flowWriterSequential;
@Before
public void setUp() throws Exception {
- doReturn(wTx).when(mockDataBroker).newWriteOnlyTransaction();
- Mockito.when(wTx.submit()).thenReturn(Futures.immediateCheckedFuture(null));
+ doReturn(writeTransaction).when(mockDataBroker).newWriteOnlyTransaction();
+ Mockito.when(writeTransaction.submit()).thenReturn(Futures.immediateCheckedFuture(null));
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- ((Runnable)invocation.getArguments()[0]).run();
- return null;
- }
+ Mockito.doAnswer(invocation -> {
+ ((Runnable) invocation.getArguments()[0]).run();
+ return null;
}).when(mockFlowPusher).execute(Matchers.<Runnable>any());
flowWriterSequential = new FlowWriterSequential(mockDataBroker, mockFlowPusher);
}
+
@Test
public void testAddFlows() throws Exception {
- flowWriterSequential.addFlows(1, FLOWS_PER_DPN, 10, 10, (short)0, (short)1, true);
- Mockito.verify(wTx, Mockito.times(FLOWS_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
+ flowWriterSequential.addFlows(1, FLOWS_PER_DPN, 10, 10, (short) 0, (short) 1, true);
+ Mockito.verify(writeTransaction, Mockito.times(FLOWS_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
}
@Test
public void testDeleteFlows() throws Exception {
- flowWriterSequential.deleteFlows(1, FLOWS_PER_DPN, 10, (short)0, (short)1);
- Mockito.verify(wTx, Mockito.times(FLOWS_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any());
+ flowWriterSequential.deleteFlows(1, FLOWS_PER_DPN, 10, (short) 0, (short) 1);
+ Mockito.verify(writeTransaction, Mockito.times(FLOWS_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any());
}
-
-}
\ No newline at end of file
+}
/**
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
@Mock
private ExecutorService mockFlowPusher;
@Mock
- private WriteTransaction wTx;
+ private WriteTransaction writeTransaction;
private FlowWriterTxChain flowWriterTxChain;
@Before
public void setUp() throws Exception {
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- ((Runnable)invocation.getArguments()[0]).run();
- return null;
- }
+ Mockito.doAnswer(invocation -> {
+ ((Runnable) invocation.getArguments()[0]).run();
+ return null;
}).when(mockFlowPusher).execute(Matchers.<Runnable>any());
final BindingTransactionChain mockedTxChain = mock(BindingTransactionChain.class);
- when(mockedTxChain.newWriteOnlyTransaction()).thenReturn(wTx);
+ when(mockedTxChain.newWriteOnlyTransaction()).thenReturn(writeTransaction);
doReturn(mockedTxChain).when(mockDataBroker).createTransactionChain(Matchers.<TransactionChainListener>any());
- when(wTx.submit()).thenReturn(Futures.immediateCheckedFuture(null));
+ when(writeTransaction.submit()).thenReturn(Futures.immediateCheckedFuture(null));
flowWriterTxChain = new FlowWriterTxChain(mockDataBroker, mockFlowPusher);
}
+
@Test
public void testAddFlows() throws Exception {
- flowWriterTxChain.addFlows(1, FLOWS_PER_DPN, 10, 10, 10, (short)0, (short)1, true);
- Mockito.verify(wTx, Mockito.times(FLOWS_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
+ flowWriterTxChain.addFlows(1, FLOWS_PER_DPN, 10, 10, 10, (short) 0, (short) 1, true);
+ Mockito.verify(writeTransaction, Mockito.times(FLOWS_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
}
@Test
public void testDeleteFlows() throws Exception {
- flowWriterTxChain.deleteFlows(1, FLOWS_PER_DPN, 10, (short)0, (short)1);
- Mockito.verify(wTx, Mockito.times(FLOWS_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any());
+ flowWriterTxChain.deleteFlows(1, FLOWS_PER_DPN, 10, (short) 0, (short) 1);
+ Mockito.verify(writeTransaction, Mockito.times(FLOWS_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any());
}
-
-}
\ No newline at end of file
+}
/**
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
@Mock
private SalFlowService mockSalFlowService;
@Mock
- private WriteTransaction wTx;
+ private WriteTransaction writeTransaction;
@Mock
- private ReadOnlyTransaction rTx;
+ private ReadOnlyTransaction readOnlyTransaction;
@Mock
private Nodes mockNodes;
@Mock
@Before
public void setUp() throws Exception {
- when(mockDataBroker.newWriteOnlyTransaction()).thenReturn(wTx);
- when(mockDataBroker.newReadOnlyTransaction()).thenReturn(rTx);
- when(rTx.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Node>>any()))
+ when(mockDataBroker.newWriteOnlyTransaction()).thenReturn(writeTransaction);
+ when(mockDataBroker.newReadOnlyTransaction()).thenReturn(readOnlyTransaction);
+ when(readOnlyTransaction.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Node>>any()))
.thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNode)));
salBulkFlowService = new SalBulkFlowServiceImpl(mockSalFlowService, mockDataBroker);
}
@Test
public void testAddRemoveFlowsDs() throws Exception {
- Mockito.when(wTx.submit()).thenReturn(Futures.immediateCheckedFuture(null));
+ Mockito.when(writeTransaction.submit()).thenReturn(Futures.immediateCheckedFuture(null));
- final BulkFlowDsItemBuilder bulkFlowDsItemBuilder = new BulkFlowDsItemBuilder()
- .setFlowId(new FlowId("1"))
- .setTableId((short)2);
+ final BulkFlowDsItemBuilder bulkFlowDsItemBuilder = new BulkFlowDsItemBuilder().setFlowId(new FlowId("1"))
+ .setTableId((short) 2);
final InstanceIdentifier<Node> nodeId = BulkOMaticUtils.getFlowCapableNodeId("1");
bulkFlowDsItemBuilder.setNode(new NodeRef(nodeId));
final AddFlowsDsInput addFlowsDsInput = addFlowsDsInputBuilder.build();
salBulkFlowService.addFlowsDs(addFlowsDsInput);
- verify(wTx).submit();
- verify(wTx).put(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<Flow>>any(), flowArgumentCaptor.capture(), Mockito.anyBoolean());
+ verify(writeTransaction).submit();
+ verify(writeTransaction).put(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<Flow>>any(),
+ flowArgumentCaptor.capture(), Mockito.anyBoolean());
Flow flow = flowArgumentCaptor.getValue();
Assert.assertEquals("1", flow.getId().getValue());
final RemoveFlowsDsInput removeFlowsDsInput = removeFlowsDsInputBuilder.build();
salBulkFlowService.removeFlowsDs(removeFlowsDsInput);
- verify(wTx).delete(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<Flow>>any());
- verify(wTx,times(2)).submit();
+ verify(writeTransaction).delete(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<Flow>>any());
+ verify(writeTransaction, times(2)).submit();
}
@Test
public void testReadFlowTest() throws Exception {
- final ReadFlowTestInputBuilder readFlowTestInputBuilder = new ReadFlowTestInputBuilder()
- .setDpnCount(1L)
- .setStartTableId(1L)
- .setEndTableId(2L)
- .setIsConfigDs(false)
- .setFlowsPerDpn(1L)
- .setVerbose(true);
+ final ReadFlowTestInputBuilder readFlowTestInputBuilder = new ReadFlowTestInputBuilder().setDpnCount(1L)
+ .setStartTableId(1L).setEndTableId(2L).setIsConfigDs(false).setFlowsPerDpn(1L).setVerbose(true);
final ReadFlowTestInput readFlowTestInput = readFlowTestInputBuilder.build();
final Future<RpcResult<Void>> resultFuture = salBulkFlowService.readFlowTest(readFlowTestInput);
@Test
public void testFlowRpcAddTest() throws Exception {
- when(rTx.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Nodes>>any()))
- .thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNodes)));
+ when(readOnlyTransaction.read(Mockito.any(LogicalDatastoreType.class),
+ Mockito.<InstanceIdentifier<Nodes>>any()))
+ .thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNodes)));
- final FlowRpcAddTestInputBuilder flowRpcAddTestInputBuilder = new FlowRpcAddTestInputBuilder()
- .setFlowCount(1L)
- .setDpnId("1")
- .setRpcBatchSize(1L);
+ final FlowRpcAddTestInputBuilder flowRpcAddTestInputBuilder = new FlowRpcAddTestInputBuilder().setFlowCount(1L)
+ .setDpnId("1").setRpcBatchSize(1L);
final FlowRpcAddTestInput flowRpcAddTestInput = flowRpcAddTestInputBuilder.build();
final Future<RpcResult<Void>> resultFuture = salBulkFlowService.flowRpcAddTest(flowRpcAddTestInput);
@Test
public void testFlowTest() throws Exception {
- final FlowTestInputBuilder flowTestInputBuilder = new FlowTestInputBuilder()
- .setBatchSize(1L)
- .setDpnCount(1L)
- .setEndTableId(2L)
- .setFlowsPerDpn(1L)
- .setIsAdd(true)
- .setSeq(true)
- .setSleepAfter(20L)
- .setSleepFor(1L)
- .setStartTableId(1L)
- .setTxChain(true)
- .setCreateParents(true);
+ final FlowTestInputBuilder flowTestInputBuilder = new FlowTestInputBuilder().setBatchSize(1L).setDpnCount(1L)
+ .setEndTableId(2L).setFlowsPerDpn(1L).setIsAdd(true).setSeq(true).setSleepAfter(20L).setSleepFor(1L)
+ .setStartTableId(1L).setTxChain(true).setCreateParents(true);
FlowTestInput flowTestInput = flowTestInputBuilder.build();
@Test
public void testFlowRpcAddMultiple() throws Exception {
- when(rTx.read(Mockito.any(LogicalDatastoreType.class), Mockito.<InstanceIdentifier<Nodes>>any()))
- .thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNodes)));
+ when(readOnlyTransaction.read(Mockito.any(LogicalDatastoreType.class),
+ Mockito.<InstanceIdentifier<Nodes>>any()))
+ .thenReturn(Futures.immediateCheckedFuture(Optional.of(mockNodes)));
final FlowRpcAddMultipleInputBuilder flowRpcAddMultipleInputBuilder = new FlowRpcAddMultipleInputBuilder()
- .setFlowCount(1L)
- .setRpcBatchSize(1L);
+ .setFlowCount(1L).setRpcBatchSize(1L);
final FlowRpcAddMultipleInput flowRpcAddMultipleInput = flowRpcAddMultipleInputBuilder.build();
@Test
public void testTableTest() throws Exception {
- final TableTestInputBuilder tableTestInputBuilder = new TableTestInputBuilder()
- .setStartTableId(0L)
- .setEndTableId(99L)
- .setDpnCount(1L)
- .setOperation(Operation.Add);
+ final TableTestInputBuilder tableTestInputBuilder = new TableTestInputBuilder().setStartTableId(0L)
+ .setEndTableId(99L).setDpnCount(1L).setOperation(Operation.Add);
TableTestInput tableTestInput = tableTestInputBuilder.build();
Assert.assertTrue(salBulkFlowService.tableTest(tableTestInput).get().isSuccessful());
}
-}
\ No newline at end of file
+}
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@Mock
private ExecutorService mockTablePusher;
@Mock
- private WriteTransaction wTx;
+ private WriteTransaction writeTransaction;
private TableWriter tableWriter;
@Before
public void setUp() throws Exception {
- doReturn(wTx).when(mockDataBroker).newWriteOnlyTransaction();
- Mockito.when(wTx.submit()).thenReturn(Futures.immediateCheckedFuture(null));
+ doReturn(writeTransaction).when(mockDataBroker).newWriteOnlyTransaction();
+ Mockito.when(writeTransaction.submit()).thenReturn(Futures.immediateCheckedFuture(null));
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- ((Runnable)invocation.getArguments()[0]).run();
- return null;
- }
+ Mockito.doAnswer(invocation -> {
+ ((Runnable) invocation.getArguments()[0]).run();
+ return null;
}).when(mockTablePusher).execute(Matchers.<Runnable>any());
tableWriter = new TableWriter(mockDataBroker, mockTablePusher);
}
+
@Test
public void testAddTables() throws Exception {
tableWriter.addTables(DPN_COUNT, START_TABLE_ID, END_TABLE_ID);
- Mockito.verify(wTx, Mockito.times(TABLES_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
+ Mockito.verify(writeTransaction, Mockito.times(TABLES_PER_DPN)).put(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any(), Matchers.<DataObject>any(), Matchers.anyBoolean());
}
@Test
public void testDeleteTables() throws Exception {
tableWriter.deleteTables(DPN_COUNT, START_TABLE_ID, END_TABLE_ID);
- Mockito.verify(wTx, Mockito.times(TABLES_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(), Matchers.<InstanceIdentifier<DataObject>>any());
+ Mockito.verify(writeTransaction, Mockito.times(TABLES_PER_DPN)).delete(Matchers.<LogicalDatastoreType>any(),
+ Matchers.<InstanceIdentifier<DataObject>>any());
}
-}
\ No newline at end of file
+}