import org.slf4j.LoggerFactory;
/**
- * Listens to config changes and delegates add/remove/update/barrier to {@link SyncReactor}.
+ * Listens to config changes and delegates sync entry to {@link SyncReactor}.
*/
public class SimplifiedConfigListener extends AbstractFrmSyncListener<FlowCapableNode> {
private static final Logger LOG = LoggerFactory.getLogger(SimplifiedConfigListener.class);
}
/**
- * Compare cached operational with current config modification. If operational is not present
- * skip calling Inventory RPCs.
+ * Update cache. If operational data are present, choose appropriate data and start syncup.
+ * Otherwise skip the incoming change.
* @throws InterruptedException from syncup
*/
protected Optional<ListenableFuture<Boolean>> processNodeModification(
}
/**
- * Add only what is missing in operational store. Config. node could be added in two situations:
- * <ul>
- * <li>Note very first time after restart was handled by operational listener. Syncup should
- * calculate no delta (we don want to reconfigure switch if not necessary).</li>
- * <li>But later the config. node could be deleted, after that config node added again. Syncup
- * should calculate that everything needs to be added. Operational store should be empty in
- * optimal case (but the switch could be reprogrammed by another person/system.</li>
- * </ul>
+ * Add only what is missing on the device. If a node was added to the config DS and it is already present
+ * in the operational DS (connected), the diff between the new configuration and the actual configuration
+ * (seen in operational) should be calculated and sent to the device.
*/
private ListenableFuture<Boolean> onNodeAdded(final InstanceIdentifier<FlowCapableNode> nodePath,
final FlowCapableNode dataAfter,
/**
* Apply minimal changes very fast. For better performance needed just compare config
- * after+before. Config listener should not be dependent on operational flows/groups while
+ * after+before. Config listener should not be dependent on operational flows/groups/meters while
* updating config because operational store is highly async and it depends on another module in
* system which is updating operational store (that component is also trying to solve
* scale/performance issues on several layers).
}
/**
- * Remove values that are being deleted in the config from the switch. Note, this could be
- * probably optimized using dedicated wipe-out RPC, but it has impact on switch if it is
- * programmed by two person/system
+ * Remove values that are being deleted in the config from the switch.
+ * Note, this could be probably optimized using dedicated wipe-out RPC.
*/
private ListenableFuture<Boolean> onNodeDeleted(final InstanceIdentifier<FlowCapableNode> nodePath,
final FlowCapableNode dataBefore) throws InterruptedException {
import org.slf4j.LoggerFactory;
/**
- * Listens to operational new nodes and delegates add/remove/update/barrier to {@link SyncReactor}.
+ * Listens to operational changes and starts reconciliation through {@link SyncReactor} when necessary.
*/
public class SimplifiedOperationalListener extends AbstractFrmSyncListener<Node> {
private static final Logger LOG = LoggerFactory.getLogger(SimplifiedOperationalListener.class);
}
/**
- * This method behaves like this:
- * <ul>
- * <li>If node is added to operational store then reconciliation.</li>
- * <li>Node is deleted from operational cache is removed.</li>
- * <li>Skip this event otherwise.</li>
- * </ul>
- *
+ * Update cache, register for device mastership when the device is connected, and start reconciliation
+ * if the device is registered and the actual modification is consistent. Skip the event otherwise.
* @throws InterruptedException from syncup
*/
protected Optional<ListenableFuture<Boolean>> processNodeModification(
deviceMastershipManager.onDeviceConnected(nodeId);
}
- if (isRegisteredAndConsistentForReconcile(modification)) {
+ if (reconciliationRegistry.isRegistered(nodeId) && isConsistentForReconcile(modification)) {
return reconciliation(modification);
} else {
return skipModification(modification);
/**
* Remove if delete. Update only if FlowCapableNode Augmentation modified.
- *
+ * Unregister for device mastership.
* @param modification Datastore modification
*/
private void updateCache(DataTreeModification<Node> modification) {
return false;
}
+ /**
+ * If the node is present in the config DS, the diff between the wanted configuration (in the config DS)
+ * and the actual device configuration (coming from operational) should be calculated and sent to the device.
+ * @param modification from DS
+ * @return optional syncup future
+ * @throws InterruptedException from syncup
+ */
private Optional<ListenableFuture<Boolean>> reconciliation(DataTreeModification<Node> modification) throws InterruptedException {
final NodeId nodeId = ModificationUtil.nodeId(modification);
final Optional<FlowCapableNode> nodeConfiguration = configDao.loadByNodeId(nodeId);
}
}
- private boolean isRegisteredAndConsistentForReconcile(DataTreeModification<Node> modification) {
+ private boolean isConsistentForReconcile(DataTreeModification<Node> modification) {
final NodeId nodeId = PathUtil.digNodeId(modification.getRootPath().getRootIdentifier());
-
- if (!reconciliationRegistry.isRegistered(nodeId)) {
- return false;
- }
-
final FlowCapableStatisticsGatheringStatus gatheringStatus = modification.getRootNode().getDataAfter()
.getAugmentation(FlowCapableStatisticsGatheringStatus.class);
if (node == null) {
return true;
}
-
final List<NodeConnector> nodeConnectors = node.getNodeConnector();
-
return nodeConnectors == null || nodeConnectors.isEmpty();
}
public LogicalDatastoreType dsType() {
return LogicalDatastoreType.OPERATIONAL;
}
-
+
}
final ListenableFuture<Boolean> endResult =
delegate.syncup(flowcapableNodePath, syncupEntry);
- Futures.addCallback(endResult, new FutureCallback<Boolean>() {
- @Override
- public void onSuccess(@Nullable final Boolean result) {
- if (LOG.isDebugEnabled()) {
- final long stampFinished = System.nanoTime();
- LOG.debug("syncup finished {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
- formatNanos(stampFinished - stampBeforeGuard),
- formatNanos(stampFinished - stampAfterGuard),
- formatNanos(stampAfterGuard - stampBeforeGuard),
- guard.availablePermits(), threadName());
- }
-
- releaseGuardForNodeId(guard);
- }
-
- @Override
- public void onFailure(final Throwable t) {
- if (LOG.isDebugEnabled()) {
- final long stampFinished = System.nanoTime();
- LOG.warn("syncup failed {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
- formatNanos(stampFinished - stampBeforeGuard),
- formatNanos(stampFinished - stampAfterGuard),
- formatNanos(stampAfterGuard - stampBeforeGuard),
- guard.availablePermits(), threadName());
- }
-
- releaseGuardForNodeId(guard);
- }
- });
+ Futures.addCallback(endResult, createSyncupCallback(guard, stampBeforeGuard, stampAfterGuard, nodeId));
return endResult;
} catch (InterruptedException e) {
releaseGuardForNodeId(guard);
}
}
- private String formatNanos(long nanos) {
+ private static FutureCallback<Boolean> createSyncupCallback(final Semaphore guard,
+ final long stampBeforeGuard,
+ final long stampAfterGuard,
+ final NodeId nodeId) {
+ return new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(@Nullable final Boolean result) {
+ if (LOG.isDebugEnabled()) {
+ final long stampFinished = System.nanoTime();
+ LOG.debug("syncup finished {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
+ formatNanos(stampFinished - stampBeforeGuard), formatNanos(stampFinished - stampAfterGuard),
+ formatNanos(stampAfterGuard - stampBeforeGuard), guard.availablePermits(), threadName());
+ }
+ releaseGuardForNodeId(guard);
+ }
+ @Override
+ public void onFailure(final Throwable t) {
+ final long stampFinished = System.nanoTime();
+ LOG.error("syncup failed {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
+ formatNanos(stampFinished - stampBeforeGuard), formatNanos(stampFinished - stampAfterGuard),
+ formatNanos(stampAfterGuard - stampBeforeGuard), guard.availablePermits(), threadName());
+ releaseGuardForNodeId(guard);
+ }};
+ }
+
+ private static String formatNanos(long nanos) {
return "'" + TimeUnit.NANOSECONDS.toMillis(nanos) + " ms'";
}
* Unlock and release guard.
* @param guard semaphore guard which should be unlocked
*/
- private void releaseGuardForNodeId(final Semaphore guard) {
+ private static void releaseGuardForNodeId(final Semaphore guard) {
if (guard != null) {
guard.release();
LOG.trace("syncup release guard:{} thread:{}", guard, threadName());
if (input == null) {
return false;
}
-
if (LOG.isDebugEnabled()) {
final CrudCounts flowCrudCounts = counters.getFlowCrudCounts();
final CrudCounts meterCrudCounts = counters.getMeterCrudCounts();
final CrudCounts groupCrudCounts = counters.getGroupCrudCounts();
- LOG.debug("syncup outcome[{}] (added/updated/removed): flow={}/{}/{}, meter={}/{}/{}, group={}/{}/{}, took={} ms",
+ LOG.debug("syncup outcome[{}] (added/updated/removed): flow={}/{}/{}, group={}/{}/{}, meter={}/{}/{}, took={} ms",
nodeId.getValue(),
- flowCrudCounts.getAdded(),
- flowCrudCounts.getUpdated(),
- flowCrudCounts.getRemoved(),
- meterCrudCounts.getAdded(),
- meterCrudCounts.getUpdated(),
- meterCrudCounts.getRemoved(),
- groupCrudCounts.getAdded(),
- groupCrudCounts.getUpdated(),
- groupCrudCounts.getRemoved(),
- TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - counters.getStartNano())
- );
+ flowCrudCounts.getAdded(), flowCrudCounts.getUpdated(), flowCrudCounts.getRemoved(),
+ groupCrudCounts.getAdded(), groupCrudCounts.getUpdated(), groupCrudCounts.getRemoved(),
+ meterCrudCounts.getAdded(), meterCrudCounts.getUpdated(), meterCrudCounts.getRemoved(),
+ TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - counters.getStartNano()));
}
-
LOG.trace("syncup errors: {}", input.getErrors());
return input.isSuccessful();
- }
- });
+ }});
}
@VisibleForTesting
try {
registration.close();
} catch (Exception e) {
- LOG.error("FRS cluster service close fail: {}", nodeId.getValue());
+ LOG.error("FRS cluster service close fail: {} {}", nodeId.getValue(), e);
}
}
LOG.debug("FRS service unregistered for: {}", nodeId.getValue());
public class FlowForwarder implements ForwardingRulesCommitter<Flow, AddFlowOutput, RemoveFlowOutput, UpdateFlowOutput> {
private static final Logger LOG = LoggerFactory.getLogger(FlowForwarder.class);
+ private static final String TABLE_ID_MISMATCH = "tableId mismatch";
private final SalFlowService salFlowService;
public FlowForwarder(final SalFlowService salFlowService) {
return salFlowService.removeFlow(builder.build());
} else {
return RpcResultBuilder.<RemoveFlowOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "tableId mismatch").buildFuture();
+ .withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
}
}
output = salFlowService.updateFlow(builder.build());
} else {
output = RpcResultBuilder.<UpdateFlowOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "tableId mismatch").buildFuture();
+ .withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
}
return output;
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
output = salFlowService.addFlow(builder.build());
} else {
- output = RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, "tableId mismatch").buildFuture();
+ output = RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
}
return output;
}
public void onSuccess(@Nullable final RpcResult<ProcessFlatBatchOutput> result) {
if (!result.isSuccessful() && result.getResult() != null && !result.getResult().getBatchFailure().isEmpty()) {
Map<Range<Integer>, Batch> batchMap = mapBatchesToRanges(inputBatchBag, failureIndexLimit);
-
- for (BatchFailure batchFailure : result.getResult().getBatchFailure()) {
- for (Map.Entry<Range<Integer>, Batch> rangeBatchEntry : batchMap.entrySet()) {
- if (rangeBatchEntry.getKey().contains(batchFailure.getBatchOrder())) {
- // get type and decrease
- final BatchChoice batchChoice = rangeBatchEntry.getValue().getBatchChoice();
- decrementCounters(batchChoice, counters);
- break;
- }
- }
- }
+ decrementBatchFailuresCounters(result.getResult().getBatchFailure(), batchMap, counters);
}
}
};
}
+ private static void decrementBatchFailuresCounters(final List<BatchFailure> batchFailures,
+ final Map<Range<Integer>, Batch> batchMap,
+ final SyncCrudCounters counters) {
+ for (BatchFailure batchFailure : batchFailures) {
+ for (Map.Entry<Range<Integer>, Batch> rangeBatchEntry : batchMap.entrySet()) {
+ if (rangeBatchEntry.getKey().contains(batchFailure.getBatchOrder())) {
+ // get type and decrease
+ final BatchChoice batchChoice = rangeBatchEntry.getValue().getBatchChoice();
+ decrementCounters(batchChoice, counters);
+ break;
+ }
+ }
+ }
+ }
+
static void decrementCounters(final BatchChoice batchChoice, final SyncCrudCounters counters) {
if (batchChoice instanceof FlatBatchAddFlowCase) {
counters.getFlowCrudCounts().decAdded();
@VisibleForTesting
static int assembleRemoveFlows(final List<Batch> batchBag, int batchOrder, final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
// process flow remove
+ int order = batchOrder;
if (flowItemSyncTableMap != null) {
for (Map.Entry<TableKey, ItemSyncBox<Flow>> syncBoxEntry : flowItemSyncTableMap.entrySet()) {
final ItemSyncBox<Flow> flowItemSyncBox = syncBoxEntry.getValue();
.setBatchChoice(new FlatBatchRemoveFlowCaseBuilder()
.setFlatBatchRemoveFlow(flatBatchRemoveFlowBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleAddOrUpdateGroups(final List<Batch> batchBag, int batchOrder, final List<ItemSyncBox<Group>> groupsToAddOrUpdate) {
// process group add+update
+ int order = batchOrder;
if (groupsToAddOrUpdate != null) {
for (ItemSyncBox<Group> groupItemSyncBox : groupsToAddOrUpdate) {
if (!groupItemSyncBox.getItemsToPush().isEmpty()) {
.setBatchChoice(new FlatBatchAddGroupCaseBuilder()
.setFlatBatchAddGroup(flatBatchAddGroupBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
.setBatchChoice(new FlatBatchUpdateGroupCaseBuilder()
.setFlatBatchUpdateGroup(flatBatchUpdateGroupBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleRemoveGroups(final List<Batch> batchBag, int batchOrder, final List<ItemSyncBox<Group>> groupsToRemoveOrUpdate) {
// process group add+update
+ int order = batchOrder;
if (groupsToRemoveOrUpdate != null) {
for (ItemSyncBox<Group> groupItemSyncBox : groupsToRemoveOrUpdate) {
if (!groupItemSyncBox.getItemsToPush().isEmpty()) {
.setBatchChoice(new FlatBatchRemoveGroupCaseBuilder()
.setFlatBatchRemoveGroup(flatBatchRemoveGroupBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleAddOrUpdateMeters(final List<Batch> batchBag, int batchOrder, final ItemSyncBox<Meter> meterItemSyncBox) {
// process meter add+update
+ int order = batchOrder;
if (meterItemSyncBox != null) {
if (!meterItemSyncBox.getItemsToPush().isEmpty()) {
final List<FlatBatchAddMeter> flatBatchAddMeterBag =
.setBatchChoice(new FlatBatchAddMeterCaseBuilder()
.setFlatBatchAddMeter(flatBatchAddMeterBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
.setBatchChoice(new FlatBatchUpdateMeterCaseBuilder()
.setFlatBatchUpdateMeter(flatBatchUpdateMeterBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleRemoveMeters(final List<Batch> batchBag, int batchOrder, final ItemSyncBox<Meter> meterItemSyncBox) {
// process meter remove
+ int order = batchOrder;
if (meterItemSyncBox != null && !meterItemSyncBox.getItemsToPush().isEmpty()) {
final List<FlatBatchRemoveMeter> flatBatchRemoveMeterBag =
new ArrayList<>(meterItemSyncBox.getItemsToUpdate().size());
.setBatchChoice(new FlatBatchRemoveMeterCaseBuilder()
.setFlatBatchRemoveMeter(flatBatchRemoveMeterBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleAddOrUpdateFlows(final List<Batch> batchBag, int batchOrder, final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
// process flow add+update
+ int order = batchOrder;
if (flowItemSyncTableMap != null) {
for (Map.Entry<TableKey, ItemSyncBox<Flow>> syncBoxEntry : flowItemSyncTableMap.entrySet()) {
final ItemSyncBox<Flow> flowItemSyncBox = syncBoxEntry.getValue();
.setBatchChoice(new FlatBatchAddFlowCaseBuilder()
.setFlatBatchAddFlow(flatBatchAddFlowBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
.setBatchChoice(new FlatBatchUpdateFlowCaseBuilder()
.setFlatBatchUpdateFlow(flatBatchUpdateFlowBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
public SyncPlanPushStrategyFlatBatchImpl setFlatBatchService(final SalFlatBatchService flatBatchService) {
private static final Logger LOG = LoggerFactory.getLogger(FxChainUtil.class);
+ private FxChainUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
public static FutureCallback<RpcResult<Void>> logResultCallback(final NodeId nodeId, final String prefix) {
return new FutureCallback<RpcResult<Void>>() {
* Basic {@link DataTreeModification} related tools.
*/
public class ModificationUtil {
+
+ private ModificationUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
public static String nodeIdValue(DataTreeModification<Node> modification) {
final NodeId nodeId = nodeId(modification);
* Basic {@link InstanceIdentifier} related tools.
*/
public class PathUtil {
+
+ private PathUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
public static NodeId digNodeId(final InstanceIdentifier<?> nodeIdent) {
return nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId();
}
import java.util.Map;
import java.util.Objects;
import java.util.Set;
-import javax.annotation.Nullable;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.GroupActionCase;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
/**
* Util methods for group reconcil task (future chaining, transforms).
*/
-public class ReconcileUtil {
+public final class ReconcileUtil {
private static final Logger LOG = LoggerFactory.getLogger(ReconcileUtil.class);
+ private ReconcileUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
/**
* @param previousItemAction description for case when the triggering future contains failure
* @param <D> type of rpc output (gathered in list)
* @return single rpc result of type Void honoring all partial rpc results
*/
public static <D> Function<List<RpcResult<D>>, RpcResult<Void>> createRpcResultCondenser(final String previousItemAction) {
- return new Function<List<RpcResult<D>>, RpcResult<Void>>() {
- @Nullable
- @Override
- public RpcResult<Void> apply(@Nullable final List<RpcResult<D>> input) {
- final RpcResultBuilder<Void> resultSink;
- if (input != null) {
- List<RpcError> errors = new ArrayList<>();
- for (RpcResult<D> rpcResult : input) {
- if (!rpcResult.isSuccessful()) {
- errors.addAll(rpcResult.getErrors());
- }
- }
- if (errors.isEmpty()) {
- resultSink = RpcResultBuilder.success();
- } else {
- resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
+ return input -> {
+ final RpcResultBuilder<Void> resultSink;
+ if (input != null) {
+ List<RpcError> errors = new ArrayList<>();
+ for (RpcResult<D> rpcResult : input) {
+ if (!rpcResult.isSuccessful()) {
+ errors.addAll(rpcResult.getErrors());
}
+ }
+ if (errors.isEmpty()) {
+ resultSink = RpcResultBuilder.success();
} else {
- resultSink = RpcResultBuilder.<Void>failed()
- .withError(RpcError.ErrorType.APPLICATION, "previous " + previousItemAction + " failed");
-
+ resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
}
-
- return resultSink.build();
+ } else {
+ resultSink = RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "previous " + previousItemAction + " failed");
}
+ return resultSink.build();
};
}
* @return single rpc result of type Void honoring all partial rpc results
*/
public static <D> Function<RpcResult<D>, RpcResult<Void>> createRpcResultToVoidFunction(final String actionDescription) {
- return new Function<RpcResult<D>, RpcResult<Void>>() {
- @Nullable
- @Override
- public RpcResult<Void> apply(@Nullable final RpcResult<D> input) {
- final RpcResultBuilder<Void> resultSink;
- if (input != null) {
- List<RpcError> errors = new ArrayList<>();
- if (!input.isSuccessful()) {
- errors.addAll(input.getErrors());
- resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
- } else {
- resultSink = RpcResultBuilder.success();
- }
+ return input -> {
+ final RpcResultBuilder<Void> resultSink;
+ if (input != null) {
+ List<RpcError> errors = new ArrayList<>();
+ if (!input.isSuccessful()) {
+ errors.addAll(input.getErrors());
+ resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
} else {
- resultSink = RpcResultBuilder.<Void>failed()
- .withError(RpcError.ErrorType.APPLICATION, "action of " + actionDescription + " failed");
-
+ resultSink = RpcResultBuilder.success();
}
-
- return resultSink.build();
+ } else {
+ resultSink = RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "action of " + actionDescription + " failed");
}
+ return resultSink.build();
};
}
final Map<Long, Group> installedGroupsArg,
final Collection<Group> pendingGroups,
final boolean gatherUpdates) {
-
final Map<Long, Group> installedGroups = new HashMap<>(installedGroupsArg);
final List<ItemSyncBox<Group>> plan = new ArrayList<>();
this.match = flow.getMatch();
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ SwitchFlowId that = (SwitchFlowId) o;
+
+ if (tableId != null ? !tableId.equals(that.tableId) : that.tableId != null) {
+ return false;
+ }
+ if (priority != null ? !priority.equals(that.priority) : that.priority != null) {
+ return false;
+ }
+ return match != null ? match.equals(that.match) : that.match == null;
+ }
+
@Override
public int hashCode() {
final int prime = 31;
return result;
}
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- SwitchFlowId other = (SwitchFlowId) obj;
- if (match == null) {
- if (other.match != null)
- return false;
- } else if (!match.equals(other.match))
- return false;
- if (priority == null) {
- if (other.priority != null)
- return false;
- } else if (!priority.equals(other.priority))
- return false;
- if (tableId == null) {
- if (other.tableId != null)
- return false;
- } else if (!tableId.equals(other.tableId))
- return false;
- return true;
- }
}
return dsTypeBefore == that.dsTypeBefore;
}
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = after != null ? after.hashCode() : 0;
+ result = prime * result + (dsTypeAfter != null ? dsTypeAfter.hashCode() : 0);
+ result = prime * result + (before != null ? before.hashCode() : 0);
+ result = prime * result + (dsTypeBefore != null ? dsTypeBefore.hashCode() : 0);
+ return result;
+ }
+
}
*/
package org.opendaylight.openflowplugin.applications.frsync.impl;
+import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
Matchers.<DataTreeIdentifier<FlowCapableNode>>any(),
Matchers.<DataTreeChangeListener<FlowCapableNode>>any());
}
+
+ @After
+ public void tearDown() throws InterruptedException {
+ provider.close();
+ }
+
}
\ No newline at end of file
<groupId>org.opendaylight.openflowplugin.model</groupId>
<artifactId>model-flow-service</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>openflowplugin-common</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-api</artifactId>
* Objects of this class send LLDP frames over all flow-capable ports that can
* be discovered through inventory.
*/
-public class LLDPSpeaker implements AutoCloseable, NodeConnectorEventsObserver,
- Runnable {
-
- private static final Logger LOG = LoggerFactory
- .getLogger(LLDPSpeaker.class);
+public class LLDPSpeaker implements AutoCloseable, NodeConnectorEventsObserver, Runnable {
+ private static final Logger LOG = LoggerFactory.getLogger(LLDPSpeaker.class);
private static final long LLDP_FLOOD_PERIOD = 5;
private final PacketProcessingService packetProcessingService;
private final ScheduledExecutorService scheduledExecutorService;
- private final Map<InstanceIdentifier<NodeConnector>, TransmitPacketInput> nodeConnectorMap = new ConcurrentHashMap<>();
+ private final Map<InstanceIdentifier<NodeConnector>, TransmitPacketInput> nodeConnectorMap =
+ new ConcurrentHashMap<>();
private final ScheduledFuture<?> scheduledSpeakerTask;
private final MacAddress addressDestionation;
private volatile OperStatus operationalStatus = OperStatus.RUN;
- public LLDPSpeaker(final PacketProcessingService packetProcessingService,
- final MacAddress addressDestionation) {
- this(packetProcessingService, Executors
- .newSingleThreadScheduledExecutor(), addressDestionation);
+ public LLDPSpeaker(final PacketProcessingService packetProcessingService, final MacAddress addressDestionation) {
+ this(packetProcessingService, Executors.newSingleThreadScheduledExecutor(), addressDestionation);
}
public void setOperationalStatus(final OperStatus operationalStatus) {
}
public LLDPSpeaker(final PacketProcessingService packetProcessingService,
- final ScheduledExecutorService scheduledExecutorService,
- final MacAddress addressDestionation) {
+ final ScheduledExecutorService scheduledExecutorService,
+ final MacAddress addressDestionation) {
this.addressDestionation = addressDestionation;
this.scheduledExecutorService = scheduledExecutorService;
scheduledSpeakerTask = this.scheduledExecutorService
- .scheduleAtFixedRate(this, LLDP_FLOOD_PERIOD,
- LLDP_FLOOD_PERIOD, TimeUnit.SECONDS);
+ .scheduleAtFixedRate(this, LLDP_FLOOD_PERIOD,LLDP_FLOOD_PERIOD, TimeUnit.SECONDS);
this.packetProcessingService = packetProcessingService;
- LOG.info(
- "LLDPSpeaker started, it will send LLDP frames each {} seconds",
- LLDP_FLOOD_PERIOD);
+ LOG.info("LLDPSpeaker started, it will send LLDP frames each {} seconds", LLDP_FLOOD_PERIOD);
}
/**
@Override
public void run() {
if (OperStatus.RUN.equals(operationalStatus)) {
- LOG.debug("Sending LLDP frames to {} ports...", nodeConnectorMap
- .keySet().size());
-
- for (InstanceIdentifier<NodeConnector> nodeConnectorInstanceId : nodeConnectorMap
- .keySet()) {
- NodeConnectorId nodeConnectorId = InstanceIdentifier.keyOf(
- nodeConnectorInstanceId).getId();
- LOG.trace("Sending LLDP through port {}",
- nodeConnectorId.getValue());
- packetProcessingService.transmitPacket(nodeConnectorMap
- .get(nodeConnectorInstanceId));
+ LOG.debug("Sending LLDP frames to {} ports...", nodeConnectorMap.keySet().size());
+ for (InstanceIdentifier<NodeConnector> nodeConnectorInstanceId : nodeConnectorMap.keySet()) {
+ NodeConnectorId nodeConnectorId = InstanceIdentifier.keyOf(nodeConnectorInstanceId).getId();
+ LOG.trace("Sending LLDP through port {}", nodeConnectorId.getValue());
+ packetProcessingService.transmitPacket(nodeConnectorMap.get(nodeConnectorInstanceId));
}
}
}
* {@inheritDoc}
*/
@Override
- public void nodeConnectorAdded(
- final InstanceIdentifier<NodeConnector> nodeConnectorInstanceId,
- final FlowCapableNodeConnector flowConnector) {
- NodeConnectorId nodeConnectorId = InstanceIdentifier.keyOf(
- nodeConnectorInstanceId).getId();
+ public void nodeConnectorAdded(final InstanceIdentifier<NodeConnector> nodeConnectorInstanceId,
+ final FlowCapableNodeConnector flowConnector) {
+ NodeConnectorId nodeConnectorId = InstanceIdentifier.keyOf(nodeConnectorInstanceId).getId();
// nodeConnectorAdded can be called even if we already sending LLDP
// frames to
}
// Prepare to build LLDP payload
- InstanceIdentifier<Node> nodeInstanceId = nodeConnectorInstanceId
- .firstIdentifierOf(Node.class);
+ InstanceIdentifier<Node> nodeInstanceId = nodeConnectorInstanceId.firstIdentifierOf(Node.class);
NodeId nodeId = InstanceIdentifier.keyOf(nodeInstanceId).getId();
MacAddress srcMacAddress = flowConnector.getHardwareAddress();
Long outputPortNo = flowConnector.getPortNumber().getUint32();
// No need to send LLDP frames on local ports
if (outputPortNo == null) {
- LOG.trace("Port {} is local, not sending LLDP frames through it",
- nodeConnectorId.getValue());
+ LOG.trace("Port {} is local, not sending LLDP frames through it", nodeConnectorId.getValue());
return;
}
TransmitPacketInput packet = new TransmitPacketInputBuilder()
.setEgress(new NodeConnectorRef(nodeConnectorInstanceId))
.setNode(new NodeRef(nodeInstanceId))
- .setPayload(
- LLDPUtil.buildLldpFrame(nodeId, nodeConnectorId,
- srcMacAddress, outputPortNo,
- addressDestionation)).build();
+ .setPayload(LLDPUtil
+ .buildLldpFrame(nodeId, nodeConnectorId, srcMacAddress, outputPortNo, addressDestionation))
+ .build();
// Save packet to node connector id -> packet map to transmit it every 5
// seconds
nodeConnectorMap.put(nodeConnectorInstanceId, packet);
- LOG.trace("Port {} added to LLDPSpeaker.nodeConnectorMap",
- nodeConnectorId.getValue());
+ LOG.trace("Port {} added to LLDPSpeaker.nodeConnectorMap", nodeConnectorId.getValue());
// Transmit packet for first time immediately
packetProcessingService.transmitPacket(packet);
* {@inheritDoc}
*/
@Override
- public void nodeConnectorRemoved(
- final InstanceIdentifier<NodeConnector> nodeConnectorInstanceId) {
+ public void nodeConnectorRemoved(final InstanceIdentifier<NodeConnector> nodeConnectorInstanceId) {
nodeConnectorMap.remove(nodeConnectorInstanceId);
- NodeConnectorId nodeConnectorId = InstanceIdentifier.keyOf(
- nodeConnectorInstanceId).getId();
- LOG.trace("Port {} removed from LLDPSpeaker.nodeConnectorMap",
- nodeConnectorId.getValue());
+ NodeConnectorId nodeConnectorId = InstanceIdentifier.keyOf(nodeConnectorInstanceId).getId();
+ LOG.trace("Port {} removed from LLDPSpeaker.nodeConnectorMap", nodeConnectorId.getValue());
}
+
}
import static org.opendaylight.controller.liblldp.LLDPTLV.CUSTOM_TLV_SUB_TYPE_CUSTOM_SEC;
import static org.opendaylight.openflowplugin.applications.topology.lldp.utils.LLDPDiscoveryUtils.getValueForLLDPPacketIntegrityEnsuring;
+
import java.math.BigInteger;
import java.security.NoSuchAlgorithmException;
import org.apache.commons.lang3.StringUtils;
private static final String OF_URI_PREFIX = "openflow:";
static byte[] buildLldpFrame(final NodeId nodeId,
- final NodeConnectorId nodeConnectorId, final MacAddress src, final Long outPortNo,
- final MacAddress destinationAddress) {
+ final NodeConnectorId nodeConnectorId,
+ final MacAddress src,
+ final Long outPortNo,
+ final MacAddress destinationAddress) {
// Create discovery pkt
LLDP discoveryPkt = new LLDP();
// Create LLDP ChassisID TLV
BigInteger dataPathId = dataPathIdFromNodeId(nodeId);
- byte[] cidValue = LLDPTLV
- .createChassisIDTLVValue(colonize(bigIntegerToPaddedHex(dataPathId)));
+ byte[] cidValue = LLDPTLV.createChassisIDTLVValue(colonize(bigIntegerToPaddedHex(dataPathId)));
LLDPTLV chassisIdTlv = new LLDPTLV();
chassisIdTlv.setType(LLDPTLV.TLVType.ChassisID.getValue());
chassisIdTlv.setType(LLDPTLV.TLVType.ChassisID.getValue())
- .setLength((short) cidValue.length).setValue(cidValue);
+ .setLength((short) cidValue.length)
+ .setValue(cidValue);
discoveryPkt.setChassisId(chassisIdTlv);
// Create LLDP PortID TL
byte[] pidValue = LLDPTLV.createPortIDTLVValue(hexString);
LLDPTLV portIdTlv = new LLDPTLV();
portIdTlv.setType(LLDPTLV.TLVType.PortID.getValue())
- .setLength((short) pidValue.length).setValue(pidValue);
+ .setLength((short) pidValue.length)
+ .setValue(pidValue);
portIdTlv.setType(LLDPTLV.TLVType.PortID.getValue());
discoveryPkt.setPortId(portIdTlv);
// Create LLDP TTL TLV
byte[] ttl = new byte[] { (byte) 0x13, (byte) 0x37 };
LLDPTLV ttlTlv = new LLDPTLV();
- ttlTlv.setType(LLDPTLV.TLVType.TTL.getValue())
- .setLength((short) ttl.length).setValue(ttl);
+ ttlTlv.setType(LLDPTLV.TLVType.TTL.getValue()).setLength((short) ttl.length).setValue(ttl);
discoveryPkt.setTtl(ttlTlv);
// Create LLDP SystemName TLV
LLDPTLV systemNameTlv = new LLDPTLV();
systemNameTlv.setType(LLDPTLV.TLVType.SystemName.getValue());
systemNameTlv.setType(LLDPTLV.TLVType.SystemName.getValue())
- .setLength((short) snValue.length).setValue(snValue);
+ .setLength((short) snValue.length)
+ .setValue(snValue);
discoveryPkt.setSystemNameId(systemNameTlv);
// Create LLDP Custom TLV
- byte[] customValue = LLDPTLV.createCustomTLVValue(nodeConnectorId
- .getValue());
+ byte[] customValue = LLDPTLV.createCustomTLVValue(nodeConnectorId.getValue());
LLDPTLV customTlv = new LLDPTLV();
customTlv.setType(LLDPTLV.TLVType.Custom.getValue())
- .setLength((short) customValue.length).setValue(customValue);
+ .setLength((short) customValue.length)
+ .setValue(customValue);
discoveryPkt.addCustomTLV(customTlv);
//Create LLDP CustomSec TLV
byte[] customSecValue = LLDPTLV.createCustomTLVValue(CUSTOM_TLV_SUB_TYPE_CUSTOM_SEC, pureValue);
LLDPTLV customSecTlv = new LLDPTLV();
customSecTlv.setType(LLDPTLV.TLVType.Custom.getValue())
- .setLength((short)customSecValue.length)
- .setValue(customSecValue);
+ .setLength((short) customSecValue.length)
+ .setValue(customSecValue);
discoveryPkt.addCustomTLV(customSecTlv);
} catch (NoSuchAlgorithmException e1) {
LOG.info("LLDP extra authenticator creation failed: {}", e1.getMessage());
byte[] sourceMac = HexEncode.bytesFromHexString(src.getValue());
Ethernet ethPkt = new Ethernet();
ethPkt.setSourceMACAddress(sourceMac)
- .setEtherType(EtherTypes.LLDP.shortValue())
- .setPayload(discoveryPkt);
+ .setEtherType(EtherTypes.LLDP.shortValue())
+ .setPayload(discoveryPkt);
if (destinationAddress == null) {
ethPkt.setDestinationMACAddress(LLDP.LLDPMulticastMac);
} else {
- ethPkt.setDestinationMACAddress(HexEncode
- .bytesFromHexString(destinationAddress.getValue()));
+ ethPkt.setDestinationMACAddress(HexEncode.bytesFromHexString(destinationAddress.getValue()));
}
try {
}
static byte[] buildLldpFrame(final NodeId nodeId,
- final NodeConnectorId nodeConnectorId, final MacAddress srcMacAddress,
- final Long outputPortNo) {
- return buildLldpFrame(nodeId, nodeConnectorId, srcMacAddress,
- outputPortNo, null);
+ final NodeConnectorId nodeConnectorId,
+ final MacAddress srcMacAddress,
+ final Long outputPortNo) {
+ return buildLldpFrame(nodeId, nodeConnectorId, srcMacAddress, outputPortNo, null);
}
}
* @param nodeConnectorInstanceId Object that uniquely identify added node connector
* @param flowConnector object containing almost all of details about node connector
*/
- public void nodeConnectorAdded(InstanceIdentifier<NodeConnector> nodeConnectorInstanceId,
+ void nodeConnectorAdded(InstanceIdentifier<NodeConnector> nodeConnectorInstanceId,
FlowCapableNodeConnector flowConnector);
/**
* the same removal event.
* @param nodeConnectorInstanceId Object that uniquely identify added node connector
*/
- public void nodeConnectorRemoved(InstanceIdentifier<NodeConnector> nodeConnectorInstanceId);
+ void nodeConnectorRemoved(InstanceIdentifier<NodeConnector> nodeConnectorInstanceId);
}
package org.opendaylight.openflowplugin.applications.lldpspeaker;
+import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.State;
+import java.util.Collection;
import java.util.HashMap;
-import com.google.common.collect.ImmutableSet;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.Callable;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.openflowplugin.common.wait.SimpleTaskRetryLooper;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortState;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.State;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
/**
* NodeConnectorInventoryEventTranslator is listening for changes in inventory operational DOM tree
* and update LLDPSpeaker and topology.
*/
-public class NodeConnectorInventoryEventTranslator implements DataChangeListener, AutoCloseable {
- /**
- *
- */
- private static final InstanceIdentifier<State> II_TO_STATE
+public class NodeConnectorInventoryEventTranslator<T extends DataObject>
+ implements DataTreeChangeListener<T>, AutoCloseable {
+
+ private static final InstanceIdentifier<State> II_TO_STATE
= InstanceIdentifier.builder(Nodes.class)
.child(Node.class)
.child(NodeConnector.class)
.augmentation(FlowCapableNodeConnector.class)
.build();
+ private static final long STARTUP_LOOP_TICK = 500L;
+ private static final int STARTUP_LOOP_MAX_RETRIES = 8;
private static final Logger LOG = LoggerFactory.getLogger(NodeConnectorInventoryEventTranslator.class);
- private final ListenerRegistration<DataChangeListener> dataChangeListenerRegistration;
- private final ListenerRegistration<DataChangeListener> listenerOnPortStateRegistration;
+ private final ListenerRegistration<DataTreeChangeListener> dataChangeListenerRegistration;
+ private final ListenerRegistration<DataTreeChangeListener> listenerOnPortStateRegistration;
private final Set<NodeConnectorEventsObserver> observers;
private final Map<InstanceIdentifier<?>,FlowCapableNodeConnector> iiToDownFlowCapableNodeConnectors = new HashMap<>();
public NodeConnectorInventoryEventTranslator(DataBroker dataBroker, NodeConnectorEventsObserver... observers) {
this.observers = ImmutableSet.copyOf(observers);
- dataChangeListenerRegistration = dataBroker.registerDataChangeListener(
- LogicalDatastoreType.OPERATIONAL,
- II_TO_FLOW_CAPABLE_NODE_CONNECTOR,
- this, AsyncDataBroker.DataChangeScope.BASE);
- listenerOnPortStateRegistration = dataBroker.registerDataChangeListener(
- LogicalDatastoreType.OPERATIONAL,
- II_TO_STATE,
- this, AsyncDataBroker.DataChangeScope.SUBTREE);
+ final DataTreeIdentifier<T> dtiToNodeConnector =
+ new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, II_TO_FLOW_CAPABLE_NODE_CONNECTOR);
+ final DataTreeIdentifier<T> dtiToNodeConnectorState =
+ new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, II_TO_STATE);
+ final SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(STARTUP_LOOP_TICK, STARTUP_LOOP_MAX_RETRIES);
+ try {
+ dataChangeListenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<DataTreeChangeListener>>() {
+ @Override
+ public ListenerRegistration<DataTreeChangeListener> call() throws Exception {
+ return dataBroker.registerDataTreeChangeListener(dtiToNodeConnector, NodeConnectorInventoryEventTranslator.this);
+ }
+ });
+ listenerOnPortStateRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<DataTreeChangeListener>>() {
+ @Override
+ public ListenerRegistration<DataTreeChangeListener> call() throws Exception {
+ return dataBroker.registerDataTreeChangeListener(dtiToNodeConnectorState, NodeConnectorInventoryEventTranslator.this);
+ }
+ });
+ } catch (Exception e) {
+ LOG.error("DataTreeChangeListeners registration failed", e);
+ throw new IllegalStateException("NodeConnectorInventoryEventTranslator startup failed!", e);
+ }
+ LOG.info("NodeConnectorInventoryEventTranslator has started.");
}
@Override
public void close() {
- dataChangeListenerRegistration.close();
- listenerOnPortStateRegistration.close();
+ if (dataChangeListenerRegistration != null) {
+ dataChangeListenerRegistration.close();
+ }
+ if (listenerOnPortStateRegistration != null) {
+ listenerOnPortStateRegistration.close();
+ }
}
@Override
- public void onDataChanged(AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- LOG.trace("Node connectors in inventory changed: {} created, {} updated, {} removed",
- change.getCreatedData().size(), change.getUpdatedData().size(), change.getRemovedPaths().size());
-
- // Iterate over created node connectors
- for (Map.Entry<InstanceIdentifier<?>, DataObject> entry : change.getCreatedData().entrySet()) {
- InstanceIdentifier<NodeConnector> nodeConnectorInstanceId =
- entry.getKey().firstIdentifierOf(NodeConnector.class);
- if (compareIITail(entry.getKey(),II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
- FlowCapableNodeConnector flowConnector = (FlowCapableNodeConnector) entry.getValue();
- if (!isPortDown(flowConnector)) {
- notifyNodeConnectorAppeared(nodeConnectorInstanceId, flowConnector);
- } else {
- iiToDownFlowCapableNodeConnectors.put(nodeConnectorInstanceId, flowConnector);
- }
+ public void onDataTreeChanged(@Nonnull Collection<DataTreeModification<T>> modifications) {
+ for (DataTreeModification<T> modification : modifications) {
+ LOG.trace("Node connectors in inventory changed -> {}", modification.getRootNode().getModificationType());
+ switch (modification.getRootNode().getModificationType()) {
+ case WRITE:
+ processAddedConnector(modification);
+ break;
+ case SUBTREE_MODIFIED:
+ processUpdatedConnector(modification);
+ break;
+ case DELETE:
+ processRemovedConnector(modification);
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled modification type: " +
+ modification.getRootNode().getModificationType());
}
}
+ }
- // Iterate over updated node connectors (port down state may change)
- for (Map.Entry<InstanceIdentifier<?>, DataObject> entry : change.getUpdatedData().entrySet()) {
- InstanceIdentifier<NodeConnector> nodeConnectorInstanceId =
- entry.getKey().firstIdentifierOf(NodeConnector.class);
- if (compareIITail(entry.getKey(),II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
- FlowCapableNodeConnector flowConnector = (FlowCapableNodeConnector) entry.getValue();
- if (isPortDown(flowConnector)) {
- notifyNodeConnectorDisappeared(nodeConnectorInstanceId);
- } else {
- notifyNodeConnectorAppeared(nodeConnectorInstanceId, flowConnector);
- }
- } else if (compareIITail(entry.getKey(),II_TO_STATE)) {
- FlowCapableNodeConnector flowNodeConnector = iiToDownFlowCapableNodeConnectors.get(nodeConnectorInstanceId);
- if (flowNodeConnector != null) {
- State state = (State)entry.getValue();
- if (!state.isLinkDown()) {
- FlowCapableNodeConnectorBuilder flowCapableNodeConnectorBuilder = new FlowCapableNodeConnectorBuilder(flowNodeConnector);
- flowCapableNodeConnectorBuilder.setState(state);
- notifyNodeConnectorAppeared(nodeConnectorInstanceId, flowCapableNodeConnectorBuilder.build());
- iiToDownFlowCapableNodeConnectors.remove(nodeConnectorInstanceId);
- }
- }
+ private void processAddedConnector(final DataTreeModification<T> modification) {
+ final InstanceIdentifier<T> identifier = modification.getRootPath().getRootIdentifier();
+ InstanceIdentifier<NodeConnector> nodeConnectorInstanceId = identifier.firstIdentifierOf(NodeConnector.class);
+ if (compareIITail(identifier, II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
+ FlowCapableNodeConnector flowConnector = (FlowCapableNodeConnector) modification.getRootNode().getDataAfter();
+ if (!isPortDown(flowConnector)) {
+ notifyNodeConnectorAppeared(nodeConnectorInstanceId, flowConnector);
+ } else {
+ iiToDownFlowCapableNodeConnectors.put(nodeConnectorInstanceId, flowConnector);
}
}
+ }
- // Iterate over removed node connectors
- for (InstanceIdentifier<?> removed : change.getRemovedPaths()) {
- if (compareIITail(removed,II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
- InstanceIdentifier<NodeConnector> nodeConnectorInstanceId = removed.firstIdentifierOf(NodeConnector.class);
+ private void processUpdatedConnector(final DataTreeModification<T> modification) {
+ final InstanceIdentifier<T> identifier = modification.getRootPath().getRootIdentifier();
+ InstanceIdentifier<NodeConnector> nodeConnectorInstanceId = identifier.firstIdentifierOf(NodeConnector.class);
+ if (compareIITail(identifier, II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
+ FlowCapableNodeConnector flowConnector = (FlowCapableNodeConnector) modification.getRootNode().getDataAfter();
+ if (isPortDown(flowConnector)) {
notifyNodeConnectorDisappeared(nodeConnectorInstanceId);
+ } else {
+ notifyNodeConnectorAppeared(nodeConnectorInstanceId, flowConnector);
}
+ } else if (compareIITail(identifier, II_TO_STATE)) {
+ FlowCapableNodeConnector flowNodeConnector = iiToDownFlowCapableNodeConnectors.get(nodeConnectorInstanceId);
+ if (flowNodeConnector != null) {
+ State state = (State) modification.getRootNode().getDataAfter();
+ if (!state.isLinkDown()) {
+ FlowCapableNodeConnectorBuilder flowCapableNodeConnectorBuilder =
+ new FlowCapableNodeConnectorBuilder(flowNodeConnector);
+ flowCapableNodeConnectorBuilder.setState(state);
+ notifyNodeConnectorAppeared(nodeConnectorInstanceId, flowCapableNodeConnectorBuilder.build());
+ iiToDownFlowCapableNodeConnectors.remove(nodeConnectorInstanceId);
+ }
+ }
+ }
+ }
+
+ private void processRemovedConnector(final DataTreeModification<T> modification) {
+ final InstanceIdentifier<T> identifier = modification.getRootPath().getRootIdentifier();
+ if (compareIITail(identifier, II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
+ InstanceIdentifier<NodeConnector> nodeConnectorInstanceId = identifier.firstIdentifierOf(NodeConnector.class);
+ notifyNodeConnectorDisappeared(nodeConnectorInstanceId);
}
}
- /**
- * @param key
- * @param iiToFlowCapableNodeConnector
- * @return
- */
- private boolean compareIITail(InstanceIdentifier<?> ii1,
- InstanceIdentifier<?> ii2) {
+ private boolean compareIITail(final InstanceIdentifier<?> ii1, final InstanceIdentifier<?> ii2) {
return Iterables.getLast(ii1.getPathArguments()).equals(Iterables.getLast(ii2.getPathArguments()));
}
- private static boolean isPortDown(FlowCapableNodeConnector flowCapableNodeConnector) {
+ private static boolean isPortDown(final FlowCapableNodeConnector flowCapableNodeConnector) {
PortState portState = flowCapableNodeConnector.getState();
PortConfig portConfig = flowCapableNodeConnector.getConfiguration();
- return portState != null && portState.isLinkDown() ||
- portConfig != null && portConfig.isPORTDOWN();
+ return portState != null && portState.isLinkDown()
+ || portConfig != null && portConfig.isPORTDOWN();
}
- private void notifyNodeConnectorAppeared(InstanceIdentifier<NodeConnector> nodeConnectorInstanceId,
- FlowCapableNodeConnector flowConnector) {
+ private void notifyNodeConnectorAppeared(final InstanceIdentifier<NodeConnector> nodeConnectorInstanceId,
+ final FlowCapableNodeConnector flowConnector) {
for (NodeConnectorEventsObserver observer : observers) {
observer.nodeConnectorAdded(nodeConnectorInstanceId, flowConnector);
}
}
- private void notifyNodeConnectorDisappeared(InstanceIdentifier<NodeConnector> nodeConnectorInstanceId) {
+ private void notifyNodeConnectorDisappeared(final InstanceIdentifier<NodeConnector> nodeConnectorInstanceId) {
for (NodeConnectorEventsObserver observer : observers) {
observer.nodeConnectorRemoved(nodeConnectorInstanceId);
}
}
+
}
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-/**
- * Created by Martin Bobak mbobak@cisco.com on 11/20/14.
- */
public class OperationalStatusChangeService implements LldpSpeakerService {
private final LLDPSpeaker speakerInstance;
<?xml version="1.0" encoding="UTF-8"?>
<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
+ odl:use-default-for-reference-types="true">
<reference id="dataBroker" interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"/>
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
-
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * Tests for @{LLDPSpeaker} class.
+ * Tests for {@link LLDPSpeaker}.
*/
@RunWith(MockitoJUnitRunner.class)
public class LLDPSpeakerTest {
- static InstanceIdentifier<NodeConnector> id;
- static FlowCapableNodeConnector fcnc;
- static TransmitPacketInput packet;
+ private static final InstanceIdentifier<NodeConnector> id;
+ private static final FlowCapableNodeConnector fcnc;
+ private static final TransmitPacketInput packet;
static {
MacAddress mac = new MacAddress("01:23:45:67:89:AB");
}
@Mock
- PacketProcessingService packetProcessingService;
+ private PacketProcessingService packetProcessingService;
@Mock
- ScheduledExecutorService scheduledExecutorService;
+ private ScheduledExecutorService scheduledExecutorService;
@Mock
- ScheduledFuture scheduledSpeakerTask;
+ private ScheduledFuture scheduledSpeakerTask;
- MacAddress destinationMACAddress = null;
- LLDPSpeaker lldpSpeaker;
+ private final MacAddress destinationMACAddress = null;
+ private LLDPSpeaker lldpSpeaker;
@Before
- @SuppressWarnings("unchecked")
public void setUp() {
when(
scheduledExecutorService.scheduleAtFixedRate(
}
/**
- * Test that when @{LLDPSpeaker#nodeConnectorAdded} is called multiple times
+ * Test that when {@link LLDPSpeaker#nodeConnectorAdded} is called multiple times
* with same arguments, only the first one have effect.
*/
@Test
/**
* Test that lldpSpeaker cancels periodic LLDP flood task and stops
- *
- * @{ScheduledExecutorService .
- * @throws Exception
*/
@Test
- public void testCleanup() throws Exception {
+ public void testCleanup() {
lldpSpeaker.close();
verify(scheduledSpeakerTask, times(1)).cancel(true);
verify(scheduledExecutorService, times(1)).shutdown();
package org.opendaylight.openflowplugin.applications.lldpspeaker;
-import static org.mockito.Mockito.*;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import org.junit.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
+import static org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType.DELETE;
+import static org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType.SUBTREE_MODIFIED;
+import static org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType.WRITE;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.junit.Before;
+import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * Tests for @{NodeConnectorInventoryEventTranslator} class.
+ * Tests for {@link NodeConnectorInventoryEventTranslator}.
*/
@RunWith(MockitoJUnitRunner.class)
public class NodeConnectorInventoryEventTranslatorTest {
- static InstanceIdentifier<NodeConnector> id = TestUtils.createNodeConnectorId("openflow:1", "openflow:1:1");
- static InstanceIdentifier<FlowCapableNodeConnector> iiToConnector = id.augmentation(FlowCapableNodeConnector.class);
- static FlowCapableNodeConnector fcnc = TestUtils.createFlowCapableNodeConnector().build();
+ private static final InstanceIdentifier<NodeConnector> id = TestUtils.createNodeConnectorId("openflow:1", "openflow:1:1");
+ private static final InstanceIdentifier<FlowCapableNodeConnector> iiToConnector = id.augmentation(FlowCapableNodeConnector.class);
+ private static final FlowCapableNodeConnector fcnc = TestUtils.createFlowCapableNodeConnector().build();
- @Mock DataBroker dataBroker;
- @Mock ListenerRegistration<DataChangeListener> dataChangeListenerRegistration;
- @Mock NodeConnectorEventsObserver eventsObserver;
- @Mock NodeConnectorEventsObserver eventsObserver2;
+ @Mock
+ private NodeConnectorEventsObserver eventsObserver;
+ @Mock
+ private NodeConnectorEventsObserver eventsObserver2;
- MockDataChangedEvent dataChangedEvent = new MockDataChangedEvent();
- NodeConnectorInventoryEventTranslator translator;
+ private NodeConnectorInventoryEventTranslator translator;
@Before
public void setUp() {
-
- when(dataBroker.registerDataChangeListener(
- any(LogicalDatastoreType.class),
- any(InstanceIdentifier.class),
- any(DataChangeListener.class),
- any(AsyncDataBroker.DataChangeScope.class)))
- .thenReturn(dataChangeListenerRegistration);
- translator = new NodeConnectorInventoryEventTranslator(dataBroker, eventsObserver, eventsObserver2);
+ translator = new NodeConnectorInventoryEventTranslator(mock(DataBroker.class), eventsObserver, eventsObserver2);
}
/**
- * Test that checks if @{NodeConnectorEventsObserver#nodeConnectorAdded} is called
+ * Test that checks if {@link NodeConnectorEventsObserver#nodeConnectorAdded} is called
* for each FlowCapableNodeConnector item that @{AsyncDataChangeEvent#getCreatedData} return.
*/
@Test
public void testNodeConnectorCreation() {
- // Setup dataChangedEvent to mock new port creation in inventory
- dataChangedEvent.created.put(iiToConnector, fcnc);
-
- // Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ DataTreeModification dataTreeModification = setupDataTreeChange(WRITE, iiToConnector, fcnc);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verify(eventsObserver).nodeConnectorAdded(id, fcnc);
}
@Test
public void testNodeConnectorCreationLinkDown() {
FlowCapableNodeConnector fcnc = TestUtils.createFlowCapableNodeConnector(true, false).build();
-
- // Setup dataChangedEvent to mock new port creation in inventory
- dataChangedEvent.created.put(id, fcnc);
-
- // Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ DataTreeModification dataTreeModification = setupDataTreeChange(WRITE, id, fcnc);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verifyZeroInteractions(eventsObserver);
}
@Test
public void testNodeConnectorCreationAdminDown() {
FlowCapableNodeConnector fcnc = TestUtils.createFlowCapableNodeConnector(false, true).build();
-
- // Setup dataChangedEvent to mock new port creation in inventory
- dataChangedEvent.created.put(id, fcnc);
-
- // Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ DataTreeModification dataTreeModification = setupDataTreeChange(WRITE, id, fcnc);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verifyZeroInteractions(eventsObserver);
}
/**
- * Test that checks if @{NodeConnectorEventsObserver#nodeConnectorRemoved} is called
+ * Test that checks if {@link NodeConnectorEventsObserver#nodeConnectorRemoved} is called
* for each FlowCapableNodeConnector item inside @{AsyncDataChangeEvent#getUpdatedData}
* that have link down state.
*/
@Test
public void testNodeConnectorUpdateToLinkDown() {
FlowCapableNodeConnector fcnc = TestUtils.createFlowCapableNodeConnector(true, false).build();
-
- // Setup dataChangedEvent to mock link down
- dataChangedEvent.updated.put(iiToConnector, fcnc);
-
- // Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ DataTreeModification dataTreeModification = setupDataTreeChange(SUBTREE_MODIFIED, iiToConnector, fcnc);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verify(eventsObserver).nodeConnectorRemoved(id);
}
/**
- * Test that checks if @{NodeConnectorEventsObserver#nodeConnectorRemoved} is called
+ * Test that checks if {@link NodeConnectorEventsObserver#nodeConnectorRemoved} is called
* for each FlowCapableNodeConnector item inside @{AsyncDataChangeEvent#getUpdatedData}
* that have administrative down state.
*/
@Test
public void testNodeConnectorUpdateToAdminDown() {
FlowCapableNodeConnector fcnc = TestUtils.createFlowCapableNodeConnector(false, true).build();
-
- // Setup dataChangedEvent to mock link down and administrative port down
- dataChangedEvent.updated.put(iiToConnector, fcnc);
-
- // Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ DataTreeModification dataTreeModification = setupDataTreeChange(SUBTREE_MODIFIED, iiToConnector, fcnc);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verify(eventsObserver).nodeConnectorRemoved(id);
}
/**
- * Test that checks if @{NodeConnectorEventsObserver#nodeConnectorAdded} is called
+ * Test that checks if {@link NodeConnectorEventsObserver#nodeConnectorAdded} is called
* for each FlowCapableNodeConnector item inside @{AsyncDataChangeEvent#getUpdatedData}
* that have administrative up and link up state.
*/
@Test
public void testNodeConnectorUpdateToUp() {
- // Setup dataChangedEvent to mock link up and administrative port up
- dataChangedEvent.updated.put(iiToConnector, fcnc);
-
- // Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ DataTreeModification dataTreeModification = setupDataTreeChange(SUBTREE_MODIFIED, iiToConnector, fcnc);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verify(eventsObserver).nodeConnectorAdded(id, fcnc);
}
/**
- * Test that checks if @{NodeConnectorEventsObserver#nodeConnectorRemoved} is called
+ * Test that checks if {@link NodeConnectorEventsObserver#nodeConnectorRemoved} is called
* for each FlowCapableNodeConnector path that @{AsyncDataChangeEvent#getRemovedPaths} return.
*/
@Test
public void testNodeConnectorRemoval() {
- // Setup dataChangedEvent to mock node connector removal
- dataChangedEvent.removed.add(iiToConnector);
-
+ DataTreeModification dataTreeModification = setupDataTreeChange(DELETE, iiToConnector, null);
// Invoke NodeConnectorInventoryEventTranslator and check result
- translator.onDataChanged(dataChangedEvent);
+ translator.onDataTreeChanged(Collections.singleton(dataTreeModification));
verify(eventsObserver).nodeConnectorRemoved(id);
}
/**
- * Test that checks if @{NodeConnectorEventsObserver#nodeConnectorAdded} and
+ * Test that checks if {@link NodeConnectorEventsObserver#nodeConnectorAdded} and
* @{NodeConnectorEventsObserver#nodeConnectorRemoved} are called for each
* observer when multiple observers are registered for notifications.
*/
// Create prerequisites
InstanceIdentifier<NodeConnector> id2 = TestUtils.createNodeConnectorId("openflow:1", "openflow:1:2");
InstanceIdentifier<FlowCapableNodeConnector> iiToConnector2 = id2.augmentation(FlowCapableNodeConnector.class);
-
- // Setup dataChangedEvent to mock port creation and removal
- dataChangedEvent.created.put(iiToConnector, fcnc);
- dataChangedEvent.removed.add(iiToConnector2);
-
+ List<DataTreeModification> modifications = new ArrayList<>();
+ modifications.add(setupDataTreeChange(WRITE, iiToConnector, fcnc));
+ modifications.add(setupDataTreeChange(DELETE, iiToConnector2, null));
// Invoke onDataChanged and check that both observers notified
- translator.onDataChanged(dataChangedEvent);
+ translator.onDataTreeChanged(modifications);
verify(eventsObserver).nodeConnectorAdded(id, fcnc);
verify(eventsObserver).nodeConnectorRemoved(id2);
verify(eventsObserver2).nodeConnectorAdded(id, fcnc);
verify(eventsObserver2).nodeConnectorRemoved(id2);
}
- /**
- * Test that @{ListenerRegistration} is closed when ${NodeConnectorInventoryEventTranslator#close}
- * method is called.
- * @throws Exception
- */
+ // NOTE(review): this method was renamed to tearDown but is still annotated @Test,
+ // so it runs as an independent test case rather than after each test. If cleanup
+ // after every test is intended it should use @After — confirm intent.
@Test
- public void testCleanup() throws Exception {
- // Trigger cleanup
+ public void tearDown() throws Exception {
translator.close();
-
- // Verify that ListenerRegistration to DOM events
- verify(dataChangeListenerRegistration, times(2)).close();
}
- static class MockDataChangedEvent implements AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> {
- Map<InstanceIdentifier<?>,DataObject> created = new HashMap<>();
- Map<InstanceIdentifier<?>,DataObject> updated = new HashMap<>();
- Set<InstanceIdentifier<?>> removed = new HashSet<>();
-
- @Override
- public Map<InstanceIdentifier<?>, DataObject> getCreatedData() {
- return created;
- }
-
- @Override
- public Map<InstanceIdentifier<?>, DataObject> getUpdatedData() {
- return updated;
- }
-
- @Override
- public Set<InstanceIdentifier<?>> getRemovedPaths() {
- return removed;
- }
-
- @Override
- public Map<InstanceIdentifier<?>, DataObject> getOriginalData() {
- throw new UnsupportedOperationException("Not implemented by mock");
- }
-
- @Override
- public DataObject getOriginalSubtree() {
- throw new UnsupportedOperationException("Not implemented by mock");
- }
-
- @Override
- public DataObject getUpdatedSubtree() {
- throw new UnsupportedOperationException("Not implemented by mock");
- }
+ /**
+ * Builds a mocked {@link DataTreeModification} of the given modification type, rooted at
+ * the supplied operational-datastore path and optionally carrying {@code connector} as
+ * its after-data (pass {@code null} for removals).
+ */
+ private <T extends DataObject> DataTreeModification setupDataTreeChange(final ModificationType type,
+ final InstanceIdentifier<T> ii,
+ final FlowCapableNodeConnector connector) {
+ final DataTreeModification dataTreeModification = mock(DataTreeModification.class);
+ // Stub the root node once and keep the reference instead of re-stubbing through
+ // the un-stubbed getRootNode() chain on every when(...) call.
+ final DataObjectModification rootNode = mock(DataObjectModification.class);
+ when(dataTreeModification.getRootNode()).thenReturn(rootNode);
+ when(rootNode.getModificationType()).thenReturn(type);
+ when(rootNode.getDataAfter()).thenReturn(connector);
+ final DataTreeIdentifier<T> identifier = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, ii);
+ when(dataTreeModification.getRootPath()).thenReturn(identifier);
+ return dataTreeModification;
}
}
return createFlowCapableNodeConnector(false, false, mac, port);
}
- static FlowCapableNodeConnectorBuilder createFlowCapableNodeConnector(boolean linkDown, boolean adminDown,
+ private static FlowCapableNodeConnectorBuilder createFlowCapableNodeConnector(boolean linkDown, boolean adminDown,
MacAddress mac, long port) {
return new FlowCapableNodeConnectorBuilder()
.setHardwareAddress(mac)
-/*
+/**
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketProcessingListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketReceived;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class LLDPDiscoveryListener implements PacketProcessingListener {
- private static final Logger LOG = LoggerFactory.getLogger(LLDPDiscoveryListener.class);
-
private final LLDPLinkAger lldpLinkAger;
private final NotificationProviderService notificationService;
import java.util.Optional;
import javax.annotation.CheckForNull;
import org.opendaylight.openflowplugin.api.OFConstants;
+import org.opendaylight.openflowplugin.impl.util.GroupUtil;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.data.VersionConvertorData;
import org.opendaylight.openflowplugin.openflow.md.util.OpenflowPortsUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterKbps;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterPktps;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.multipart.reply.multipart.reply.body.multipart.reply.desc._case.MultipartReplyDesc;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.multipart.reply.multipart.reply.body.multipart.reply.group.features._case.MultipartReplyGroupFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.multipart.reply.multipart.reply.body.multipart.reply.meter.features._case.MultipartReplyMeterFeatures;
addGroupCapabilities(reply, gCapability);
groupFeature.setGroupCapabilitiesSupported(gCapability);
- final List<Long> supportActionByGroups = new ArrayList<>();
- for (final ActionType actionType : reply.getActionsBitmap()) {
- long supportActionBitmap = 0;
- supportActionBitmap |= actionType.isOFPATOUTPUT() ? (1 << 0) : 0;
- supportActionBitmap |= actionType.isOFPATCOPYTTLOUT() ? (1 << 11) : 0;
- supportActionBitmap |= actionType.isOFPATCOPYTTLIN() ? (1 << 12) : 0;
- supportActionBitmap |= actionType.isOFPATSETMPLSTTL() ? (1 << 15) : 0;
- supportActionBitmap |= actionType.isOFPATDECMPLSTTL() ? (1 << 16) : 0;
- supportActionBitmap |= actionType.isOFPATPUSHVLAN() ? (1 << 17) : 0;
- supportActionBitmap |= actionType.isOFPATPOPVLAN() ? (1 << 18) : 0;
- supportActionBitmap |= actionType.isOFPATPUSHMPLS() ? (1 << 19) : 0;
- supportActionBitmap |= actionType.isOFPATPOPMPLS() ? (1 << 20) : 0;
- supportActionBitmap |= actionType.isOFPATSETQUEUE() ? (1 << 21) : 0;
- supportActionBitmap |= actionType.isOFPATGROUP() ? (1 << 22) : 0;
- supportActionBitmap |= actionType.isOFPATSETNWTTL() ? (1 << 23) : 0;
- supportActionBitmap |= actionType.isOFPATDECNWTTL() ? (1 << 24) : 0;
- supportActionBitmap |= actionType.isOFPATSETFIELD() ? (1 << 25) : 0;
- supportActionBitmap |= actionType.isOFPATPUSHPBB() ? (1 << 26) : 0;
- supportActionBitmap |= actionType.isOFPATPOPPBB() ? (1 << 27) : 0;
- supportActionByGroups.add(Long.valueOf(supportActionBitmap));
- }
- groupFeature.setActions(supportActionByGroups);
+ groupFeature.setActions(GroupUtil.extractGroupActionsSupportBitmap(reply.getActionsBitmap()));
return new NodeGroupFeaturesBuilder().setGroupFeatures(groupFeature.build()).build();
}
return messageSpy;
}
- protected abstract OfHeader buildRequest(Xid xid, I input) throws Exception;
+ protected abstract OfHeader buildRequest(Xid xid, I input) throws ServiceException;
protected abstract FutureCallback<OfHeader> createCallback(RequestContext<O> context, Class<?> requestType);
}
@Override
- protected OfHeader buildRequest(final Xid xid, final EchoInputBuilder input) throws Exception {
+ protected OfHeader buildRequest(final Xid xid, final EchoInputBuilder input) throws ServiceException {
return input
.setXid(xid.getValue())
.setVersion(getVersion())
}
@Override
- protected OfHeader buildRequest(final Xid xid, final SendBarrierInput input) {
+ protected OfHeader buildRequest(final Xid xid, final SendBarrierInput input) throws ServiceException {
final BarrierInputBuilder barrierInputOFJavaBuilder = new BarrierInputBuilder();
barrierInputOFJavaBuilder.setVersion(getVersion());
barrierInputOFJavaBuilder.setXid(xid.getValue());
}
@Override
- protected OfHeader buildRequest(final Xid xid, final FlowModInputBuilder input) {
+ protected OfHeader buildRequest(final Xid xid, final FlowModInputBuilder input) throws ServiceException {
input.setXid(xid.getValue());
return input.build();
}
}
@Override
- protected OfHeader buildRequest(final Xid xid, final I input) {
+ protected OfHeader buildRequest(final Xid xid, final I input) throws ServiceException {
final Optional<GroupModInputBuilder> ofGroupModInput = convertorExecutor.convert(input, data);
final GroupModInputBuilder groupModInputBuilder = ofGroupModInput
}
@Override
- protected OfHeader buildRequest(final Xid xid, final I input) {
+ protected OfHeader buildRequest(final Xid xid, final I input) throws ServiceException {
final Optional<MeterModInputBuilder> ofMeterModInput = convertorExecutor.convert(input, data);
final MeterModInputBuilder meterModInputBuilder = ofMeterModInput
.orElse(MeterConvertor.defaultResult(getVersion()));
}
@Override
- protected OfHeader buildRequest(final Xid xid, final SetConfigInput input) {
+ protected OfHeader buildRequest(final Xid xid, final SetConfigInput input) throws ServiceException {
SetConfigInputBuilder builder = new SetConfigInputBuilder();
SwitchConfigFlag flag = SwitchConfigFlag.valueOf(input.getFlag());
}
@Override
- protected OfHeader buildRequest(final Xid xid, final TransmitPacketInput input) {
+ protected OfHeader buildRequest(final Xid xid, final TransmitPacketInput input) throws ServiceException {
final PacketOutConvertorData data = new PacketOutConvertorData(getVersion());
data.setDatapathId(getDatapathId());
data.setXid(xid.getValue());
}
@Override
- protected OfHeader buildRequest(final Xid xid, final RoleRequestInputBuilder input) {
+ protected OfHeader buildRequest(final Xid xid, final RoleRequestInputBuilder input) throws ServiceException {
input.setXid(xid.getValue());
return input.build();
}
}
@Override
- protected OfHeader buildRequest(Xid xid, SendExperimenterInput input) throws ConversionException {
+ protected OfHeader buildRequest(Xid xid, SendExperimenterInput input) throws ServiceException {
final TypeVersionKey key = new TypeVersionKey(input.getExperimenterMessageOfChoice().getImplementedInterface(), getVersion());
final ConvertorMessageToOFJava<ExperimenterMessageOfChoice, ExperimenterDataOfChoice> messageConverter =
extensionConverterProvider.getMessageConverter(key);
if (messageConverter == null) {
- throw new ConverterNotFoundException(key.toString());
+ throw new ServiceException(new ConverterNotFoundException(key.toString()));
}
-
- final ExperimenterInputBuilder experimenterInputBld = new ExperimenterInputBuilder()
- .setExperimenter(messageConverter.getExperimenterId())
- .setExpType(messageConverter.getType())
- .setExperimenterDataOfChoice(messageConverter.convert(input.getExperimenterMessageOfChoice()))
- .setVersion(getVersion())
- .setXid(xid.getValue());
+
+ final ExperimenterInputBuilder experimenterInputBld;
+
+ try {
+ experimenterInputBld = new ExperimenterInputBuilder()
+ .setExperimenter(messageConverter.getExperimenterId())
+ .setExpType(messageConverter.getType())
+ .setExperimenterDataOfChoice(messageConverter.convert(input.getExperimenterMessageOfChoice()))
+ .setVersion(getVersion())
+ .setXid(xid.getValue());
+ } catch (ConversionException e) {
+ throw new ServiceException(e);
+ }
+
return experimenterInputBld.build();
}
LOG.trace("batch progressing on step type {}", planStep.getStepType());
LOG.trace("batch progressing previous steps result: {}", chainInput.isSuccessful());
- final ListenableFuture<RpcResult<ProcessFlatBatchOutput>> chainOutput;
- switch (planStep.getStepType()) {
- case FLOW_ADD:
- final AddFlowsBatchInput addFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchAddFlow(planStep, node);
- final Future<RpcResult<AddFlowsBatchOutput>> resultAddFlowFuture = salFlowService.addFlowsBatch(addFlowsBatchInput);
- chainOutput = FlatBatchFlowAdapters.convertFlowBatchFutureForChain(resultAddFlowFuture, currentOffset);
- break;
- case FLOW_REMOVE:
- final RemoveFlowsBatchInput removeFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchRemoveFlow(planStep, node);
- final Future<RpcResult<RemoveFlowsBatchOutput>> resultRemoveFlowFuture = salFlowService.removeFlowsBatch(removeFlowsBatchInput);
- chainOutput = FlatBatchFlowAdapters.convertFlowBatchFutureForChain(resultRemoveFlowFuture, currentOffset);
- break;
- case FLOW_UPDATE:
- final UpdateFlowsBatchInput updateFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchUpdateFlow(planStep, node);
- final Future<RpcResult<UpdateFlowsBatchOutput>> resultUpdateFlowFuture = salFlowService.updateFlowsBatch(updateFlowsBatchInput);
- chainOutput = FlatBatchFlowAdapters.convertFlowBatchFutureForChain(resultUpdateFlowFuture, currentOffset);
- break;
- case GROUP_ADD:
- final AddGroupsBatchInput addGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchAddGroup(planStep, node);
- final Future<RpcResult<AddGroupsBatchOutput>> resultAddGroupFuture = salGroupService.addGroupsBatch(addGroupsBatchInput);
- chainOutput = FlatBatchGroupAdapters.convertGroupBatchFutureForChain(resultAddGroupFuture, currentOffset);
- break;
- case GROUP_REMOVE:
- final RemoveGroupsBatchInput removeGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchRemoveGroup(planStep, node);
- final Future<RpcResult<RemoveGroupsBatchOutput>> resultRemoveGroupFuture = salGroupService.removeGroupsBatch(removeGroupsBatchInput);
- chainOutput = FlatBatchGroupAdapters.convertGroupBatchFutureForChain(resultRemoveGroupFuture, currentOffset);
- break;
- case GROUP_UPDATE:
- final UpdateGroupsBatchInput updateGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchUpdateGroup(planStep, node);
- final Future<RpcResult<UpdateGroupsBatchOutput>> resultUpdateGroupFuture = salGroupService.updateGroupsBatch(updateGroupsBatchInput);
- chainOutput = FlatBatchGroupAdapters.convertGroupBatchFutureForChain(resultUpdateGroupFuture, currentOffset);
- break;
- case METER_ADD:
- final AddMetersBatchInput addMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchAddMeter(planStep, node);
- final Future<RpcResult<AddMetersBatchOutput>> resultAddMeterFuture = salMeterService.addMetersBatch(addMetersBatchInput);
- chainOutput = FlatBatchMeterAdapters.convertMeterBatchFutureForChain(resultAddMeterFuture, currentOffset);
- break;
- case METER_REMOVE:
- final RemoveMetersBatchInput removeMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchRemoveMeter(planStep, node);
- final Future<RpcResult<RemoveMetersBatchOutput>> resultRemoveMeterFuture = salMeterService.removeMetersBatch(removeMetersBatchInput);
- chainOutput = FlatBatchMeterAdapters.convertMeterBatchFutureForChain(resultRemoveMeterFuture, currentOffset);
- break;
- case METER_UPDATE:
- final UpdateMetersBatchInput updateMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchUpdateMeter(planStep, node);
- final Future<RpcResult<UpdateMetersBatchOutput>> resultUpdateMeterFuture = salMeterService.updateMetersBatch(updateMetersBatchInput);
- chainOutput = FlatBatchMeterAdapters.convertMeterBatchFutureForChain(resultUpdateMeterFuture, currentOffset);
- break;
- default:
- LOG.warn("Unsupported plan-step type occurred: {} -> OMITTING", planStep.getStepType());
- chainOutput = FlatBatchUtil.createEmptyRpcBatchResultFuture(true);
- }
- return chainOutput;
+ return getChainOutput(node, planStep, currentOffset);
}));
stepOffset += planStep.getTaskBag().size();
}
return chainJobs;
}
+ /**
+ * Dispatches one {@link BatchPlanStep} to the matching flow/group/meter batch service and
+ * adapts the returned future for chaining at {@code currentOffset}. Unsupported step types
+ * are skipped with a warning and yield an empty successful result.
+ */
+ private ListenableFuture<RpcResult<ProcessFlatBatchOutput>> getChainOutput(final NodeRef node,
+ final BatchPlanStep planStep,
+ final int currentOffset) {
+ final ListenableFuture<RpcResult<ProcessFlatBatchOutput>> chainOutput;
+
+ switch (planStep.getStepType()) {
+ case FLOW_ADD:
+ final AddFlowsBatchInput addFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchAddFlow(planStep, node);
+ final Future<RpcResult<AddFlowsBatchOutput>> resultAddFlowFuture = salFlowService.addFlowsBatch(addFlowsBatchInput);
+ chainOutput = FlatBatchFlowAdapters.convertFlowBatchFutureForChain(resultAddFlowFuture, currentOffset);
+ break;
+ case FLOW_REMOVE:
+ final RemoveFlowsBatchInput removeFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchRemoveFlow(planStep, node);
+ final Future<RpcResult<RemoveFlowsBatchOutput>> resultRemoveFlowFuture = salFlowService.removeFlowsBatch(removeFlowsBatchInput);
+ chainOutput = FlatBatchFlowAdapters.convertFlowBatchFutureForChain(resultRemoveFlowFuture, currentOffset);
+ break;
+ case FLOW_UPDATE:
+ final UpdateFlowsBatchInput updateFlowsBatchInput = FlatBatchFlowAdapters.adaptFlatBatchUpdateFlow(planStep, node);
+ final Future<RpcResult<UpdateFlowsBatchOutput>> resultUpdateFlowFuture = salFlowService.updateFlowsBatch(updateFlowsBatchInput);
+ chainOutput = FlatBatchFlowAdapters.convertFlowBatchFutureForChain(resultUpdateFlowFuture, currentOffset);
+ break;
+ case GROUP_ADD:
+ final AddGroupsBatchInput addGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchAddGroup(planStep, node);
+ final Future<RpcResult<AddGroupsBatchOutput>> resultAddGroupFuture = salGroupService.addGroupsBatch(addGroupsBatchInput);
+ chainOutput = FlatBatchGroupAdapters.convertGroupBatchFutureForChain(resultAddGroupFuture, currentOffset);
+ break;
+ case GROUP_REMOVE:
+ final RemoveGroupsBatchInput removeGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchRemoveGroup(planStep, node);
+ final Future<RpcResult<RemoveGroupsBatchOutput>> resultRemoveGroupFuture = salGroupService.removeGroupsBatch(removeGroupsBatchInput);
+ chainOutput = FlatBatchGroupAdapters.convertGroupBatchFutureForChain(resultRemoveGroupFuture, currentOffset);
+ break;
+ case GROUP_UPDATE:
+ final UpdateGroupsBatchInput updateGroupsBatchInput = FlatBatchGroupAdapters.adaptFlatBatchUpdateGroup(planStep, node);
+ final Future<RpcResult<UpdateGroupsBatchOutput>> resultUpdateGroupFuture = salGroupService.updateGroupsBatch(updateGroupsBatchInput);
+ chainOutput = FlatBatchGroupAdapters.convertGroupBatchFutureForChain(resultUpdateGroupFuture, currentOffset);
+ break;
+ case METER_ADD:
+ final AddMetersBatchInput addMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchAddMeter(planStep, node);
+ final Future<RpcResult<AddMetersBatchOutput>> resultAddMeterFuture = salMeterService.addMetersBatch(addMetersBatchInput);
+ chainOutput = FlatBatchMeterAdapters.convertMeterBatchFutureForChain(resultAddMeterFuture, currentOffset);
+ break;
+ case METER_REMOVE:
+ final RemoveMetersBatchInput removeMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchRemoveMeter(planStep, node);
+ final Future<RpcResult<RemoveMetersBatchOutput>> resultRemoveMeterFuture = salMeterService.removeMetersBatch(removeMetersBatchInput);
+ chainOutput = FlatBatchMeterAdapters.convertMeterBatchFutureForChain(resultRemoveMeterFuture, currentOffset);
+ break;
+ case METER_UPDATE:
+ final UpdateMetersBatchInput updateMetersBatchInput = FlatBatchMeterAdapters.adaptFlatBatchUpdateMeter(planStep, node);
+ final Future<RpcResult<UpdateMetersBatchOutput>> resultUpdateMeterFuture = salMeterService.updateMetersBatch(updateMetersBatchInput);
+ chainOutput = FlatBatchMeterAdapters.convertMeterBatchFutureForChain(resultUpdateMeterFuture, currentOffset);
+ break;
+ default:
+ LOG.warn("Unsupported plan-step type occurred: {} -> OMITTING", planStep.getStepType());
+ chainOutput = FlatBatchUtil.createEmptyRpcBatchResultFuture(true);
+ }
+
+ return chainOutput;
+ }
}
@Override
public Future<RpcResult<AddFlowOutput>> addFlow(final AddFlowInput input) {
final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
-
final ListenableFuture<RpcResult<AddFlowOutput>> future =
flowAdd.processFlowModInputBuilders(flowAdd.toFlowModInputs(input));
- Futures.addCallback(future, new FutureCallback<RpcResult<AddFlowOutput>>() {
- @Override
- public void onSuccess(final RpcResult<AddFlowOutput> rpcResult) {
- if (rpcResult.isSuccessful()) {
- final FlowId flowId;
- final FlowDescriptor flowDescriptor;
-
- if (Objects.nonNull(input.getFlowRef())) {
- flowId = input.getFlowRef().getValue().firstKeyOf(Flow.class, FlowKey.class).getId();
- flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
- deviceContext.getDeviceFlowRegistry().store(flowRegistryKey, flowDescriptor);
-
- } else {
- flowId = deviceContext.getDeviceFlowRegistry().storeIfNecessary(flowRegistryKey);
- flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
- }
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("flow add with id={},finished without error,", flowId.getValue());
- }
- if (itemLifecycleListener != null) {
- KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
- deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
- final FlowBuilder flowBuilder = new FlowBuilder(input).setId(flowDescriptor.getFlowId());
- itemLifecycleListener.onAdded(flowPath, flowBuilder.build());
- }
- } else {
- deviceContext.getDeviceFlowRegistry().markToBeremoved(flowRegistryKey);
- LOG.error("flow add failed for flow={}, errors={}", input.toString(),
- errorsToString(rpcResult.getErrors()));
- }
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.error("Service call for adding flow={} failed, reason {} .", input.toString(), throwable);
- }
- });
+ Futures.addCallback(future, new AddFlowCallback(input, flowRegistryKey));
return future;
}
final ListenableFuture<RpcResult<RemoveFlowOutput>> future =
flowRemove.processFlowModInputBuilders(flowRemove.toFlowModInputs(input));
- Futures.addCallback(future, new FutureCallback<RpcResult<RemoveFlowOutput>>() {
- @Override
- public void onSuccess(final RpcResult<RemoveFlowOutput> result) {
- if (result.isSuccessful()) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("flow removed finished without error,");
- }
- FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
- deviceContext.getDeviceFlowRegistry().markToBeremoved(flowRegistryKey);
- if (itemLifecycleListener != null) {
- final FlowDescriptor flowDescriptor =
- deviceContext.getDeviceFlowRegistry().retrieveIdForFlow(flowRegistryKey);
- if (flowDescriptor != null) {
- KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
- deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
- itemLifecycleListener.onRemoved(flowPath);
- }
- }
- } else {
- LOG.error("Flow remove failed with errors : {}",errorsToString(result.getErrors()));
- }
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.error("Service call for removing flow with id {} failed ,reason {}",input.getFlowRef().getValue(), throwable);
- }
- });
+ Futures.addCallback(future, new RemoveFlowCallback(input));
return future;
}
@Override
public Future<RpcResult<UpdateFlowOutput>> updateFlow(final UpdateFlowInput input) {
- final UpdateFlowInput in = input;
- final UpdatedFlow updated = in.getUpdatedFlow();
- final OriginalFlow original = in.getOriginalFlow();
+ final UpdatedFlow updated = input.getUpdatedFlow();
+ final OriginalFlow original = input.getOriginalFlow();
final List<FlowModInputBuilder> allFlowMods = new ArrayList<>();
final List<FlowModInputBuilder> ofFlowModInputs;
allFlowMods.addAll(ofFlowModInputs);
ListenableFuture<RpcResult<UpdateFlowOutput>> future = flowUpdate.processFlowModInputBuilders(allFlowMods);
- Futures.addCallback(future, new FutureCallback<RpcResult<UpdateFlowOutput>>() {
- @Override
- public void onSuccess(final RpcResult<UpdateFlowOutput> o) {
- FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(original);
+ Futures.addCallback(future, new UpdateFlowCallback(input));
+ return future;
+ }
+
+ /**
+ * Builds the datastore path to the flow described by {@code flowDescriptor} under the
+ * given node: node / FlowCapableNode augmentation / table / flow.
+ */
+ @VisibleForTesting
+ static KeyedInstanceIdentifier<Flow, FlowKey> createFlowPath(FlowDescriptor flowDescriptor,
+ KeyedInstanceIdentifier<Node, NodeKey> nodePath) {
+ return nodePath.augmentation(FlowCapableNode.class)
+ .child(Table.class, flowDescriptor.getTableKey())
+ .child(Flow.class, new FlowKey(flowDescriptor.getFlowId()));
+ }
- FlowRegistryKey updatedflowRegistryKey = FlowRegistryKeyFactory.create(updated);
- final FlowRef flowRef = input.getFlowRef();
- final DeviceFlowRegistry deviceFlowRegistry = deviceContext.getDeviceFlowRegistry();
+ private class AddFlowCallback implements FutureCallback<RpcResult<AddFlowOutput>> {
+ private final AddFlowInput input;
+ private final FlowRegistryKey flowRegistryKey;
- if (flowRef == null) { //then this is equivalent to a delete
- deviceFlowRegistry.markToBeremoved(flowRegistryKey);
+ private AddFlowCallback(final AddFlowInput input,
+ final FlowRegistryKey flowRegistryKey) {
+ this.input = input;
+ this.flowRegistryKey = flowRegistryKey;
+ }
- if (itemLifecycleListener != null) {
- final FlowDescriptor flowDescriptor =
- deviceContext.getDeviceFlowRegistry().retrieveIdForFlow( flowRegistryKey);
- KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
- deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
- itemLifecycleListener.onRemoved(flowPath);
- }
- } else { //this is either an add or an update
- final FlowId flowId = flowRef.getValue().firstKeyOf(Flow.class, FlowKey.class).getId();
- final FlowDescriptor flowDescriptor = FlowDescriptorFactory.create(updated.getTableId(), flowId);
- deviceFlowRegistry.store(updatedflowRegistryKey, flowDescriptor);
+ @Override
+ public void onSuccess(final RpcResult<AddFlowOutput> rpcResult) {
+ if (rpcResult.isSuccessful()) {
+ final FlowDescriptor flowDescriptor;
+
+ if (Objects.nonNull(input.getFlowRef())) {
+ final FlowId flowId = input.getFlowRef().getValue().firstKeyOf(Flow.class, FlowKey.class).getId();
+ flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
+ deviceContext.getDeviceFlowRegistry().store(flowRegistryKey, flowDescriptor);
+ } else {
+ final FlowId flowId = deviceContext.getDeviceFlowRegistry().storeIfNecessary(flowRegistryKey);
+ flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
+ }
- if (itemLifecycleListener != null) {
+ LOG.debug("flow add with id={},finished without error,", flowDescriptor.getFlowId().getValue());
+
+ if (itemLifecycleListener != null) {
+ KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
+ deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
+ final FlowBuilder flowBuilder = new FlowBuilder(input).setId(flowDescriptor.getFlowId());
+ itemLifecycleListener.onAdded(flowPath, flowBuilder.build());
+ }
+ } else {
+ LOG.error("flow add failed for flow={}, errors={}", input.toString(), errorsToString(rpcResult.getErrors()));
+ }
+ }
+
+ @Override
+ public void onFailure(final Throwable throwable) {
+ LOG.error("Service call for adding flow={} failed, reason {} .", input.toString(), throwable);
+ }
+ }
+
+ private class RemoveFlowCallback implements FutureCallback<RpcResult<RemoveFlowOutput>> {
+ private final RemoveFlowInput input;
+
+ private RemoveFlowCallback(final RemoveFlowInput input) {
+ this.input = input;
+ }
+
+ @Override
+ public void onSuccess(final RpcResult<RemoveFlowOutput> result) {
+ if (result.isSuccessful()) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("flow removed finished without error,");
+ }
+ FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
+ deviceContext.getDeviceFlowRegistry().markToBeremoved(flowRegistryKey);
+ if (itemLifecycleListener != null) {
+ final FlowDescriptor flowDescriptor =
+ deviceContext.getDeviceFlowRegistry().retrieveIdForFlow(flowRegistryKey);
+ if (flowDescriptor != null) {
KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
- final FlowBuilder flowBuilder = new FlowBuilder(
- input.getUpdatedFlow()).setId(flowDescriptor.getFlowId());
-
- boolean isUpdate = null !=
- deviceFlowRegistry.retrieveIdForFlow(flowRegistryKey);
- if (isUpdate) {
- itemLifecycleListener.onUpdated(flowPath, flowBuilder.build());
- } else {
- itemLifecycleListener.onAdded(flowPath, flowBuilder.build());
- }
+ itemLifecycleListener.onRemoved(flowPath);
}
}
+ } else {
+ LOG.error("Flow remove failed with errors : {}",errorsToString(result.getErrors()));
}
+ }
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.error("Service call for updating flow failed, reason{}", throwable);
- }
- });
- return future;
+ @Override
+ public void onFailure(final Throwable throwable) {
+ LOG.error("Service call for removing flow with id {} failed ,reason {}",input.getFlowRef().getValue(), throwable);
+ }
}
- @VisibleForTesting
- static KeyedInstanceIdentifier<Flow, FlowKey> createFlowPath(FlowDescriptor flowDescriptor,
- KeyedInstanceIdentifier<Node, NodeKey> nodePath) {
- return nodePath.augmentation(FlowCapableNode.class)
- .child(Table.class, flowDescriptor.getTableKey())
- .child(Flow.class, new FlowKey(flowDescriptor.getFlowId()));
+ /**
+ * Callback updating the device flow registry and notifying the item lifecycle listener
+ * once an update-flow RPC completes. A missing flow-ref is treated as a delete; otherwise
+ * the updated flow is stored and reported as either an update or an add.
+ */
+ private class UpdateFlowCallback implements FutureCallback<RpcResult<UpdateFlowOutput>> {
+ private final UpdateFlowInput input;
+
+ private UpdateFlowCallback(UpdateFlowInput input) {
+ this.input = input;
+ }
+
+ @Override
+ public void onSuccess(final RpcResult<UpdateFlowOutput> o) {
+ final UpdatedFlow updated = input.getUpdatedFlow();
+ final OriginalFlow original = input.getOriginalFlow();
+ FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(original);
+
+ FlowRegistryKey updatedflowRegistryKey = FlowRegistryKeyFactory.create(updated);
+ final FlowRef flowRef = input.getFlowRef();
+ final DeviceFlowRegistry deviceFlowRegistry = deviceContext.getDeviceFlowRegistry();
+
+ if (flowRef == null) {
+ // then this is equivalent to a delete
+ deviceFlowRegistry.markToBeremoved(flowRegistryKey);
+
+ if (itemLifecycleListener != null) {
+ final FlowDescriptor flowDescriptor =
+ deviceContext.getDeviceFlowRegistry().retrieveIdForFlow( flowRegistryKey);
+ KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
+ deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
+ itemLifecycleListener.onRemoved(flowPath);
+ }
+ } else {
+ // this is either an add or an update
+ final FlowId flowId = flowRef.getValue().firstKeyOf(Flow.class, FlowKey.class).getId();
+ final FlowDescriptor flowDescriptor = FlowDescriptorFactory.create(updated.getTableId(), flowId);
+ deviceFlowRegistry.store(updatedflowRegistryKey, flowDescriptor);
+
+ if (itemLifecycleListener != null) {
+ KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
+ deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
+ final FlowBuilder flowBuilder = new FlowBuilder(
+ input.getUpdatedFlow()).setId(flowDescriptor.getFlowId());
+
+ // NOTE(review): "is this an update?" is decided by whether the ORIGINAL flow's key
+ // is still registered — presumably intentional, but confirm it holds after the
+ // store() of the updated key above.
+ boolean isUpdate = null !=
+ deviceFlowRegistry.retrieveIdForFlow(flowRegistryKey);
+ if (isUpdate) {
+ itemLifecycleListener.onUpdated(flowPath, flowBuilder.build());
+ } else {
+ itemLifecycleListener.onAdded(flowPath, flowBuilder.build());
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(final Throwable throwable) {
+ LOG.error("Service call for updating flow failed, reason{}", throwable);
+ }
+ }
}
\ No newline at end of file
}
@Override
- protected OfHeader buildRequest(final Xid xid, final UpdatePortInput input) {
+ protected OfHeader buildRequest(final Xid xid, final UpdatePortInput input) throws ServiceException {
final Port inputPort = input.getUpdatedPort().getPort().getPort().get(0);
final Optional<PortModInput> ofPortModInput = convertorExecutor.convert(inputPort, data);
}
@Override
- protected OfHeader buildRequest(final Xid xid, final SetRoleInput input) {
+ protected OfHeader buildRequest(final Xid xid, final SetRoleInput input) throws ServiceException {
return null;
}
}
@Override
- protected OfHeader buildRequest(final Xid xid, final UpdateTableInput input) {
+ protected OfHeader buildRequest(final Xid xid, final UpdateTableInput input) throws ServiceException {
final MultipartRequestTableFeaturesCaseBuilder caseBuilder = new MultipartRequestTableFeaturesCaseBuilder();
final MultipartRequestTableFeaturesBuilder requestBuilder = new MultipartRequestTableFeaturesBuilder();
--- /dev/null
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.services;
+
+/**
+ * Exception reported when an OpenFlow service fails to build its request message, thrown by
+ * {@link org.opendaylight.openflowplugin.impl.services.AbstractService#buildRequest(org.opendaylight.openflowplugin.api.openflow.device.Xid, Object)}.
+ */
+public class ServiceException extends Exception {
+ // Exception is Serializable; pin the serial form explicitly.
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param cause the underlying failure (e.g. a conversion error), preserved as the cause
+ */
+ public ServiceException(final Throwable cause) {
+ super(cause);
+ }
+}
import java.util.List;
import java.util.Optional;
import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
+import org.opendaylight.openflowplugin.impl.util.GroupUtil;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.data.VersionDatapathIdConvertorData;
import org.opendaylight.openflowplugin.openflow.md.util.InventoryDataServiceUtil;
message.setGroupCapabilitiesSupported(supportedCapabilities);
- message.setActions(getGroupActionsSupportBitmap(replyBody.getActionsBitmap()));
+ message.setActions(GroupUtil.extractGroupActionsSupportBitmap(replyBody.getActionsBitmap()));
listDataObject.add(message.build());
}
BigInteger bigIntXid = BigInteger.valueOf(xid);
return new TransactionId(bigIntXid);
}
-
- /*
- * Method returns the bitmap of actions supported by each group.
- *
- * @param actionsSupported
- * @return
- */
- static List<Long> getGroupActionsSupportBitmap(final List<org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType> actionsSupported) {
- List<Long> supportActionByGroups = new ArrayList<Long>();
- for (org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType supportedActions : actionsSupported) {
- long supportActionBitmap = 0;
- supportActionBitmap |= supportedActions.isOFPATOUTPUT() ? (1 << 0) : 0;
- supportActionBitmap |= supportedActions.isOFPATCOPYTTLOUT() ? (1 << 11) : 0;
- supportActionBitmap |= supportedActions.isOFPATCOPYTTLIN() ? (1 << 12) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETMPLSTTL() ? (1 << 15) : 0;
- supportActionBitmap |= supportedActions.isOFPATDECMPLSTTL() ? (1 << 16) : 0;
- supportActionBitmap |= supportedActions.isOFPATPUSHVLAN() ? (1 << 17) : 0;
- supportActionBitmap |= supportedActions.isOFPATPOPVLAN() ? (1 << 18) : 0;
- supportActionBitmap |= supportedActions.isOFPATPUSHMPLS() ? (1 << 19) : 0;
- supportActionBitmap |= supportedActions.isOFPATPOPMPLS() ? (1 << 20) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETQUEUE() ? (1 << 21) : 0;
- supportActionBitmap |= supportedActions.isOFPATGROUP() ? (1 << 22) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETNWTTL() ? (1 << 23) : 0;
- supportActionBitmap |= supportedActions.isOFPATDECNWTTL() ? (1 << 24) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETFIELD() ? (1 << 25) : 0;
- supportActionBitmap |= supportedActions.isOFPATPUSHPBB() ? (1 << 26) : 0;
- supportActionBitmap |= supportedActions.isOFPATPOPPBB() ? (1 << 27) : 0;
- supportActionByGroups.add(Long.valueOf(supportActionBitmap));
- }
- return supportActionByGroups;
- }
-
}
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.api.openflow.md.core.TranslatorKey;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.openflow.md.util.FlowCreatorUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAggregateFlowStatisticsFromFlowTableForAllFlowsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAggregateFlowStatisticsFromFlowTableForAllFlowsInput input) throws ServiceException {
// Create multipart request body for fetch all the group stats
final MultipartRequestAggregateCaseBuilder multipartRequestAggregateCaseBuilder = new MultipartRequestAggregateCaseBuilder();
final MultipartRequestAggregateBuilder mprAggregateRequestBuilder = new MultipartRequestAggregateBuilder();
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.FlowStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllFlowsStatisticsFromAllFlowTablesInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllFlowsStatisticsFromAllFlowTablesInput input) throws ServiceException {
final MultipartRequestInputBuilder mprInput = RequestInputUtils.createMultipartHeader(
MultipartType.OFPMPFLOW, xid.getValue(), getVersion());
mprInput.setMultipartRequestBody(flowCase);
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.FlowStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllFlowStatisticsFromFlowTableInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllFlowStatisticsFromFlowTableInput input) throws ServiceException {
final MultipartRequestFlowBuilder mprFlowRequestBuilder = new MultipartRequestFlowBuilder();
mprFlowRequestBuilder.setTableId(input.getTableId().getValue());
mprFlowRequestBuilder.setOutPort(OFConstants.OFPP_ANY);
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.GroupStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllGroupStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllGroupStatisticsInput input) throws ServiceException {
// Create multipart request header
final MultipartRequestInputBuilder mprInput = RequestInputUtils.createMultipartHeader(
MultipartType.OFPMPGROUP, xid.getValue(), getVersion());
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.data.VersionConvertorData;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllMeterConfigStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllMeterConfigStatisticsInput input) throws ServiceException {
MultipartRequestInputBuilder mprInput = RequestInputUtils
.createMultipartHeader(MultipartType.OFPMPMETERCONFIG, xid.getValue(), getVersion());
return mprInput.setMultipartRequestBody(METER_CONFIG_CASE).build();
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.MeterStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllMeterStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllMeterStatisticsInput input) throws ServiceException {
MultipartRequestInputBuilder mprInput = RequestInputUtils
.createMultipartHeader(MultipartType.OFPMPMETER, xid.getValue(), getVersion());
return mprInput.setMultipartRequestBody(METER_CASE).build();
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.NodeConnectorStatisticsToNotificationTransformer;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.TransactionId;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllNodeConnectorsStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllNodeConnectorsStatisticsInput input) throws ServiceException {
MultipartRequestInputBuilder mprInput = RequestInputUtils
.createMultipartHeader(MultipartType.OFPMPPORTSTATS, xid.getValue(), getVersion());
mprInput.setMultipartRequestBody(PORT_STATS_CASE);
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.QueueStatisticsToNotificationTransformer;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.TransactionId;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllQueuesStatisticsFromAllPortsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllQueuesStatisticsFromAllPortsInput input) throws ServiceException {
// Set request body to main multipart request
MultipartRequestInputBuilder mprInput = RequestInputUtils.createMultipartHeader(
MultipartType.OFPMPQUEUE, xid.getValue(), getVersion());
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.QueueStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.util.InventoryDataServiceUtil;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAllQueuesStatisticsFromGivenPortInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAllQueuesStatisticsFromGivenPortInput input) throws ServiceException {
MultipartRequestQueueCaseBuilder caseBuilder = new MultipartRequestQueueCaseBuilder();
MultipartRequestQueueBuilder mprQueueBuilder = new MultipartRequestQueueBuilder();
// Select all queues
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.FlowStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetFlowStatisticsFromFlowTableInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetFlowStatisticsFromFlowTableInput input) throws ServiceException {
final MultipartRequestFlowCaseBuilder multipartRequestFlowCaseBuilder = new MultipartRequestFlowCaseBuilder();
final MultipartRequestFlowBuilder mprFlowRequestBuilder = new MultipartRequestFlowBuilder();
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.data.VersionConvertorData;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetGroupDescriptionInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetGroupDescriptionInput input) throws ServiceException {
final MultipartRequestInputBuilder mprInput = RequestInputUtils.createMultipartHeader(
MultipartType.OFPMPGROUPDESC, xid.getValue(), getVersion());
mprInput.setMultipartRequestBody(GROUP_DESC_CASE);
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
+import org.opendaylight.openflowplugin.impl.util.GroupUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.TransactionId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.SelectLiveness;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.SelectWeight;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.GroupCapabilities;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.GroupTypes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetGroupFeaturesInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetGroupFeaturesInput input) throws ServiceException {
final MultipartRequestInputBuilder mprInput = RequestInputUtils.createMultipartHeader(
MultipartType.OFPMPGROUPFEATURES, xid.getValue(), getVersion());
mprInput.setMultipartRequestBody(GROUP_FEAT_CASE);
notification.setGroupTypesSupported(extractSupportedGroupTypes(replyBody.getTypes()));
notification.setMaxGroups(replyBody.getMaxGroups());
notification.setGroupCapabilitiesSupported(extractSupportedCapabilities(replyBody.getCapabilities()));
- notification.setActions(extractGroupActionsSupportBitmap(replyBody.getActionsBitmap()));
+ notification.setActions(GroupUtil.extractGroupActionsSupportBitmap(replyBody.getActionsBitmap()));
return notification.build();
}
}
return supportedGroups;
}
-
- @VisibleForTesting
- static List<Long> extractGroupActionsSupportBitmap(final List<ActionType> actionsSupported) {
- List<Long> supportActionByGroups = new ArrayList<>();
- for (ActionType supportedActions : actionsSupported) {
- long supportActionBitmap = 0;
- supportActionBitmap |= supportedActions.isOFPATOUTPUT() ? (1 << 0) : 0;
- supportActionBitmap |= supportedActions.isOFPATCOPYTTLOUT() ? (1 << 11) : 0;
- supportActionBitmap |= supportedActions.isOFPATCOPYTTLIN() ? (1 << 12) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETMPLSTTL() ? (1 << 15) : 0;
- supportActionBitmap |= supportedActions.isOFPATDECMPLSTTL() ? (1 << 16) : 0;
- supportActionBitmap |= supportedActions.isOFPATPUSHVLAN() ? (1 << 17) : 0;
- supportActionBitmap |= supportedActions.isOFPATPOPVLAN() ? (1 << 18) : 0;
- supportActionBitmap |= supportedActions.isOFPATPUSHMPLS() ? (1 << 19) : 0;
- supportActionBitmap |= supportedActions.isOFPATPOPMPLS() ? (1 << 20) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETQUEUE() ? (1 << 21) : 0;
- supportActionBitmap |= supportedActions.isOFPATGROUP() ? (1 << 22) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETNWTTL() ? (1 << 23) : 0;
- supportActionBitmap |= supportedActions.isOFPATDECNWTTL() ? (1 << 24) : 0;
- supportActionBitmap |= supportedActions.isOFPATSETFIELD() ? (1 << 25) : 0;
- supportActionBitmap |= supportedActions.isOFPATPUSHPBB() ? (1 << 26) : 0;
- supportActionBitmap |= supportedActions.isOFPATPOPPBB() ? (1 << 27) : 0;
- supportActionByGroups.add(Long.valueOf(supportActionBitmap));
- }
- return supportActionByGroups;
- }
}
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.GroupStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetGroupStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetGroupStatisticsInput input) throws ServiceException {
final MultipartRequestGroupCaseBuilder caseBuilder = new MultipartRequestGroupCaseBuilder();
final MultipartRequestGroupBuilder mprGroupBuild = new MultipartRequestGroupBuilder();
mprGroupBuild.setGroupId(new GroupId(input.getGroupId().getValue()));
import org.opendaylight.openflowplugin.impl.services.AbstractMultipartService;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.match.MatchReactor;
import org.opendaylight.openflowplugin.openflow.md.util.FlowCreatorUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput input) throws ServiceException {
final MultipartRequestAggregateCaseBuilder multipartRequestAggregateCaseBuilder = new MultipartRequestAggregateCaseBuilder();
final MultipartRequestAggregateBuilder mprAggregateRequestBuilder = new MultipartRequestAggregateBuilder();
final short tableId = MoreObjects.firstNonNull(input.getTableId(), OFConstants.OFPTT_ALL).shortValue();
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.Counter32;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.TransactionId;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetMeterFeaturesInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetMeterFeaturesInput input) throws ServiceException {
MultipartRequestInputBuilder mprInput =
RequestInputUtils.createMultipartHeader(MultipartType.OFPMPMETERFEATURES, xid.getValue(), getVersion());
mprInput.setMultipartRequestBody(METER_FEATURES_CASE);
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.MeterStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetMeterStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetMeterStatisticsInput input) throws ServiceException {
MultipartRequestMeterCaseBuilder caseBuilder =
new MultipartRequestMeterCaseBuilder();
MultipartRequestMeterBuilder mprMeterBuild =
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.QueueStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.util.InventoryDataServiceUtil;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetQueueStatisticsFromGivenPortInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetQueueStatisticsFromGivenPortInput input) throws ServiceException {
MultipartRequestQueueCaseBuilder caseBuilder = new MultipartRequestQueueCaseBuilder();
MultipartRequestQueueBuilder mprQueueBuilder = new MultipartRequestQueueBuilder();
// Select specific queue
import org.opendaylight.openflowplugin.api.openflow.device.RequestContextStack;
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.Counter32;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.Counter64;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetFlowTablesStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetFlowTablesStatisticsInput input) throws ServiceException {
// Create multipart request body for fetch all the group stats
final MultipartRequestTableCaseBuilder multipartRequestTableCaseBuilder = new MultipartRequestTableCaseBuilder();
final MultipartRequestTableBuilder multipartRequestTableBuilder = new MultipartRequestTableBuilder();
import org.opendaylight.openflowplugin.api.openflow.device.Xid;
import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.AbstractCompatibleStatService;
import org.opendaylight.openflowplugin.impl.statistics.services.compatibility.NodeConnectorStatisticsToNotificationTransformer;
import org.opendaylight.openflowplugin.openflow.md.util.InventoryDataServiceUtil;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final GetNodeConnectorStatisticsInput input) {
+ protected OfHeader buildRequest(final Xid xid, final GetNodeConnectorStatisticsInput input) throws ServiceException {
MultipartRequestPortStatsCaseBuilder caseBuilder =
new MultipartRequestPortStatsCaseBuilder();
MultipartRequestPortStatsBuilder mprPortStatsBuilder =
* pulled out flow stats to notification transformation
*/
public class FlowStatisticsToNotificationTransformer {
+
+ private FlowStatisticsToNotificationTransformer() {
+ // Hide implicit constructor
+ }
+
/**
* @param mpResult raw multipart response from device
* @param deviceInfo device state
* pulled out group stats to notification transformation
*/
public class GroupStatisticsToNotificationTransformer {
+ private GroupStatisticsToNotificationTransformer() {
+ // Hide implicit constructor
+ }
/**
* @param mpReplyList raw multipart response from device
* @param deviceInfo device state
*/
public class MeterStatisticsToNotificationTransformer {
+ private MeterStatisticsToNotificationTransformer() {
+ // Hide implicit constructor
+ }
/**
* @param mpReplyList raw multipart response from device
* @param deviceInfo device state
*/
public class NodeConnectorStatisticsToNotificationTransformer {
+ private NodeConnectorStatisticsToNotificationTransformer() {
+ // Hide implicit constructor
+ }
+
/**
* @param mpReplyList raw multipart response from device
* @param deviceInfo device basic info
*/
public class QueueStatisticsToNotificationTransformer {
+ private QueueStatisticsToNotificationTransformer() {
+ // Hide implicit constructor
+ }
+
/**
* @param mpReplyList raw multipart response from device
* @param deviceInfo device state
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.StatisticsGatherer;
import org.opendaylight.openflowplugin.impl.common.MultipartRequestInputFactory;
import org.opendaylight.openflowplugin.impl.services.AbstractMultipartOnTheFlyService;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.ofpspecific.EventsTimeCounter;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final MultipartType input) {
+ protected OfHeader buildRequest(final Xid xid, final MultipartType input) throws ServiceException {
return MultipartRequestInputFactory.makeMultipartRequestInput(xid.getValue(), getVersion(), input);
}
}
import org.opendaylight.openflowplugin.api.openflow.statistics.ofpspecific.StatisticsGatherer;
import org.opendaylight.openflowplugin.impl.common.MultipartRequestInputFactory;
import org.opendaylight.openflowplugin.impl.services.AbstractMultipartService;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.openflowplugin.impl.statistics.ofpspecific.EventsTimeCounter;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.MultipartReply;
}
@Override
- protected OfHeader buildRequest(final Xid xid, final MultipartType input) {
+ protected OfHeader buildRequest(final Xid xid, final MultipartType input) throws ServiceException {
return MultipartRequestInputFactory.makeMultipartRequestInput(xid.getValue(), getVersion(), input);
}
}
import org.opendaylight.openflowplugin.impl.services.AbstractMultipartService;
import org.opendaylight.openflowplugin.impl.services.RequestInputUtils;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
+import org.opendaylight.openflowplugin.impl.services.ServiceException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.direct.statistics.rev160511.StoreStatsGrouping;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.MultipartReply;
}
@Override
- protected OfHeader buildRequest(Xid xid, I input) throws Exception {
+ protected OfHeader buildRequest(Xid xid, I input) throws ServiceException {
return RequestInputUtils.createMultipartHeader(multipartType, xid.getValue(), getVersion())
.setMultipartRequestBody(buildRequestBody(input))
.build();
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
-import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
private static final Logger LOG = LoggerFactory.getLogger(DeviceInitializationUtils.class);
+    /** Static utility class — not meant to be instantiated. */
+    private DeviceInitializationUtils() {
+        // Hiding implicit constructor
+    }
+
/**
* InitializationNodeInformation is good to call only for MASTER otherwise we will have not empty transaction
* for every Cluster Node (SLAVE too) and we will get race-condition by closing Connection.
final InstanceIdentifier<Node> nodeII, final Collection<MultipartReply> result,
final ConvertorExecutor convertorExecutor) {
try {
- for (final MultipartReply reply : result) {
- final MultipartReplyBody body = reply.getMultipartReplyBody();
- switch (type) {
- case OFPMPDESC:
- Preconditions.checkArgument(body instanceof MultipartReplyDescCase);
- final MultipartReplyDesc replyDesc = ((MultipartReplyDescCase) body).getMultipartReplyDesc();
- final FlowCapableNode fcNode = NodeStaticReplyTranslatorUtil.nodeDescTranslator(replyDesc,
- getIpAddressOf(dContext));
- final InstanceIdentifier<FlowCapableNode> fNodeII = nodeII.augmentation(FlowCapableNode.class);
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, fNodeII, fcNode);
- break;
-
- case OFPMPTABLEFEATURES:
- Preconditions.checkArgument(body instanceof MultipartReplyTableFeaturesCase);
- final MultipartReplyTableFeatures tableFeaturesMP = ((MultipartReplyTableFeaturesCase) body)
- .getMultipartReplyTableFeatures();
- final List<TableFeatures> tableFeatures = NodeStaticReplyTranslatorUtil
- .nodeTableFeatureTranslator(tableFeaturesMP, dContext.getDeviceInfo().getVersion(), convertorExecutor);
- for (final TableFeatures tableFeature : tableFeatures) {
- final Short tableId = tableFeature.getTableId();
- final KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII =
- nodeII.augmentation(FlowCapableNode.class)
- .child(TableFeatures.class, new TableFeaturesKey(tableId));
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableFeaturesII, tableFeature);
-
- // write parent for table statistics
- final KeyedInstanceIdentifier<Table, TableKey> tableII =
- nodeII.augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(tableId));
- final TableBuilder tableBld = new TableBuilder().setId(tableId)
- .addAugmentation(FlowTableStatisticsData.class,
- new FlowTableStatisticsDataBuilder().build());
-
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableII, tableBld.build());
- }
- break;
-
- case OFPMPMETERFEATURES:
- Preconditions.checkArgument(body instanceof MultipartReplyMeterFeaturesCase);
- final MultipartReplyMeterFeatures meterFeatures = ((MultipartReplyMeterFeaturesCase) body)
- .getMultipartReplyMeterFeatures();
- final NodeMeterFeatures mFeature = NodeStaticReplyTranslatorUtil
- .nodeMeterFeatureTranslator(meterFeatures);
- final InstanceIdentifier<NodeMeterFeatures> mFeatureII = nodeII
- .augmentation(NodeMeterFeatures.class);
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, mFeatureII, mFeature);
- if (0L < mFeature.getMeterFeatures().getMaxMeter().getValue()) {
- dContext.getDeviceState().setMeterAvailable(true);
+ result.stream()
+ .map(MultipartReply::getMultipartReplyBody)
+ .forEach(multipartReplyBody -> {
+ if (!(writeDesc(type, multipartReplyBody, dContext, nodeII)
+ || writeTableFeatures(type, multipartReplyBody, dContext, nodeII, convertorExecutor)
+ || writeMeterFeatures(type, multipartReplyBody, dContext, nodeII)
+ || writeGroupFeatures(type, multipartReplyBody, dContext, nodeII)
+ || writePortDesc(type, multipartReplyBody, dContext, nodeII))) {
+ throw new IllegalArgumentException("Unexpected MultipartType " + type);
}
- break;
-
- case OFPMPGROUPFEATURES:
- Preconditions.checkArgument(body instanceof MultipartReplyGroupFeaturesCase);
- final MultipartReplyGroupFeatures groupFeatures = ((MultipartReplyGroupFeaturesCase) body)
- .getMultipartReplyGroupFeatures();
- final NodeGroupFeatures gFeature = NodeStaticReplyTranslatorUtil
- .nodeGroupFeatureTranslator(groupFeatures);
- final InstanceIdentifier<NodeGroupFeatures> gFeatureII = nodeII
- .augmentation(NodeGroupFeatures.class);
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, gFeatureII, gFeature);
- break;
-
- case OFPMPPORTDESC:
- Preconditions.checkArgument(body instanceof MultipartReplyPortDescCase);
- final MultipartReplyPortDesc portDesc = ((MultipartReplyPortDescCase) body)
- .getMultipartReplyPortDesc();
- for (final PortGrouping port : portDesc.getPorts()) {
- final short ofVersion = dContext.getDeviceInfo().getVersion();
- final TranslatorKey translatorKey = new TranslatorKey(ofVersion, PortGrouping.class.getName());
- final MessageTranslator<PortGrouping, FlowCapableNodeConnector> translator = dContext.oook()
- .lookupTranslator(translatorKey);
- final FlowCapableNodeConnector fcNodeConnector = translator.translate(port, dContext.getDeviceInfo(), null);
-
- final BigInteger dataPathId = dContext.getPrimaryConnectionContext().getFeatures()
- .getDatapathId();
- final NodeConnectorId nodeConnectorId = NodeStaticReplyTranslatorUtil.nodeConnectorId(
- dataPathId.toString(), port.getPortNo(), ofVersion);
- final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder().setId(nodeConnectorId);
- ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcNodeConnector);
-
- ncBuilder.addAugmentation(FlowCapableNodeConnectorStatisticsData.class,
- new FlowCapableNodeConnectorStatisticsDataBuilder().build());
- final NodeConnector connector = ncBuilder.build();
-
- final InstanceIdentifier<NodeConnector> connectorII = nodeII.child(NodeConnector.class,
- connector.getKey());
- dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, connectorII, connector);
- }
-
- break;
-
- default:
- throw new IllegalArgumentException("Unnexpected MultipartType " + type);
- }
- }
+ });
} catch (final Exception e) {
LOG.debug("translateAndWriteReply: Failed to write node {} to DS ", dContext.getDeviceInfo().getNodeId().toString(), e);
}
}
+    /**
+     * Handles an OFPMPDESC multipart reply: translates the switch description into a
+     * {@link FlowCapableNode} and writes it into the operational datastore under the node path.
+     *
+     * @return true if {@code type} was OFPMPDESC and the reply was written, false otherwise
+     */
+    private static boolean writeDesc(final MultipartType type,
+                                     final MultipartReplyBody body,
+                                     final DeviceContext dContext,
+                                     final InstanceIdentifier<Node> nodeII) {
+        if (!MultipartType.OFPMPDESC.equals(type)) {
+            return false;
+        }
+
+        Preconditions.checkArgument(body instanceof MultipartReplyDescCase);
+        final MultipartReplyDesc replyDesc = ((MultipartReplyDescCase) body).getMultipartReplyDesc();
+        final FlowCapableNode fcNode = NodeStaticReplyTranslatorUtil.nodeDescTranslator(replyDesc,
+                getIpAddressOf(dContext));
+        final InstanceIdentifier<FlowCapableNode> fNodeII = nodeII.augmentation(FlowCapableNode.class);
+        dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, fNodeII, fcNode);
+        return true;
+    }
+
+    /**
+     * Handles an OFPMPTABLEFEATURES multipart reply: writes each translated {@code TableFeatures}
+     * entry and an empty parent {@code Table} (with a FlowTableStatisticsData augmentation) into
+     * the operational datastore, so later table statistics have a parent node to attach to.
+     *
+     * @return true if {@code type} was OFPMPTABLEFEATURES and the reply was written, false otherwise
+     */
+    private static boolean writeTableFeatures(final MultipartType type,
+                                              final MultipartReplyBody body,
+                                              final DeviceContext dContext,
+                                              final InstanceIdentifier<Node> nodeII,
+                                              final ConvertorExecutor convertorExecutor) {
+        if (!MultipartType.OFPMPTABLEFEATURES.equals(type)) {
+            return false;
+        }
+
+        Preconditions.checkArgument(body instanceof MultipartReplyTableFeaturesCase);
+        final MultipartReplyTableFeatures tableFeaturesMP = ((MultipartReplyTableFeaturesCase) body)
+                .getMultipartReplyTableFeatures();
+        final List<TableFeatures> tableFeatures = NodeStaticReplyTranslatorUtil
+                .nodeTableFeatureTranslator(tableFeaturesMP, dContext.getDeviceInfo().getVersion(), convertorExecutor);
+        for (final TableFeatures tableFeature : tableFeatures) {
+            final Short tableId = tableFeature.getTableId();
+            final KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII =
+                    nodeII.augmentation(FlowCapableNode.class)
+                            .child(TableFeatures.class, new TableFeaturesKey(tableId));
+            dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableFeaturesII, tableFeature);
+
+            // write parent for table statistics
+            final KeyedInstanceIdentifier<Table, TableKey> tableII =
+                    nodeII.augmentation(FlowCapableNode.class)
+                            .child(Table.class, new TableKey(tableId));
+            final TableBuilder tableBld = new TableBuilder().setId(tableId)
+                    .addAugmentation(FlowTableStatisticsData.class,
+                            new FlowTableStatisticsDataBuilder().build());
+
+            dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, tableII, tableBld.build());
+        }
+
+        return true;
+    }
+
+    /**
+     * Handles an OFPMPMETERFEATURES multipart reply: writes the translated NodeMeterFeatures
+     * augmentation into the operational datastore. Side effect: marks meters as available on the
+     * device state when the reported max-meter count is greater than zero.
+     *
+     * @return true if {@code type} was OFPMPMETERFEATURES and the reply was written, false otherwise
+     */
+    private static boolean writeMeterFeatures(final MultipartType type,
+                                              final MultipartReplyBody body,
+                                              final DeviceContext dContext,
+                                              final InstanceIdentifier<Node> nodeII) {
+        if (!MultipartType.OFPMPMETERFEATURES.equals(type)) {
+            return false;
+        }
+
+        Preconditions.checkArgument(body instanceof MultipartReplyMeterFeaturesCase);
+        final MultipartReplyMeterFeatures meterFeatures = ((MultipartReplyMeterFeaturesCase) body)
+                .getMultipartReplyMeterFeatures();
+        final NodeMeterFeatures mFeature = NodeStaticReplyTranslatorUtil
+                .nodeMeterFeatureTranslator(meterFeatures);
+        final InstanceIdentifier<NodeMeterFeatures> mFeatureII = nodeII
+                .augmentation(NodeMeterFeatures.class);
+        dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, mFeatureII, mFeature);
+        if (0L < mFeature.getMeterFeatures().getMaxMeter().getValue()) {
+            dContext.getDeviceState().setMeterAvailable(true);
+        }
+
+        return true;
+    }
+
+    /**
+     * Handles an OFPMPGROUPFEATURES multipart reply: writes the translated NodeGroupFeatures
+     * augmentation into the operational datastore under the node path.
+     *
+     * @return true if {@code type} was OFPMPGROUPFEATURES and the reply was written, false otherwise
+     */
+    private static boolean writeGroupFeatures(final MultipartType type,
+                                              final MultipartReplyBody body,
+                                              final DeviceContext dContext,
+                                              final InstanceIdentifier<Node> nodeII) {
+        if (!MultipartType.OFPMPGROUPFEATURES.equals(type)) {
+            return false;
+        }
+
+        Preconditions.checkArgument(body instanceof MultipartReplyGroupFeaturesCase);
+        final MultipartReplyGroupFeatures groupFeatures = ((MultipartReplyGroupFeaturesCase) body)
+                .getMultipartReplyGroupFeatures();
+        final NodeGroupFeatures gFeature = NodeStaticReplyTranslatorUtil
+                .nodeGroupFeatureTranslator(groupFeatures);
+        final InstanceIdentifier<NodeGroupFeatures> gFeatureII = nodeII
+                .augmentation(NodeGroupFeatures.class);
+        dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, gFeatureII, gFeature);
+
+        return true;
+    }
+
+    /**
+     * Handles an OFPMPPORTDESC multipart reply: translates every reported port into a
+     * FlowCapableNodeConnector, wraps it in a NodeConnector (with an empty port-statistics
+     * augmentation as a parent for later stats) and writes it into the operational datastore.
+     *
+     * @return true if {@code type} was OFPMPPORTDESC and the reply was written, false otherwise
+     */
+    private static boolean writePortDesc(final MultipartType type,
+                                         final MultipartReplyBody body,
+                                         final DeviceContext dContext,
+                                         final InstanceIdentifier<Node> nodeII) {
+        if (!MultipartType.OFPMPPORTDESC.equals(type)) {
+            return false;
+        }
+
+        Preconditions.checkArgument(body instanceof MultipartReplyPortDescCase);
+        final MultipartReplyPortDesc portDesc = ((MultipartReplyPortDescCase) body)
+                .getMultipartReplyPortDesc();
+        for (final PortGrouping port : portDesc.getPorts()) {
+            final short ofVersion = dContext.getDeviceInfo().getVersion();
+            // translator is looked up per OF version + payload class
+            final TranslatorKey translatorKey = new TranslatorKey(ofVersion, PortGrouping.class.getName());
+            final MessageTranslator<PortGrouping, FlowCapableNodeConnector> translator = dContext.oook()
+                    .lookupTranslator(translatorKey);
+            final FlowCapableNodeConnector fcNodeConnector = translator.translate(port, dContext.getDeviceInfo(), null);
+
+            // connector id is derived from the datapath id + port number + version
+            final BigInteger dataPathId = dContext.getPrimaryConnectionContext().getFeatures()
+                    .getDatapathId();
+            final NodeConnectorId nodeConnectorId = NodeStaticReplyTranslatorUtil.nodeConnectorId(
+                    dataPathId.toString(), port.getPortNo(), ofVersion);
+            final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder().setId(nodeConnectorId);
+            ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcNodeConnector);
+
+            ncBuilder.addAugmentation(FlowCapableNodeConnectorStatisticsData.class,
+                    new FlowCapableNodeConnectorStatisticsDataBuilder().build());
+            final NodeConnector connector = ncBuilder.build();
+
+            final InstanceIdentifier<NodeConnector> connectorII = nodeII.child(NodeConnector.class,
+                    connector.getKey());
+            dContext.writeToTransaction(LogicalDatastoreType.OPERATIONAL, connectorII, connector);
+        }
+
+        return true;
+    }
+
private static void createEmptyFlowCapableNodeInDs(final DeviceContext deviceContext) {
final FlowCapableNodeBuilder flowCapableNodeBuilder = new FlowCapableNodeBuilder();
final InstanceIdentifier<FlowCapableNode> fNodeII = deviceContext.getDeviceInfo().getNodeInstanceIdentifier()
LOG.info("Static node {} info: {} collected", deviceContext.getDeviceInfo().getNodeId(), type);
translateAndWriteReply(type, deviceContext, nodeII, result, convertorExecutor);
} else {
- final Iterator<RpcError> rpcErrorIterator = rpcResult.getErrors().iterator();
- while (rpcErrorIterator.hasNext()) {
- final RpcError rpcError = rpcErrorIterator.next();
+ for (RpcError rpcError : rpcResult.getErrors()) {
LOG.info("Failed to retrieve static node {} info: {}", type, rpcError.getMessage());
if (null != rpcError.getCause()) {
LOG.trace("Detailed error:", rpcError.getCause());
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* provides flat batch util methods
*/
public final class FlatBatchUtil {
- private static final Logger LOG = LoggerFactory.getLogger(FlatBatchUtil.class);
-
private FlatBatchUtil() {
throw new IllegalStateException("This class should not be instantiated.");
}
@VisibleForTesting
static boolean decideBarrier(final EnumSet<BatchStepType> previousTypes, final BatchStepType type) {
- final boolean needBarrier;
- switch (type) {
- case FLOW_ADD:
- case FLOW_UPDATE:
- needBarrier = previousTypes.contains(BatchStepType.GROUP_ADD)
- || previousTypes.contains(BatchStepType.METER_ADD);
- break;
- case GROUP_ADD:
- needBarrier = previousTypes.contains(BatchStepType.GROUP_ADD)
- || previousTypes.contains(BatchStepType.GROUP_UPDATE);
- break;
- case GROUP_REMOVE:
- needBarrier = previousTypes.contains(BatchStepType.FLOW_REMOVE)
- || previousTypes.contains(BatchStepType.FLOW_UPDATE)
- || previousTypes.contains(BatchStepType.GROUP_REMOVE)
- || previousTypes.contains(BatchStepType.GROUP_UPDATE);
- break;
- case METER_REMOVE:
- needBarrier = previousTypes.contains(BatchStepType.FLOW_REMOVE)
- || previousTypes.contains(BatchStepType.FLOW_UPDATE);
- break;
- default:
- needBarrier = false;
- }
- return needBarrier;
+ return isFlowBarrierNeeded(previousTypes, type)
+ || isGroupBarrierNeeded(previousTypes, type)
+ || isMeterBarrierNeeded(previousTypes, type);
+ }
+
+ private static boolean isFlowBarrierNeeded(final EnumSet<BatchStepType> previousTypes, final BatchStepType type) {
+ return (type == BatchStepType.FLOW_ADD
+ || type == BatchStepType.FLOW_UPDATE)
+ && (previousTypes.contains(BatchStepType.GROUP_ADD)
+ || previousTypes.contains(BatchStepType.METER_ADD));
+ }
+
+    /**
+     * A barrier is needed before a group add when a group was added/updated in the preceding
+     * step set (chained groups), and before a group remove when flows or groups were
+     * removed/updated there (possible references to the group being removed).
+     */
+    private static boolean isGroupBarrierNeeded(final EnumSet<BatchStepType> previousTypes, final BatchStepType type) {
+        switch (type) {
+            case GROUP_ADD:
+                return previousTypes.contains(BatchStepType.GROUP_ADD)
+                        || previousTypes.contains(BatchStepType.GROUP_UPDATE);
+            case GROUP_REMOVE:
+                return previousTypes.contains(BatchStepType.FLOW_REMOVE)
+                        || previousTypes.contains(BatchStepType.FLOW_UPDATE)
+                        || previousTypes.contains(BatchStepType.GROUP_REMOVE)
+                        || previousTypes.contains(BatchStepType.GROUP_UPDATE);
+            default:
+                return false;
+        }
+    }
+
+    /**
+     * A barrier is needed before a meter remove when flows were removed or updated in the
+     * preceding step set (those flows may still reference the meter).
+     */
+    private static boolean isMeterBarrierNeeded(final EnumSet<BatchStepType> previousTypes, final BatchStepType type) {
+        if (type != BatchStepType.METER_REMOVE) {
+            return false;
+        }
+        return previousTypes.contains(BatchStepType.FLOW_REMOVE)
+                || previousTypes.contains(BatchStepType.FLOW_UPDATE);
    }
public static List<BatchPlanStep> assembleBatchPlan(List<Batch> batches) {
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public final class FlowUtil {
- private static final Logger LOG = LoggerFactory.getLogger(FlowUtil.class);
+
private static final RpcResultBuilder<List<BatchFailedFlowsOutput>> SUCCESSFUL_FLOW_OUTPUT_RPC_RESULT =
RpcResultBuilder.success(Collections.<BatchFailedFlowsOutput>emptyList());
*/
public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedFlowsOutput>>> createCumulatingFunction(
final List<? extends BatchFlowIdGrouping> inputBatchFlows) {
- return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedFlowsOutput>>>() {
- @Nullable
- @Override
- public RpcResult<List<BatchFailedFlowsOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
- final int sizeOfFutures = innerInput.size();
- final int sizeOfInputBatch = inputBatchFlows.size();
- Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
- "wrong amount of returned futures: {} <> {}", sizeOfFutures, sizeOfInputBatch);
-
- final ArrayList<BatchFailedFlowsOutput> batchFlows = new ArrayList<>(sizeOfFutures);
- final Iterator<? extends BatchFlowIdGrouping> batchFlowIterator = inputBatchFlows.iterator();
-
- Collection<RpcError> flowErrors = new ArrayList<>(sizeOfFutures);
-
- int batchOrder = 0;
- for (RpcResult<O> flowModOutput : innerInput) {
- final FlowId flowId = batchFlowIterator.next().getFlowId();
-
- if (!flowModOutput.isSuccessful()) {
- batchFlows.add(new BatchFailedFlowsOutputBuilder()
- .setFlowId(flowId)
- .setBatchOrder(batchOrder)
- .build());
- flowErrors.addAll(flowModOutput.getErrors());
+ return new CumulatingFunction<O>(inputBatchFlows).invoke();
+ }
+
+    /**
+     * Builds a cumulating function that pairs each per-flow RPC result with its input flow id
+     * (same order) and aggregates failures into a single {@code RpcResult}: failed entries are
+     * reported as {@code BatchFailedFlowsOutput} with their batch order, and all RPC errors are
+     * merged; an all-success input yields the shared successful result.
+     */
+    private static class CumulatingFunction<O> {
+        private final List<? extends BatchFlowIdGrouping> inputBatchFlows;
+
+        public CumulatingFunction(List<? extends BatchFlowIdGrouping> inputBatchFlows) {
+            this.inputBatchFlows = inputBatchFlows;
+        }
+
+        public Function<List<RpcResult<O>>, RpcResult<List<BatchFailedFlowsOutput>>> invoke() {
+            return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedFlowsOutput>>>() {
+                @Nullable
+                @Override
+                public RpcResult<List<BatchFailedFlowsOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
+                    final int sizeOfFutures = innerInput.size();
+                    final int sizeOfInputBatch = inputBatchFlows.size();
+                    // NOTE: Guava Preconditions templates use %s, not SLF4J-style {} —
+                    // the previous message never interpolated the counts.
+                    Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
+                            "wrong amount of returned futures: %s <> %s", sizeOfFutures, sizeOfInputBatch);
+
+                    final ArrayList<BatchFailedFlowsOutput> batchFlows = new ArrayList<>(sizeOfFutures);
+                    final Iterator<? extends BatchFlowIdGrouping> batchFlowIterator = inputBatchFlows.iterator();
+
+                    Collection<RpcError> flowErrors = new ArrayList<>(sizeOfFutures);
+
+                    int batchOrder = 0;
+                    for (RpcResult<O> flowModOutput : innerInput) {
+                        final FlowId flowId = batchFlowIterator.next().getFlowId();
+
+                        if (!flowModOutput.isSuccessful()) {
+                            batchFlows.add(new BatchFailedFlowsOutputBuilder()
+                                    .setFlowId(flowId)
+                                    .setBatchOrder(batchOrder)
+                                    .build());
+                            flowErrors.addAll(flowModOutput.getErrors());
+                        }
+                        batchOrder++;
                    }
-                    batchOrder++;
-                }
-                final RpcResultBuilder<List<BatchFailedFlowsOutput>> resultBuilder;
-                if (!flowErrors.isEmpty()) {
-                    resultBuilder = RpcResultBuilder.<List<BatchFailedFlowsOutput>>failed()
-                            .withRpcErrors(flowErrors).withResult(batchFlows);
-                } else {
-                    resultBuilder = SUCCESSFUL_FLOW_OUTPUT_RPC_RESULT;
+                    final RpcResultBuilder<List<BatchFailedFlowsOutput>> resultBuilder;
+                    if (!flowErrors.isEmpty()) {
+                        resultBuilder = RpcResultBuilder.<List<BatchFailedFlowsOutput>>failed()
+                                .withRpcErrors(flowErrors).withResult(batchFlows);
+                    } else {
+                        resultBuilder = SUCCESSFUL_FLOW_OUTPUT_RPC_RESULT;
+                    }
+                    return resultBuilder.build();
                }
-                return resultBuilder.build();
-            }
-        };
+            };
+        }
    }
}
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType;
/**
* provides group util methods
public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>> createCumulatingFunction(
final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group> inputBatchGroups,
final int sizeOfInputBatch) {
- return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>>() {
- @Nullable
- @Override
- public RpcResult<List<BatchFailedGroupsOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
- final int sizeOfFutures = innerInput.size();
- Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
- "wrong amount of returned futures: {} <> {}", sizeOfFutures, sizeOfInputBatch);
-
- final List<BatchFailedGroupsOutput> batchGroups = new ArrayList<>();
- final Iterator<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group>
- batchGroupIterator = inputBatchGroups.iterator();
-
- Collection<RpcError> groupErrors = new ArrayList<>(sizeOfFutures);
-
- int batchOrder = 0;
- for (RpcResult<O> groupModOutput : innerInput) {
- final GroupId groupId = batchGroupIterator.next().getGroupId();
-
- if (!groupModOutput.isSuccessful()) {
- batchGroups.add(new BatchFailedGroupsOutputBuilder()
- .setGroupId(groupId)
- .setBatchOrder(batchOrder)
- .build());
- groupErrors.addAll(groupModOutput.getErrors());
- }
- batchOrder++;
- }
+ return new CumulatingFunction<O>(inputBatchGroups, sizeOfInputBatch).invoke();
+ }
- final RpcResultBuilder<List<BatchFailedGroupsOutput>> resultBuilder;
- if (!groupErrors.isEmpty()) {
- resultBuilder = RpcResultBuilder.<List<BatchFailedGroupsOutput>>failed()
- .withRpcErrors(groupErrors).withResult(batchGroups);
- } else {
- resultBuilder = SUCCESSFUL_GROUP_OUTPUT_RPC_RESULT;
- }
- return resultBuilder.build();
- }
- };
+    /**
+     * Returns the bitmap of actions supported by each group.
+     *
+     * <p>Each bit position corresponds to an OpenFlow 1.3 action type code (OFPAT_*):
+     * bit 0 = OFPAT_OUTPUT, bits 11-12 = COPY_TTL_OUT/IN, ... bit 27 = OFPAT_POP_PBB.
+     *
+     * @param actionsSupported supported-action flag sets, one entry per group
+     * @return action bitmaps, one per input entry, in the same order
+     */
+    public static List<Long> extractGroupActionsSupportBitmap(final List<ActionType> actionsSupported) {
+        List<Long> supportActionByGroups = new ArrayList<>(actionsSupported.size());
+        for (ActionType supportedActions : actionsSupported) {
+            long supportActionBitmap = 0;
+            supportActionBitmap |= supportedActions.isOFPATOUTPUT() ? (1) : 0;
+            supportActionBitmap |= supportedActions.isOFPATCOPYTTLOUT() ? (1 << 11) : 0;
+            supportActionBitmap |= supportedActions.isOFPATCOPYTTLIN() ? (1 << 12) : 0;
+            supportActionBitmap |= supportedActions.isOFPATSETMPLSTTL() ? (1 << 15) : 0;
+            supportActionBitmap |= supportedActions.isOFPATDECMPLSTTL() ? (1 << 16) : 0;
+            supportActionBitmap |= supportedActions.isOFPATPUSHVLAN() ? (1 << 17) : 0;
+            supportActionBitmap |= supportedActions.isOFPATPOPVLAN() ? (1 << 18) : 0;
+            supportActionBitmap |= supportedActions.isOFPATPUSHMPLS() ? (1 << 19) : 0;
+            supportActionBitmap |= supportedActions.isOFPATPOPMPLS() ? (1 << 20) : 0;
+            supportActionBitmap |= supportedActions.isOFPATSETQUEUE() ? (1 << 21) : 0;
+            supportActionBitmap |= supportedActions.isOFPATGROUP() ? (1 << 22) : 0;
+            supportActionBitmap |= supportedActions.isOFPATSETNWTTL() ? (1 << 23) : 0;
+            supportActionBitmap |= supportedActions.isOFPATDECNWTTL() ? (1 << 24) : 0;
+            supportActionBitmap |= supportedActions.isOFPATSETFIELD() ? (1 << 25) : 0;
+            supportActionBitmap |= supportedActions.isOFPATPUSHPBB() ? (1 << 26) : 0;
+            supportActionBitmap |= supportedActions.isOFPATPOPPBB() ? (1 << 27) : 0;
+            supportActionByGroups.add(supportActionBitmap);
+        }
+        return supportActionByGroups;
+    }
/**
* @return batch group operation output of given type containing list of group-ids and corresponding success flag
*/
private static <T extends BatchGroupOutputListGrouping>
- RpcResultBuilder<T> createCumulativeRpcResult(final @Nullable RpcResult<List<BatchFailedGroupsOutput>> batchGroupsCumulativeResult,
+ RpcResultBuilder<T> createCumulativeRpcResult(@Nullable final RpcResult<List<BatchFailedGroupsOutput>> batchGroupsCumulativeResult,
final T batchOutput) {
final RpcResultBuilder<T> resultBld;
if (batchGroupsCumulativeResult.isSuccessful()) {
}
return resultBld;
}
+
+    /**
+     * Builds a cumulating function that pairs each per-group RPC result with its input group id
+     * (same order) and aggregates failures into a single {@code RpcResult}: failed entries are
+     * reported as {@code BatchFailedGroupsOutput} with their batch order, and all RPC errors are
+     * merged; an all-success input yields the shared successful result.
+     */
+    private static class CumulatingFunction<O> {
+        private final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group> inputBatchGroups;
+        private final int sizeOfInputBatch;
+
+        public CumulatingFunction(Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group> inputBatchGroups, int sizeOfInputBatch) {
+            this.inputBatchGroups = inputBatchGroups;
+            this.sizeOfInputBatch = sizeOfInputBatch;
+        }
+
+        public Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>> invoke() {
+            return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedGroupsOutput>>>() {
+                @Nullable
+                @Override
+                public RpcResult<List<BatchFailedGroupsOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
+                    final int sizeOfFutures = innerInput.size();
+                    // NOTE: Guava Preconditions templates use %s, not SLF4J-style {} —
+                    // the previous message never interpolated the counts.
+                    Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
+                            "wrong amount of returned futures: %s <> %s", sizeOfFutures, sizeOfInputBatch);
+
+                    final List<BatchFailedGroupsOutput> batchGroups = new ArrayList<>();
+                    final Iterator<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.Group>
+                            batchGroupIterator = inputBatchGroups.iterator();
+
+                    Collection<RpcError> groupErrors = new ArrayList<>(sizeOfFutures);
+
+                    int batchOrder = 0;
+                    for (RpcResult<O> groupModOutput : innerInput) {
+                        final GroupId groupId = batchGroupIterator.next().getGroupId();
+
+                        if (!groupModOutput.isSuccessful()) {
+                            batchGroups.add(new BatchFailedGroupsOutputBuilder()
+                                    .setGroupId(groupId)
+                                    .setBatchOrder(batchOrder)
+                                    .build());
+                            groupErrors.addAll(groupModOutput.getErrors());
+                        }
+                        batchOrder++;
+                    }
+
+                    final RpcResultBuilder<List<BatchFailedGroupsOutput>> resultBuilder;
+                    if (!groupErrors.isEmpty()) {
+                        resultBuilder = RpcResultBuilder.<List<BatchFailedGroupsOutput>>failed()
+                                .withRpcErrors(groupErrors).withResult(batchGroups);
+                    } else {
+                        resultBuilder = SUCCESSFUL_GROUP_OUTPUT_RPC_RESULT;
+                    }
+                    return resultBuilder.build();
+                }
+            };
+        }
+    }
}
public static <O> Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>> createCumulativeFunction(
final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter> inputBatchMeters,
final int sizeOfInputBatch) {
- return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>>() {
- @Nullable
- @Override
- public RpcResult<List<BatchFailedMetersOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
- final int sizeOfFutures = innerInput.size();
- Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
- "wrong amount of returned futures: {} <> {}", sizeOfFutures, sizeOfInputBatch);
-
- final List<BatchFailedMetersOutput> batchMeters = new ArrayList<>();
- final Iterator<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter>
- batchMeterIterator = inputBatchMeters.iterator();
-
- Collection<RpcError> meterErrors = new ArrayList<>(sizeOfFutures);
-
- int batchOrder = 0;
- for (RpcResult<O> meterModOutput : innerInput) {
- final MeterId meterId = batchMeterIterator.next().getMeterId();
-
- if (!meterModOutput.isSuccessful()) {
- batchMeters.add(new BatchFailedMetersOutputBuilder()
- .setBatchOrder(batchOrder)
- .setMeterId(meterId)
- .build());
- meterErrors.addAll(meterModOutput.getErrors());
- }
- batchOrder++;
- }
-
- final RpcResultBuilder<List<BatchFailedMetersOutput>> resultBuilder;
- if (!meterErrors.isEmpty()) {
- resultBuilder = RpcResultBuilder.<List<BatchFailedMetersOutput>>failed()
- .withRpcErrors(meterErrors).withResult(batchMeters);
- } else {
- resultBuilder = SUCCESSFUL_METER_OUTPUT_RPC_RESULT;
- }
- return resultBuilder.build();
- }
- };
+ return new CumulativeFunction<O>(inputBatchMeters, sizeOfInputBatch).invoke();
}
/**
}
return resultBld;
}
+
+    /**
+     * Builds a cumulative function that pairs each per-meter RPC result with its input meter id
+     * (same order) and aggregates failures into a single {@code RpcResult}: failed entries are
+     * reported as {@code BatchFailedMetersOutput} with their batch order, and all RPC errors are
+     * merged; an all-success input yields the shared successful result.
+     */
+    private static class CumulativeFunction<O> {
+        private final Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter> inputBatchMeters;
+        private final int sizeOfInputBatch;
+
+        public CumulativeFunction(Iterable<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter> inputBatchMeters, int sizeOfInputBatch) {
+            this.inputBatchMeters = inputBatchMeters;
+            this.sizeOfInputBatch = sizeOfInputBatch;
+        }
+
+        public Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>> invoke() {
+            return new Function<List<RpcResult<O>>, RpcResult<List<BatchFailedMetersOutput>>>() {
+                @Nullable
+                @Override
+                public RpcResult<List<BatchFailedMetersOutput>> apply(@Nullable final List<RpcResult<O>> innerInput) {
+                    final int sizeOfFutures = innerInput.size();
+                    // NOTE: Guava Preconditions templates use %s, not SLF4J-style {} —
+                    // the previous message never interpolated the counts.
+                    Preconditions.checkArgument(sizeOfFutures == sizeOfInputBatch,
+                            "wrong amount of returned futures: %s <> %s", sizeOfFutures, sizeOfInputBatch);
+
+                    final List<BatchFailedMetersOutput> batchMeters = new ArrayList<>();
+                    final Iterator<? extends org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.Meter>
+                            batchMeterIterator = inputBatchMeters.iterator();
+
+                    Collection<RpcError> meterErrors = new ArrayList<>(sizeOfFutures);
+
+                    int batchOrder = 0;
+                    for (RpcResult<O> meterModOutput : innerInput) {
+                        final MeterId meterId = batchMeterIterator.next().getMeterId();
+
+                        if (!meterModOutput.isSuccessful()) {
+                            batchMeters.add(new BatchFailedMetersOutputBuilder()
+                                    .setBatchOrder(batchOrder)
+                                    .setMeterId(meterId)
+                                    .build());
+                            meterErrors.addAll(meterModOutput.getErrors());
+                        }
+                        batchOrder++;
+                    }
+
+                    final RpcResultBuilder<List<BatchFailedMetersOutput>> resultBuilder;
+                    if (!meterErrors.isEmpty()) {
+                        resultBuilder = RpcResultBuilder.<List<BatchFailedMetersOutput>>failed()
+                                .withRpcErrors(meterErrors).withResult(batchMeters);
+                    } else {
+                        resultBuilder = SUCCESSFUL_METER_OUTPUT_RPC_RESULT;
+                    }
+                    return resultBuilder.build();
+                }
+            };
+        }
+    }
}
}
@Test
- public void testBuildRequest() {
+ public void testBuildRequest() throws Exception {
final OfHeader ofHeader = salPortService.buildRequest(new Xid(DUMMY_XID), dummyUpdatePortInput());
}
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupTypes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.GroupId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.GroupType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.MultipartRequestFlags;
private static final Long DUMMY_REF_COUNT = 1234L;
private static final GroupTypes DUMMY_GROUPS_TYPE = GroupTypes.GroupAll;
private static final GroupType DUMMY_GROUP_TYPE = GroupType.OFPGTALL;
- private static final Long GROUP_ACTION_BITMAP = 0b00000000000000000000000000000000000001111111111111001100000000001L;
@Before
public void setUp() {
assertEquals(DUMMY_GROUPS_TYPE,groupDescStat.getGroupType() );
}
- @Test
- public void getGroupActionsSupportBitmap() {
- ActionType actionSupported = new ActionType(true,true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true);
- final List<Long> groupActionsSupportBitmap = SinglePurposeMultipartReplyTranslator.getGroupActionsSupportBitmap(Lists.newArrayList(actionSupported));
- assertEquals(1, groupActionsSupportBitmap.size());
- final Long bitmap = groupActionsSupportBitmap.get(0);
- assertEquals(GROUP_ACTION_BITMAP, bitmap);
- }
-
private MultipartReplyBody prepareMultipartReplyGroupDesc() {
MultipartReplyGroupDescCaseBuilder multipartReplyGroupDescCaseBuilder = new MultipartReplyGroupDescCaseBuilder();
MultipartReplyGroupDescBuilder multipartReplyGroupDescBuilder = new MultipartReplyGroupDescBuilder();
package org.opendaylight.openflowplugin.impl.util;
+import static org.junit.Assert.assertEquals;
+
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import java.util.Collections;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.ActionType;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
public static final NodeId DUMMY_NODE_ID = new NodeId("dummyNodeId");
private static final GroupId DUMMY_GROUP_ID = new GroupId(42L);
private static final GroupId DUMMY_GROUP_ID_2 = new GroupId(43L);
+ private static final Long GROUP_ACTION_BITMAP = 0b00000000000000000000000000000000000001111111111111001100000000001L;
@Test
public void testBuildGroupPath() throws Exception {
Assert.assertEquals(1, composite.getResult().getBatchFailedGroupsOutput().size());
}
+ @Test
+ public void testExtractGroupActionsSupportBitmap() {
+ ActionType actionSupported = new ActionType(true,true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true);
+ final List<Long> groupActionsSupportBitmap = GroupUtil.extractGroupActionsSupportBitmap(Lists.newArrayList(actionSupported));
+ assertEquals(1, groupActionsSupportBitmap.size());
+ final Long bitmap = groupActionsSupportBitmap.get(0);
+ assertEquals(GROUP_ACTION_BITMAP, bitmap);
+ }
+
private RpcResult<Void> createBarrierFailureOutcome() {
return RpcResultBuilder.<Void>failed()
.withError(RpcError.ErrorType.APPLICATION, "ut-barrier-error")
<artifactId>org.eclipse.equinox.console</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.apache.felix.gogo.command</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.apache.felix.gogo.runtime</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.apache.felix.gogo.shell</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>logback-config</artifactId>
if (System.getProperty(INSPECT_OSGI) != null) {
option
.add(CoreOptions.vmOption("-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address="+DEBUG_PORT))
- .add(CoreOptions.mavenBundle("equinoxSDK381", "org.eclipse.equinox.console").versionAsInProject())
- .add(CoreOptions.mavenBundle("equinoxSDK381", "org.apache.felix.gogo.shell").versionAsInProject())
- .add(CoreOptions.mavenBundle("equinoxSDK381", "org.apache.felix.gogo.runtime").versionAsInProject())
- .add(CoreOptions.mavenBundle("equinoxSDK381", "org.apache.felix.gogo.command").versionAsInProject());
+ .add(CoreOptions.mavenBundle("equinoxSDK381", "org.eclipse.equinox.console").versionAsInProject()));
}
return option;