import org.slf4j.LoggerFactory;
/**
- * Listens to config changes and delegates add/remove/update/barrier to {@link SyncReactor}.
+ * Listens to config changes and delegates sync entry to {@link SyncReactor}.
*/
public class SimplifiedConfigListener extends AbstractFrmSyncListener<FlowCapableNode> {
private static final Logger LOG = LoggerFactory.getLogger(SimplifiedConfigListener.class);
}
/**
- * Compare cached operational with current config modification. If operational is not present
- * skip calling Inventory RPCs.
+ * Update cache. If operational data are present, choose appropriate data and start syncup.
+ * Otherwise skip incoming change.
* @throws InterruptedException from syncup
*/
protected Optional<ListenableFuture<Boolean>> processNodeModification(
}
/**
- * Add only what is missing in operational store. Config. node could be added in two situations:
- * <ul>
- * <li>Note very first time after restart was handled by operational listener. Syncup should
- * calculate no delta (we don want to reconfigure switch if not necessary).</li>
- * <li>But later the config. node could be deleted, after that config node added again. Syncup
- * should calculate that everything needs to be added. Operational store should be empty in
- * optimal case (but the switch could be reprogrammed by another person/system.</li>
- * </ul>
+ * Add only what is missing on device. If node was added to config DS and it is already present
+ * in operational DS (connected), the diff between the new configuration and the actual configuration
+ * (seen in operational) should be calculated and sent to device.
*/
private ListenableFuture<Boolean> onNodeAdded(final InstanceIdentifier<FlowCapableNode> nodePath,
final FlowCapableNode dataAfter,
/**
* Apply minimal changes very fast. For better performance needed just compare config
- * after+before. Config listener should not be dependent on operational flows/groups while
+ * after+before. Config listener should not be dependent on operational flows/groups/meters while
* updating config because operational store is highly async and it depends on another module in
* system which is updating operational store (that components is also trying to solve
* scale/performance issues on several layers).
}
/**
- * Remove values that are being deleted in the config from the switch. Note, this could be
- * probably optimized using dedicated wipe-out RPC, but it has impact on switch if it is
- * programmed by two person/system
+ * Remove values that are being deleted in the config from the switch.
+ * Note, this could be probably optimized using dedicated wipe-out RPC.
*/
private ListenableFuture<Boolean> onNodeDeleted(final InstanceIdentifier<FlowCapableNode> nodePath,
final FlowCapableNode dataBefore) throws InterruptedException {
import org.slf4j.LoggerFactory;
/**
- * Listens to operational new nodes and delegates add/remove/update/barrier to {@link SyncReactor}.
+ * Listens to operational changes and starts reconciliation through {@link SyncReactor} when necessary.
*/
public class SimplifiedOperationalListener extends AbstractFrmSyncListener<Node> {
private static final Logger LOG = LoggerFactory.getLogger(SimplifiedOperationalListener.class);
}
/**
- * This method behaves like this:
- * <ul>
- * <li>If node is added to operational store then reconciliation.</li>
- * <li>Node is deleted from operational cache is removed.</li>
- * <li>Skip this event otherwise.</li>
- * </ul>
- *
+ * Update cache, register for device mastership when device connected and start reconciliation if device
+ * is registered and actual modification is consistent. Skip the event otherwise.
* @throws InterruptedException from syncup
*/
protected Optional<ListenableFuture<Boolean>> processNodeModification(
deviceMastershipManager.onDeviceConnected(nodeId);
}
- if (isRegisteredAndConsistentForReconcile(modification)) {
+ if (reconciliationRegistry.isRegistered(nodeId) && isConsistentForReconcile(modification)) {
return reconciliation(modification);
} else {
return skipModification(modification);
/**
* Remove if delete. Update only if FlowCapableNode Augmentation modified.
- *
+ * Unregister for device mastership.
* @param modification Datastore modification
*/
private void updateCache(DataTreeModification<Node> modification) {
return false;
}
+ /**
+ * If node is present in config DS, the diff between wanted configuration (in config DS) and actual device
+ * configuration (coming from operational) should be calculated and sent to device.
+ * @param modification from DS
+ * @return optional syncup future
+ * @throws InterruptedException from syncup
+ */
private Optional<ListenableFuture<Boolean>> reconciliation(DataTreeModification<Node> modification) throws InterruptedException {
final NodeId nodeId = ModificationUtil.nodeId(modification);
final Optional<FlowCapableNode> nodeConfiguration = configDao.loadByNodeId(nodeId);
}
}
- private boolean isRegisteredAndConsistentForReconcile(DataTreeModification<Node> modification) {
+ private boolean isConsistentForReconcile(DataTreeModification<Node> modification) {
final NodeId nodeId = PathUtil.digNodeId(modification.getRootPath().getRootIdentifier());
-
- if (!reconciliationRegistry.isRegistered(nodeId)) {
- return false;
- }
-
final FlowCapableStatisticsGatheringStatus gatheringStatus = modification.getRootNode().getDataAfter()
.getAugmentation(FlowCapableStatisticsGatheringStatus.class);
if (node == null) {
return true;
}
-
final List<NodeConnector> nodeConnectors = node.getNodeConnector();
-
return nodeConnectors == null || nodeConnectors.isEmpty();
}
public LogicalDatastoreType dsType() {
return LogicalDatastoreType.OPERATIONAL;
}
-
+
}
final ListenableFuture<Boolean> endResult =
delegate.syncup(flowcapableNodePath, syncupEntry);
- Futures.addCallback(endResult, new FutureCallback<Boolean>() {
- @Override
- public void onSuccess(@Nullable final Boolean result) {
- if (LOG.isDebugEnabled()) {
- final long stampFinished = System.nanoTime();
- LOG.debug("syncup finished {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
- formatNanos(stampFinished - stampBeforeGuard),
- formatNanos(stampFinished - stampAfterGuard),
- formatNanos(stampAfterGuard - stampBeforeGuard),
- guard.availablePermits(), threadName());
- }
-
- releaseGuardForNodeId(guard);
- }
-
- @Override
- public void onFailure(final Throwable t) {
- if (LOG.isDebugEnabled()) {
- final long stampFinished = System.nanoTime();
- LOG.warn("syncup failed {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
- formatNanos(stampFinished - stampBeforeGuard),
- formatNanos(stampFinished - stampAfterGuard),
- formatNanos(stampAfterGuard - stampBeforeGuard),
- guard.availablePermits(), threadName());
- }
-
- releaseGuardForNodeId(guard);
- }
- });
+ Futures.addCallback(endResult, createSyncupCallback(guard, stampBeforeGuard, stampAfterGuard, nodeId));
return endResult;
} catch (InterruptedException e) {
releaseGuardForNodeId(guard);
}
}
- private String formatNanos(long nanos) {
+ /**
+ * Builds the callback attached to the syncup future: logs timing statistics on
+ * completion and always releases the per-node guard semaphore, on both the
+ * success and the failure path.
+ * @param guard semaphore guarding syncup for the node; released when syncup ends
+ * @param stampBeforeGuard nano timestamp taken before guard acquisition
+ * @param stampAfterGuard nano timestamp taken after guard acquisition
+ * @param nodeId id of the node being synced (used for logging only)
+ * @return callback to register on the syncup result future
+ */
+ private static FutureCallback<Boolean> createSyncupCallback(final Semaphore guard,
+ final long stampBeforeGuard,
+ final long stampAfterGuard,
+ final NodeId nodeId) {
+ return new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(@Nullable final Boolean result) {
+ // Timing breakdown is computed only when debug logging is on (avoids nanoTime cost).
+ if (LOG.isDebugEnabled()) {
+ final long stampFinished = System.nanoTime();
+ LOG.debug("syncup finished {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
+ formatNanos(stampFinished - stampBeforeGuard), formatNanos(stampFinished - stampAfterGuard),
+ formatNanos(stampAfterGuard - stampBeforeGuard), guard.availablePermits(), threadName());
+ }
+ releaseGuardForNodeId(guard);
+ }
+ @Override
+ public void onFailure(final Throwable t) {
+ // Failures are always logged (no debug-level gate) so they are never silently lost.
+ final long stampFinished = System.nanoTime();
+ LOG.error("syncup failed {} took:{} rpc:{} wait:{} guard:{} permits thread:{}", nodeId.getValue(),
+ formatNanos(stampFinished - stampBeforeGuard), formatNanos(stampFinished - stampAfterGuard),
+ formatNanos(stampAfterGuard - stampBeforeGuard), guard.availablePermits(), threadName());
+ releaseGuardForNodeId(guard);
+ }};
+ }
+
+ private static String formatNanos(long nanos) {
return "'" + TimeUnit.NANOSECONDS.toMillis(nanos) + " ms'";
}
* Unlock and release guard.
* @param guard semaphore guard which should be unlocked
*/
- private void releaseGuardForNodeId(final Semaphore guard) {
+ private static void releaseGuardForNodeId(final Semaphore guard) {
if (guard != null) {
guard.release();
LOG.trace("syncup release guard:{} thread:{}", guard, threadName());
if (input == null) {
return false;
}
-
if (LOG.isDebugEnabled()) {
final CrudCounts flowCrudCounts = counters.getFlowCrudCounts();
final CrudCounts meterCrudCounts = counters.getMeterCrudCounts();
final CrudCounts groupCrudCounts = counters.getGroupCrudCounts();
- LOG.debug("syncup outcome[{}] (added/updated/removed): flow={}/{}/{}, meter={}/{}/{}, group={}/{}/{}, took={} ms",
+ LOG.debug("syncup outcome[{}] (added/updated/removed): flow={}/{}/{}, group={}/{}/{}, meter={}/{}/{}, took={} ms",
nodeId.getValue(),
- flowCrudCounts.getAdded(),
- flowCrudCounts.getUpdated(),
- flowCrudCounts.getRemoved(),
- meterCrudCounts.getAdded(),
- meterCrudCounts.getUpdated(),
- meterCrudCounts.getRemoved(),
- groupCrudCounts.getAdded(),
- groupCrudCounts.getUpdated(),
- groupCrudCounts.getRemoved(),
- TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - counters.getStartNano())
- );
+ flowCrudCounts.getAdded(), flowCrudCounts.getUpdated(), flowCrudCounts.getRemoved(),
+ groupCrudCounts.getAdded(), groupCrudCounts.getUpdated(), groupCrudCounts.getRemoved(),
+ meterCrudCounts.getAdded(), meterCrudCounts.getUpdated(), meterCrudCounts.getRemoved(),
+ TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - counters.getStartNano()));
}
-
LOG.trace("syncup errors: {}", input.getErrors());
return input.isSuccessful();
- }
- });
+ }});
}
@VisibleForTesting
try {
registration.close();
} catch (Exception e) {
- LOG.error("FRS cluster service close fail: {}", nodeId.getValue());
+ LOG.error("FRS cluster service close fail: {} {}", nodeId.getValue(), e);
}
}
LOG.debug("FRS service unregistered for: {}", nodeId.getValue());
public class FlowForwarder implements ForwardingRulesCommitter<Flow, AddFlowOutput, RemoveFlowOutput, UpdateFlowOutput> {
private static final Logger LOG = LoggerFactory.getLogger(FlowForwarder.class);
+ private static final String TABLE_ID_MISMATCH = "tableId mismatch";
private final SalFlowService salFlowService;
public FlowForwarder(final SalFlowService salFlowService) {
return salFlowService.removeFlow(builder.build());
} else {
return RpcResultBuilder.<RemoveFlowOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "tableId mismatch").buildFuture();
+ .withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
}
}
output = salFlowService.updateFlow(builder.build());
} else {
output = RpcResultBuilder.<UpdateFlowOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "tableId mismatch").buildFuture();
+ .withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
}
return output;
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
output = salFlowService.addFlow(builder.build());
} else {
- output = RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, "tableId mismatch").buildFuture();
+ output = RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
}
return output;
}
public void onSuccess(@Nullable final RpcResult<ProcessFlatBatchOutput> result) {
if (!result.isSuccessful() && result.getResult() != null && !result.getResult().getBatchFailure().isEmpty()) {
Map<Range<Integer>, Batch> batchMap = mapBatchesToRanges(inputBatchBag, failureIndexLimit);
-
- for (BatchFailure batchFailure : result.getResult().getBatchFailure()) {
- for (Map.Entry<Range<Integer>, Batch> rangeBatchEntry : batchMap.entrySet()) {
- if (rangeBatchEntry.getKey().contains(batchFailure.getBatchOrder())) {
- // get type and decrease
- final BatchChoice batchChoice = rangeBatchEntry.getValue().getBatchChoice();
- decrementCounters(batchChoice, counters);
- break;
- }
- }
- }
+ decrementBatchFailuresCounters(result.getResult().getBatchFailure(), batchMap, counters);
}
}
};
}
+ /**
+ * For each reported batch failure, finds the batch whose order range contains the
+ * failure's batch-order and decrements the CRUD counter matching that batch's type.
+ * @param batchFailures failures reported in the flat-batch RPC result
+ * @param batchMap batches keyed by the range of batch-order values each one covers
+ * @param counters per-type add/update/remove counters to decrement
+ */
+ private static void decrementBatchFailuresCounters(final List<BatchFailure> batchFailures,
+ final Map<Range<Integer>, Batch> batchMap,
+ final SyncCrudCounters counters) {
+ for (BatchFailure batchFailure : batchFailures) {
+ for (Map.Entry<Range<Integer>, Batch> rangeBatchEntry : batchMap.entrySet()) {
+ if (rangeBatchEntry.getKey().contains(batchFailure.getBatchOrder())) {
+ // get type and decrease
+ final BatchChoice batchChoice = rangeBatchEntry.getValue().getBatchChoice();
+ decrementCounters(batchChoice, counters);
+ break;
+ }
+ }
+ }
+ }
+
static void decrementCounters(final BatchChoice batchChoice, final SyncCrudCounters counters) {
if (batchChoice instanceof FlatBatchAddFlowCase) {
counters.getFlowCrudCounts().decAdded();
@VisibleForTesting
static int assembleRemoveFlows(final List<Batch> batchBag, int batchOrder, final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
// process flow remove
+ int order = batchOrder;
if (flowItemSyncTableMap != null) {
for (Map.Entry<TableKey, ItemSyncBox<Flow>> syncBoxEntry : flowItemSyncTableMap.entrySet()) {
final ItemSyncBox<Flow> flowItemSyncBox = syncBoxEntry.getValue();
.setBatchChoice(new FlatBatchRemoveFlowCaseBuilder()
.setFlatBatchRemoveFlow(flatBatchRemoveFlowBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleAddOrUpdateGroups(final List<Batch> batchBag, int batchOrder, final List<ItemSyncBox<Group>> groupsToAddOrUpdate) {
// process group add+update
+ int order = batchOrder;
if (groupsToAddOrUpdate != null) {
for (ItemSyncBox<Group> groupItemSyncBox : groupsToAddOrUpdate) {
if (!groupItemSyncBox.getItemsToPush().isEmpty()) {
.setBatchChoice(new FlatBatchAddGroupCaseBuilder()
.setFlatBatchAddGroup(flatBatchAddGroupBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
.setBatchChoice(new FlatBatchUpdateGroupCaseBuilder()
.setFlatBatchUpdateGroup(flatBatchUpdateGroupBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleRemoveGroups(final List<Batch> batchBag, int batchOrder, final List<ItemSyncBox<Group>> groupsToRemoveOrUpdate) {
// process group add+update
+ int order = batchOrder;
if (groupsToRemoveOrUpdate != null) {
for (ItemSyncBox<Group> groupItemSyncBox : groupsToRemoveOrUpdate) {
if (!groupItemSyncBox.getItemsToPush().isEmpty()) {
.setBatchChoice(new FlatBatchRemoveGroupCaseBuilder()
.setFlatBatchRemoveGroup(flatBatchRemoveGroupBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleAddOrUpdateMeters(final List<Batch> batchBag, int batchOrder, final ItemSyncBox<Meter> meterItemSyncBox) {
// process meter add+update
+ int order = batchOrder;
if (meterItemSyncBox != null) {
if (!meterItemSyncBox.getItemsToPush().isEmpty()) {
final List<FlatBatchAddMeter> flatBatchAddMeterBag =
.setBatchChoice(new FlatBatchAddMeterCaseBuilder()
.setFlatBatchAddMeter(flatBatchAddMeterBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
.setBatchChoice(new FlatBatchUpdateMeterCaseBuilder()
.setFlatBatchUpdateMeter(flatBatchUpdateMeterBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleRemoveMeters(final List<Batch> batchBag, int batchOrder, final ItemSyncBox<Meter> meterItemSyncBox) {
// process meter remove
+ int order = batchOrder;
if (meterItemSyncBox != null && !meterItemSyncBox.getItemsToPush().isEmpty()) {
final List<FlatBatchRemoveMeter> flatBatchRemoveMeterBag =
new ArrayList<>(meterItemSyncBox.getItemsToUpdate().size());
.setBatchChoice(new FlatBatchRemoveMeterCaseBuilder()
.setFlatBatchRemoveMeter(flatBatchRemoveMeterBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
- return batchOrder;
+ return order;
}
@VisibleForTesting
static int assembleAddOrUpdateFlows(final List<Batch> batchBag, int batchOrder, final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
// process flow add+update
+ int order = batchOrder;
if (flowItemSyncTableMap != null) {
for (Map.Entry<TableKey, ItemSyncBox<Flow>> syncBoxEntry : flowItemSyncTableMap.entrySet()) {
final ItemSyncBox<Flow> flowItemSyncBox = syncBoxEntry.getValue();
.setBatchChoice(new FlatBatchAddFlowCaseBuilder()
.setFlatBatchAddFlow(flatBatchAddFlowBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
.setBatchChoice(new FlatBatchUpdateFlowCaseBuilder()
.setFlatBatchUpdateFlow(flatBatchUpdateFlowBag)
.build())
- .setBatchOrder(batchOrder)
+ .setBatchOrder(order)
.build();
- batchOrder += itemOrder;
+ order += itemOrder;
batchBag.add(batch);
}
}
}
- return batchOrder;
+ return order;
}
public SyncPlanPushStrategyFlatBatchImpl setFlatBatchService(final SalFlatBatchService flatBatchService) {
private static final Logger LOG = LoggerFactory.getLogger(FxChainUtil.class);
+ // Utility class: hidden constructor fails fast on (reflective) instantiation.
+ private FxChainUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
public static FutureCallback<RpcResult<Void>> logResultCallback(final NodeId nodeId, final String prefix) {
return new FutureCallback<RpcResult<Void>>() {
* Basic {@link DataTreeModification} related tools.
*/
public class ModificationUtil {
+
+ // Utility class: hidden constructor fails fast on (reflective) instantiation.
+ private ModificationUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
public static String nodeIdValue(DataTreeModification<Node> modification) {
final NodeId nodeId = nodeId(modification);
* Basic {@link InstanceIdentifier} related tools.
*/
public class PathUtil {
+
+ // Utility class: hidden constructor fails fast on (reflective) instantiation.
+ private PathUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
public static NodeId digNodeId(final InstanceIdentifier<?> nodeIdent) {
return nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId();
}
import java.util.Map;
import java.util.Objects;
import java.util.Set;
-import javax.annotation.Nullable;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.GroupActionCase;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
/**
* Util methods for group reconcil task (future chaining, transforms).
*/
-public class ReconcileUtil {
+public final class ReconcileUtil {
private static final Logger LOG = LoggerFactory.getLogger(ReconcileUtil.class);
+ // Utility class: hidden constructor fails fast on (reflective) instantiation.
+ private ReconcileUtil() {
+ throw new IllegalStateException("This class should not be instantiated.");
+ }
+
/**
* @param previousItemAction description for case when the triggering future contains failure
* @param <D> type of rpc output (gathered in list)
* @return single rpc result of type Void honoring all partial rpc results
*/
public static <D> Function<List<RpcResult<D>>, RpcResult<Void>> createRpcResultCondenser(final String previousItemAction) {
- return new Function<List<RpcResult<D>>, RpcResult<Void>>() {
- @Nullable
- @Override
- public RpcResult<Void> apply(@Nullable final List<RpcResult<D>> input) {
- final RpcResultBuilder<Void> resultSink;
- if (input != null) {
- List<RpcError> errors = new ArrayList<>();
- for (RpcResult<D> rpcResult : input) {
- if (!rpcResult.isSuccessful()) {
- errors.addAll(rpcResult.getErrors());
- }
- }
- if (errors.isEmpty()) {
- resultSink = RpcResultBuilder.success();
- } else {
- resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
+ return input -> {
+ final RpcResultBuilder<Void> resultSink;
+ if (input != null) {
+ List<RpcError> errors = new ArrayList<>();
+ for (RpcResult<D> rpcResult : input) {
+ if (!rpcResult.isSuccessful()) {
+ errors.addAll(rpcResult.getErrors());
}
+ }
+ if (errors.isEmpty()) {
+ resultSink = RpcResultBuilder.success();
} else {
- resultSink = RpcResultBuilder.<Void>failed()
- .withError(RpcError.ErrorType.APPLICATION, "previous " + previousItemAction + " failed");
-
+ resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
}
-
- return resultSink.build();
+ } else {
+ resultSink = RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "previous " + previousItemAction + " failed");
}
+ return resultSink.build();
};
}
* @return single rpc result of type Void honoring all partial rpc results
*/
public static <D> Function<RpcResult<D>, RpcResult<Void>> createRpcResultToVoidFunction(final String actionDescription) {
- return new Function<RpcResult<D>, RpcResult<Void>>() {
- @Nullable
- @Override
- public RpcResult<Void> apply(@Nullable final RpcResult<D> input) {
- final RpcResultBuilder<Void> resultSink;
- if (input != null) {
- List<RpcError> errors = new ArrayList<>();
- if (!input.isSuccessful()) {
- errors.addAll(input.getErrors());
- resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
- } else {
- resultSink = RpcResultBuilder.success();
- }
+ return input -> {
+ final RpcResultBuilder<Void> resultSink;
+ if (input != null) {
+ List<RpcError> errors = new ArrayList<>();
+ if (!input.isSuccessful()) {
+ errors.addAll(input.getErrors());
+ resultSink = RpcResultBuilder.<Void>failed().withRpcErrors(errors);
} else {
- resultSink = RpcResultBuilder.<Void>failed()
- .withError(RpcError.ErrorType.APPLICATION, "action of " + actionDescription + " failed");
-
+ resultSink = RpcResultBuilder.success();
}
-
- return resultSink.build();
+ } else {
+ resultSink = RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, "action of " + actionDescription + " failed");
}
+ return resultSink.build();
};
}
final Map<Long, Group> installedGroupsArg,
final Collection<Group> pendingGroups,
final boolean gatherUpdates) {
-
final Map<Long, Group> installedGroups = new HashMap<>(installedGroupsArg);
final List<ItemSyncBox<Group>> plan = new ArrayList<>();
this.match = flow.getMatch();
}
+ /**
+ * Two SwitchFlowId instances are equal when tableId, priority and match are all
+ * equal (null-safe field comparison).
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ SwitchFlowId that = (SwitchFlowId) o;
+
+ if (tableId != null ? !tableId.equals(that.tableId) : that.tableId != null) {
+ return false;
+ }
+ if (priority != null ? !priority.equals(that.priority) : that.priority != null) {
+ return false;
+ }
+ return match != null ? match.equals(that.match) : that.match == null;
+ }
+
@Override
public int hashCode() {
final int prime = 31;
return result;
}
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- SwitchFlowId other = (SwitchFlowId) obj;
- if (match == null) {
- if (other.match != null)
- return false;
- } else if (!match.equals(other.match))
- return false;
- if (priority == null) {
- if (other.priority != null)
- return false;
- } else if (!priority.equals(other.priority))
- return false;
- if (tableId == null) {
- if (other.tableId != null)
- return false;
- } else if (!tableId.equals(other.tableId))
- return false;
- return true;
- }
}
return dsTypeBefore == that.dsTypeBefore;
}
+ /**
+ * Combines after, dsTypeAfter, before and dsTypeBefore into the hash
+ * (null fields contribute 0); intended to stay consistent with equals.
+ */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = after != null ? after.hashCode() : 0;
+ result = prime * result + (dsTypeAfter != null ? dsTypeAfter.hashCode() : 0);
+ result = prime * result + (before != null ? before.hashCode() : 0);
+ result = prime * result + (dsTypeBefore != null ? dsTypeBefore.hashCode() : 0);
+ return result;
+ }
+
+
}
*/
package org.opendaylight.openflowplugin.applications.frsync.impl;
+import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
Matchers.<DataTreeIdentifier<FlowCapableNode>>any(),
Matchers.<DataTreeChangeListener<FlowCapableNode>>any());
}
+
+ // Close the provider after each test — presumably releases its DS listener
+ // registrations; confirm against SimplifiedConfigListener provider lifecycle.
+ @After
+ public void tearDown() throws InterruptedException {
+ provider.close();
+ }
+
}
\ No newline at end of file