* Represents configuration item contract for device.
* Combining add/update/remove committers into a single one.
*/
-public interface ForwardingRulesCommitter<D extends DataObject, A extends DataObject, R extends DataObject, U extends DataObject>
- extends ForwardingRulesAddCommitter<D, A>, ForwardingRulesRemoveCommitter<D, R>, ForwardingRulesUpdateCommitter<D, U> {
+public interface ForwardingRulesCommitter<D extends DataObject, A extends DataObject, R extends DataObject,
+ U extends DataObject> extends ForwardingRulesAddCommitter<D, A>,
+ ForwardingRulesRemoveCommitter<D, R>, ForwardingRulesUpdateCommitter<D, U> {
}
* @param key semaphore identifier
* @return new or existing semaphore for given key, for one key there is always only one semaphore available
*/
- Semaphore summonGuard(@Nonnull final K key);
+ Semaphore summonGuard(@Nonnull K key);
/**
* Get guard and lock for key.
* @param key for which guard should be created and acquired
* @return semaphore guard
*/
- Semaphore summonGuardAndAcquire(@Nonnull final K key);
+ Semaphore summonGuardAndAcquire(@Nonnull K key);
/**
* Unlock and release guard.
* @param guard semaphore guard which should be released
*/
- void releaseGuard(@Nullable final Semaphore guard);
+ void releaseGuard(@Nullable Semaphore guard);
}
public interface SyncPlanPushStrategy {
/**
+ * Invoked to execute the synchronization strategy.
+ *
* @param resultVehicle bootstrap future - execution will chain its async calls to this one
* @param diffInput wraps all diff data required for any strategy ({add,remove,update} x {flow,group,meter})
* @param counters reference to internal one-shot statistics - summary of successfully pushed items
*/
public interface SyncReactor {
/**
+ * Invoked to synchronize a device.
+ *
* @param flowcapableNodePath path to openflow augmentation of node
* @param syncupEntry configured node + device reflection
* @return synchronization outcome
*/
- ListenableFuture<Boolean> syncup(final InstanceIdentifier<FlowCapableNode> flowcapableNodePath,
- final SyncupEntry syncupEntry);
+ ListenableFuture<Boolean> syncup(InstanceIdentifier<FlowCapableNode> flowcapableNodePath, SyncupEntry syncupEntry);
}
this.logicalDatastoreType = logicalDatastoreType;
}
+ @Override
public Optional<FlowCapableNode> loadByNodeId(@Nonnull NodeId nodeId) {
- try (final ReadOnlyTransaction roTx = dataBroker.newReadOnlyTransaction()) {
+ try (ReadOnlyTransaction roTx = dataBroker.newReadOnlyTransaction()) {
final InstanceIdentifier<FlowCapableNode> path =
NODES_IID.child(Node.class, new NodeKey(nodeId)).augmentation(FlowCapableNode.class);
return roTx.read(logicalDatastoreType, path).checkedGet(5000, TimeUnit.MILLISECONDS);
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Collection;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
LOG.trace("Syncup for {} return from {} listener", nodeId.getValue(), dsType());
}
}
- } catch (Exception e) {
+ } catch (InterruptedException | ExecutionException | TimeoutException e) {
LOG.error("Error processing inventory node modification: {}, {}", nodeId.getValue(), e);
}
}
}
protected abstract Optional<ListenableFuture<Boolean>> processNodeModification(
- final DataTreeModification<T> modification);
+ DataTreeModification<T> modification);
protected abstract LogicalDatastoreType dsType();
this.flatBatchService = Preconditions.checkNotNull(rpcRegistry.getRpcService(SalFlatBatchService.class),
"RPC SalFlatBatchService not found.");
- nodeConfigDataTreePath = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, FLOW_CAPABLE_NODE_WC_PATH);
+ nodeConfigDataTreePath = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION,
+ FLOW_CAPABLE_NODE_WC_PATH);
nodeOperationalDataTreePath = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, NODE_WC_PATH);
- final ExecutorService executorService= Executors.newCachedThreadPool(new ThreadFactoryBuilder()
+ final ExecutorService executorService = Executors.newCachedThreadPool(new ThreadFactoryBuilder()
.setNameFormat(FRS_EXECUTOR_PREFIX + "%d")
.setDaemon(false)
- .setUncaughtExceptionHandler((thread, e) -> LOG.error("Uncaught exception {}", thread, e))
+ .setUncaughtExceptionHandler((thread, ex) -> LOG.error("Uncaught exception {}", thread, ex))
.build());
syncThreadPool = MoreExecutors.listeningDecorator(executorService);
broker.registerProvider(this);
final NodeListener<FlowCapableNode> nodeListenerConfig =
new SimplifiedConfigListener(reactor, configSnapshot, operationalDao);
- final NodeListener<Node> nodeListenerOperational =
- new SimplifiedOperationalListener(reactor, operationalSnapshot, configDao, reconciliationRegistry, deviceMastershipManager);
+ final NodeListener<Node> nodeListenerOperational = new SimplifiedOperationalListener(reactor,
+ operationalSnapshot, configDao, reconciliationRegistry, deviceMastershipManager);
dataTreeConfigChangeListener =
dataService.registerDataTreeChangeListener(nodeConfigDataTreePath, nodeListenerConfig);
LOG.info("ForwardingRulesSync has started.");
}
+ @Override
public void close() {
if (Objects.nonNull(dataTreeConfigChangeListener)) {
dataTreeConfigChangeListener.close();
* Update cache. If operational data are present, choose appropriate data and start syncup.
* Otherwise skip incoming change.
*/
+ @Override
protected Optional<ListenableFuture<Boolean>> processNodeModification(
final DataTreeModification<FlowCapableNode> modification) {
final InstanceIdentifier<FlowCapableNode> nodePath = modification.getRootPath().getRootIdentifier();
final FlowCapableNode dataAfter,
final FlowCapableNode operationalNode) {
LOG.debug("Reconciliation {}: {}", dsType(), PathUtil.digNodeId(nodePath).getValue());
- final SyncupEntry syncupEntry = new SyncupEntry(dataAfter, dsType(), operationalNode, LogicalDatastoreType.OPERATIONAL);
+ final SyncupEntry syncupEntry = new SyncupEntry(dataAfter, dsType(), operationalNode,
+ LogicalDatastoreType.OPERATIONAL);
return reactor.syncup(nodePath, syncupEntry);
}
* Update cache, register for device mastership when device connected and start reconciliation if device
* is registered and the actual modification is consistent. Skip the event otherwise.
*/
+ @Override
protected Optional<ListenableFuture<Boolean>> processNodeModification(
final DataTreeModification<Node> modification) {
Optional<ListenableFuture<Boolean>> result;
deviceMastershipManager.onDeviceDisconnected(nodeId);
result = skipModification(modification);
} else {
- operationalSnapshot.updateCache(nodeId, Optional.fromNullable(ModificationUtil.flowCapableNodeAfter(modification)));
+ operationalSnapshot.updateCache(nodeId, Optional.fromNullable(
+ ModificationUtil.flowCapableNodeAfter(modification)));
final boolean isAdd = isAdd(nodeModification) || isAddLogical(nodeModification);
deviceMastershipManager.onDeviceConnected(nodeId);
}
- // if node is registered for reconcile we need consistent data from operational DS (skip partial collections)
- // but we can accept first modification since all statistics are intentionally collected in one step on startup
+ // if node is registered for reconcile we need consistent data from operational DS (skip partial
+ // collections) but we can accept first modification since all statistics are intentionally collected in
+ // one step on startup
if (reconciliationRegistry.isRegistered(nodeId) && (isAdd || isConsistentForReconcile(modification))) {
result = reconciliation(modification);
} else {
* All connectors disappeared from operational store (logical delete).
*/
private boolean isDeleteLogical(final DataObjectModification<Node> nodeModification) {
- return !safeConnectorsEmpty(nodeModification.getDataBefore()) && safeConnectorsEmpty(nodeModification.getDataAfter());
+ return !safeConnectorsEmpty(nodeModification.getDataBefore())
+ && safeConnectorsEmpty(nodeModification.getDataAfter());
}
* All connectors appeared in operational store (logical add).
*/
private boolean isAddLogical(final DataObjectModification<Node> nodeModification) {
- return safeConnectorsEmpty(nodeModification.getDataBefore()) && !safeConnectorsEmpty(nodeModification.getDataAfter());
+ return safeConnectorsEmpty(nodeModification.getDataBefore())
+ && !safeConnectorsEmpty(nodeModification.getDataAfter());
}
/**
this.delegate = delegate;
}
+ @Override
public ListenableFuture<Boolean> syncup(final InstanceIdentifier<FlowCapableNode> flowcapableNodePath,
final SyncupEntry syncupEntry) {
final NodeId nodeId = PathUtil.digNodeId(flowcapableNodePath);
}
semaphoreKeeper.releaseGuard(guard);
}
+
@Override
- public void onFailure(final Throwable t) {
+ public void onFailure(final Throwable failure) {
final long stampFinished = System.nanoTime();
LOG.warn("Syncup failed {} took:{} rpc:{} wait:{}", nodeId.getValue(),
formatNanos(stampFinished - stampBeforeGuard), formatNanos(stampFinished - stampAfterGuard),
formatNanos(stampAfterGuard - stampBeforeGuard));
semaphoreKeeper.releaseGuard(guard);
- }};
+ }
+ };
}
private static String formatNanos(final long nanos) {
package org.opendaylight.openflowplugin.applications.frsync.impl;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
* - flows - meters - groups (reordered)
**/
- final List<ItemSyncBox<Group>> groupsToAddOrUpdate = extractGroupsToAddOrUpdate(nodeId, configTree, operationalTree);
+ final List<ItemSyncBox<Group>> groupsToAddOrUpdate =
+ extractGroupsToAddOrUpdate(nodeId, configTree, operationalTree);
final ItemSyncBox<Meter> metersToAddOrUpdate = extractMetersToAddOrUpdate(nodeId, configTree, operationalTree);
- final Map<TableKey, ItemSyncBox<Flow>> flowsToAddOrUpdate = extractFlowsToAddOrUpdate(nodeId, configTree, operationalTree);
+ final Map<TableKey, ItemSyncBox<Flow>> flowsToAddOrUpdate =
+ extractFlowsToAddOrUpdate(nodeId, configTree, operationalTree);
- final Map<TableKey, ItemSyncBox<Flow>> flowsToRemove = extractFlowsToRemove(nodeId, configTree, operationalTree);
+ final Map<TableKey, ItemSyncBox<Flow>> flowsToRemove =
+ extractFlowsToRemove(nodeId, configTree, operationalTree);
final ItemSyncBox<Meter> metersToRemove = extractMetersToRemove(nodeId, configTree, operationalTree);
final List<ItemSyncBox<Group>> groupsToRemove = extractGroupsToRemove(nodeId, configTree, operationalTree);
final ListenableFuture<RpcResult<Void>> resultVehicle = syncPlanPushStrategy.executeSyncStrategy(
bootstrapResultFuture, input, counters);
- return Futures.transform(resultVehicle, new Function<RpcResult<Void>, Boolean>() {
- @Override
- public Boolean apply(RpcResult<Void> input) {
- if (input == null) {
- return false;
- }
- if (LOG.isDebugEnabled()) {
- final CrudCounts flowCrudCounts = counters.getFlowCrudCounts();
- final CrudCounts meterCrudCounts = counters.getMeterCrudCounts();
- final CrudCounts groupCrudCounts = counters.getGroupCrudCounts();
- LOG.debug("Syncup outcome[{}] (added/updated/removed): flow={}/{}/{}, group={}/{}/{}, " +
- "meter={}/{}/{}, errors={}",
- nodeId.getValue(),
- flowCrudCounts.getAdded(), flowCrudCounts.getUpdated(), flowCrudCounts.getRemoved(),
- groupCrudCounts.getAdded(), groupCrudCounts.getUpdated(), groupCrudCounts.getRemoved(),
- meterCrudCounts.getAdded(), meterCrudCounts.getUpdated(), meterCrudCounts.getRemoved(),
- Arrays.toString(input.getErrors().toArray()));
- }
- return input.isSuccessful();
- }}, MoreExecutors.directExecutor());
+ return Futures.transform(resultVehicle, input1 -> {
+ if (input1 == null) {
+ return false;
+ }
+ if (LOG.isDebugEnabled()) {
+ final CrudCounts flowCrudCounts = counters.getFlowCrudCounts();
+ final CrudCounts meterCrudCounts = counters.getMeterCrudCounts();
+ final CrudCounts groupCrudCounts = counters.getGroupCrudCounts();
+ LOG.debug("Syncup outcome[{}] (added/updated/removed): flow={}/{}/{}, group={}/{}/{}, "
+ + "meter={}/{}/{}, errors={}",
+ nodeId.getValue(),
+ flowCrudCounts.getAdded(), flowCrudCounts.getUpdated(), flowCrudCounts.getRemoved(),
+ groupCrudCounts.getAdded(), groupCrudCounts.getUpdated(), groupCrudCounts.getRemoved(),
+ meterCrudCounts.getAdded(), meterCrudCounts.getUpdated(), meterCrudCounts.getRemoved(),
+ Arrays.toString(input1.getErrors().toArray()));
+ }
+ return input1.isSuccessful();
+ }, MoreExecutors.directExecutor());
}
@VisibleForTesting
private static List<ItemSyncBox<Group>> extractGroupsToAddOrUpdate(final NodeId nodeId,
- final FlowCapableNode flowCapableNodeConfigured,
- final FlowCapableNode flowCapableNodeOperational) {
+ final FlowCapableNode flowCapableNodeConfigured, final FlowCapableNode flowCapableNodeOperational) {
final List<Group> groupsConfigured = ReconcileUtil.safeGroups(flowCapableNodeConfigured);
final List<Group> groupsOperational = ReconcileUtil.safeGroups(flowCapableNodeOperational);
final Map<Long, Group> groupOperationalMap = FlowCapableNodeLookups.wrapGroupsToMap(groupsOperational);
@VisibleForTesting
private static Map<TableKey, ItemSyncBox<Flow>> extractFlowsToAddOrUpdate(final NodeId nodeId,
- final FlowCapableNode flowCapableNodeConfigured,
- final FlowCapableNode flowCapableNodeOperational) {
+ final FlowCapableNode flowCapableNodeConfigured, final FlowCapableNode flowCapableNodeOperational) {
final List<Table> tablesConfigured = ReconcileUtil.safeTables(flowCapableNodeConfigured);
if (tablesConfigured.isEmpty()) {
return Collections.emptyMap();
@VisibleForTesting
private static Map<TableKey, ItemSyncBox<Flow>> extractFlowsToRemove(final NodeId nodeId,
- final FlowCapableNode flowCapableNodeConfigured,
- final FlowCapableNode flowCapableNodeOperational) {
+ final FlowCapableNode flowCapableNodeConfigured, final FlowCapableNode flowCapableNodeOperational) {
final List<Table> tablesOperational = ReconcileUtil.safeTables(flowCapableNodeOperational);
if (tablesOperational.isEmpty()) {
return Collections.emptyMap();
}
@Override
+ @SuppressWarnings("checkstyle:IllegalCatch")
public void close() {
if (clusterSingletonServiceRegistration != null) {
try {
public void onDeviceConnected(final NodeId nodeId) {
LOG.debug("FRS service registered for: {}", nodeId.getValue());
- final DeviceMastership mastership = new DeviceMastership(nodeId, reconciliationRegistry, clusterSingletonService);
+ final DeviceMastership mastership = new DeviceMastership(nodeId, reconciliationRegistry,
+ clusterSingletonService);
deviceMasterships.put(nodeId, mastership);
}
/**
* Implements {@link ForwardingRulesCommitter} methods for processing add, update and remove of {@link Flow}.
*/
-public class FlowForwarder implements ForwardingRulesCommitter<Flow, AddFlowOutput, RemoveFlowOutput, UpdateFlowOutput> {
+public class FlowForwarder implements ForwardingRulesCommitter<Flow, AddFlowOutput, RemoveFlowOutput,
+ UpdateFlowOutput> {
private static final Logger LOG = LoggerFactory.getLogger(FlowForwarder.class);
private static final String TABLE_ID_MISMATCH = "tableId mismatch";
// always needs to set strict flag into update-flow input so that
// only a flow entry associated with a given flow object is updated.
- builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).setStrict(Boolean.TRUE).build());
- builder.setOriginalFlow((new OriginalFlowBuilder(original)).setStrict(Boolean.TRUE).build());
+ builder.setUpdatedFlow(new UpdatedFlowBuilder(update).setStrict(Boolean.TRUE).build());
+ builder.setOriginalFlow(new OriginalFlowBuilder(original).setStrict(Boolean.TRUE).build());
output = salFlowService.updateFlow(builder.build());
} else {
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
output = salFlowService.addFlow(builder.build());
} else {
- output = RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION, TABLE_ID_MISMATCH).buildFuture();
+ output = RpcResultBuilder.<AddFlowOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+ TABLE_ID_MISMATCH).buildFuture();
}
return output;
}
/**
* Implements {@link ForwardingRulesCommitter} methods for processing add, update and remove of {@link Group}.
*/
-public class GroupForwarder implements ForwardingRulesCommitter<Group, AddGroupOutput, RemoveGroupOutput, UpdateGroupOutput> {
+public class GroupForwarder implements ForwardingRulesCommitter<Group, AddGroupOutput, RemoveGroupOutput,
+ UpdateGroupOutput> {
private static final Logger LOG = LoggerFactory.getLogger(GroupForwarder.class);
private final SalGroupService salGroupService;
}
@Override
- public Future<RpcResult<RemoveGroupOutput>> remove(final InstanceIdentifier<Group> identifier, final Group removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<RemoveGroupOutput>> remove(final InstanceIdentifier<Group> identifier,
+ final Group removeDataObj, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
LOG.trace("Forwarding Table REMOVE request [Tbl id, node Id {} {}",
identifier, nodeIdent);
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
- builder.setUpdatedGroup((new UpdatedGroupBuilder(update)).build());
- builder.setOriginalGroup((new OriginalGroupBuilder(original)).build());
+ builder.setUpdatedGroup(new UpdatedGroupBuilder(update).build());
+ builder.setOriginalGroup(new OriginalGroupBuilder(original).build());
return salGroupService.updateGroup(builder.build());
}
/**
* Implements {@link ForwardingRulesCommitter} methods for processing add, update and remove of {@link Meter}.
*/
-public class MeterForwarder implements ForwardingRulesCommitter<Meter, AddMeterOutput, RemoveMeterOutput, UpdateMeterOutput> {
+public class MeterForwarder implements ForwardingRulesCommitter<Meter, AddMeterOutput, RemoveMeterOutput,
+ UpdateMeterOutput> {
private static final Logger LOG = LoggerFactory.getLogger(MeterForwarder.class);
private final SalMeterService salMeterService;
}
@Override
- public Future<RpcResult<RemoveMeterOutput>> remove(final InstanceIdentifier<Meter> identifier, final Meter removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<RemoveMeterOutput>> remove(final InstanceIdentifier<Meter> identifier,
+ final Meter removeDataObj, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
LOG.trace("Received the Meter REMOVE request [Tbl id, node Id {} {}",
identifier, nodeIdent);
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
- builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
- builder.setOriginalMeter((new OriginalMeterBuilder(original)).build());
+ builder.setUpdatedMeter(new UpdatedMeterBuilder(update).build());
+ builder.setOriginalMeter(new OriginalMeterBuilder(original).build());
return salMeterService.updateMeter(builder.build());
}
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import com.google.common.collect.Range;
-import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
counters.getGroupCrudCounts().setUpdated(ReconcileUtil.countTotalUpdated(diffInput.getGroupsToAddOrUpdate()));
counters.getGroupCrudCounts().setRemoved(ReconcileUtil.countTotalPushed(diffInput.getGroupsToRemove()));
- counters.getFlowCrudCounts().setAdded(ReconcileUtil.countTotalPushed(diffInput.getFlowsToAddOrUpdate().values()));
- counters.getFlowCrudCounts().setUpdated(ReconcileUtil.countTotalUpdated(diffInput.getFlowsToAddOrUpdate().values()));
+ counters.getFlowCrudCounts().setAdded(ReconcileUtil.countTotalPushed(
+ diffInput.getFlowsToAddOrUpdate().values()));
+ counters.getFlowCrudCounts().setUpdated(ReconcileUtil.countTotalUpdated(
+ diffInput.getFlowsToAddOrUpdate().values()));
counters.getFlowCrudCounts().setRemoved(ReconcileUtil.countTotalPushed(diffInput.getFlowsToRemove().values()));
counters.getMeterCrudCounts().setAdded(diffInput.getMetersToAddOrUpdate().getItemsToPush().size());
// TODO enable table-update when ready
//resultVehicle = updateTableFeatures(nodeIdent, configTree);
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- final List<Batch> batchBag = new ArrayList<>();
- int batchOrder = 0;
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ final List<Batch> batchBag = new ArrayList<>();
+ int batchOrder = 0;
- batchOrder = assembleAddOrUpdateGroups(batchBag, batchOrder, diffInput.getGroupsToAddOrUpdate());
- batchOrder = assembleAddOrUpdateMeters(batchBag, batchOrder, diffInput.getMetersToAddOrUpdate());
- batchOrder = assembleAddOrUpdateFlows(batchBag, batchOrder, diffInput.getFlowsToAddOrUpdate());
+ batchOrder = assembleAddOrUpdateGroups(batchBag, batchOrder, diffInput.getGroupsToAddOrUpdate());
+ batchOrder = assembleAddOrUpdateMeters(batchBag, batchOrder, diffInput.getMetersToAddOrUpdate());
+ batchOrder = assembleAddOrUpdateFlows(batchBag, batchOrder, diffInput.getFlowsToAddOrUpdate());
- batchOrder = assembleRemoveFlows(batchBag, batchOrder, diffInput.getFlowsToRemove());
- batchOrder = assembleRemoveMeters(batchBag, batchOrder, diffInput.getMetersToRemove());
- batchOrder = assembleRemoveGroups(batchBag, batchOrder, diffInput.getGroupsToRemove());
+ batchOrder = assembleRemoveFlows(batchBag, batchOrder, diffInput.getFlowsToRemove());
+ batchOrder = assembleRemoveMeters(batchBag, batchOrder, diffInput.getMetersToRemove());
+ batchOrder = assembleRemoveGroups(batchBag, batchOrder, diffInput.getGroupsToRemove());
- LOG.trace("Index of last batch step: {}", batchOrder);
+ LOG.trace("Index of last batch step: {}", batchOrder);
- final ProcessFlatBatchInput flatBatchInput = new ProcessFlatBatchInputBuilder()
- .setNode(new NodeRef(PathUtil.digNodePath(diffInput.getNodeIdent())))
- // TODO: propagate from input
- .setExitOnFirstError(false)
- .setBatch(batchBag)
- .build();
+ final ProcessFlatBatchInput flatBatchInput = new ProcessFlatBatchInputBuilder()
+ .setNode(new NodeRef(PathUtil.digNodePath(diffInput.getNodeIdent())))
+ // TODO: propagate from input
+ .setExitOnFirstError(false)
+ .setBatch(batchBag)
+ .build();
- final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture = flatBatchService.processFlatBatch(flatBatchInput);
+ final Future<RpcResult<ProcessFlatBatchOutput>> rpcResultFuture =
+ flatBatchService.processFlatBatch(flatBatchInput);
- if (LOG.isDebugEnabled()) {
- Futures.addCallback(JdkFutureAdapters.listenInPoolThread(rpcResultFuture),
- createCounterCallback(batchBag, batchOrder, counters), MoreExecutors.directExecutor());
- }
-
- return Futures.transform(JdkFutureAdapters.listenInPoolThread(rpcResultFuture),
- ReconcileUtil.<ProcessFlatBatchOutput>createRpcResultToVoidFunction("flat-batch"),
- MoreExecutors.directExecutor());
+ if (LOG.isDebugEnabled()) {
+ Futures.addCallback(JdkFutureAdapters.listenInPoolThread(rpcResultFuture),
+ createCounterCallback(batchBag, batchOrder, counters), MoreExecutors.directExecutor());
}
+
+ return Futures.transform(JdkFutureAdapters.listenInPoolThread(rpcResultFuture),
+ ReconcileUtil.<ProcessFlatBatchOutput>createRpcResultToVoidFunction("flat-batch"),
+ MoreExecutors.directExecutor());
}, MoreExecutors.directExecutor());
return resultVehicle;
}
return new FutureCallback<RpcResult<ProcessFlatBatchOutput>>() {
@Override
public void onSuccess(@Nullable final RpcResult<ProcessFlatBatchOutput> result) {
- if (!result.isSuccessful() && result.getResult() != null && !result.getResult().getBatchFailure().isEmpty()) {
+ if (!result.isSuccessful() && result.getResult() != null
+ && !result.getResult().getBatchFailure().isEmpty()) {
Map<Range<Integer>, Batch> batchMap = mapBatchesToRanges(inputBatchBag, failureIndexLimit);
decrementBatchFailuresCounters(result.getResult().getBatchFailure(), batchMap, counters);
}
}
@Override
- public void onFailure(final Throwable t) {
+ public void onFailure(final Throwable failure) {
counters.resetAll();
}
};
}
@VisibleForTesting
- static int assembleRemoveFlows(final List<Batch> batchBag, int batchOrder, final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
+ static int assembleRemoveFlows(final List<Batch> batchBag, int batchOrder,
+ final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
// process flow remove
int order = batchOrder;
if (flowItemSyncTableMap != null) {
}
@VisibleForTesting
- static int assembleAddOrUpdateGroups(final List<Batch> batchBag, int batchOrder, final List<ItemSyncBox<Group>> groupsToAddOrUpdate) {
+ static int assembleAddOrUpdateGroups(final List<Batch> batchBag, int batchOrder,
+ final List<ItemSyncBox<Group>> groupsToAddOrUpdate) {
// process group add+update
int order = batchOrder;
if (groupsToAddOrUpdate != null) {
new ArrayList<>(groupItemSyncBox.getItemsToUpdate().size());
int itemOrder = 0;
for (Group group : groupItemSyncBox.getItemsToPush()) {
- flatBatchAddGroupBag.add(new FlatBatchAddGroupBuilder(group).setBatchOrder(itemOrder++).build());
+ flatBatchAddGroupBag.add(new FlatBatchAddGroupBuilder(group)
+ .setBatchOrder(itemOrder++).build());
}
final Batch batch = new BatchBuilder()
.setBatchChoice(new FlatBatchAddGroupCaseBuilder()
int itemOrder = 0;
for (ItemSyncBox.ItemUpdateTuple<Group> groupUpdate : groupItemSyncBox.getItemsToUpdate()) {
flatBatchUpdateGroupBag.add(new FlatBatchUpdateGroupBuilder()
- .setBatchOrder(itemOrder++)
- .setOriginalBatchedGroup(new OriginalBatchedGroupBuilder(groupUpdate.getOriginal()).build())
- .setUpdatedBatchedGroup(new UpdatedBatchedGroupBuilder(groupUpdate.getUpdated()).build())
- .build());
+ .setBatchOrder(itemOrder++)
+ .setOriginalBatchedGroup(new OriginalBatchedGroupBuilder(groupUpdate.getOriginal()).build())
+ .setUpdatedBatchedGroup(new UpdatedBatchedGroupBuilder(groupUpdate.getUpdated()).build())
+ .build());
}
final Batch batch = new BatchBuilder()
.setBatchChoice(new FlatBatchUpdateGroupCaseBuilder()
}
@VisibleForTesting
- static int assembleRemoveGroups(final List<Batch> batchBag, int batchOrder, final List<ItemSyncBox<Group>> groupsToRemoveOrUpdate) {
+ static int assembleRemoveGroups(final List<Batch> batchBag, int batchOrder,
+ final List<ItemSyncBox<Group>> groupsToRemoveOrUpdate) {
        // process group remove
int order = batchOrder;
if (groupsToRemoveOrUpdate != null) {
new ArrayList<>(groupItemSyncBox.getItemsToUpdate().size());
int itemOrder = 0;
for (Group group : groupItemSyncBox.getItemsToPush()) {
- flatBatchRemoveGroupBag.add(new FlatBatchRemoveGroupBuilder(group).setBatchOrder(itemOrder++).build());
+ flatBatchRemoveGroupBag.add(new FlatBatchRemoveGroupBuilder(group)
+ .setBatchOrder(itemOrder++).build());
}
final Batch batch = new BatchBuilder()
.setBatchChoice(new FlatBatchRemoveGroupCaseBuilder()
}
@VisibleForTesting
- static int assembleAddOrUpdateMeters(final List<Batch> batchBag, int batchOrder, final ItemSyncBox<Meter> meterItemSyncBox) {
+ static int assembleAddOrUpdateMeters(final List<Batch> batchBag, int batchOrder,
+ final ItemSyncBox<Meter> meterItemSyncBox) {
// process meter add+update
int order = batchOrder;
if (meterItemSyncBox != null) {
}
@VisibleForTesting
- static int assembleRemoveMeters(final List<Batch> batchBag, int batchOrder, final ItemSyncBox<Meter> meterItemSyncBox) {
+ static int assembleRemoveMeters(final List<Batch> batchBag, int batchOrder,
+ final ItemSyncBox<Meter> meterItemSyncBox) {
// process meter remove
int order = batchOrder;
if (meterItemSyncBox != null && !meterItemSyncBox.getItemsToPush().isEmpty()) {
}
@VisibleForTesting
- static int assembleAddOrUpdateFlows(final List<Batch> batchBag, int batchOrder, final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
+ static int assembleAddOrUpdateFlows(final List<Batch> batchBag, int batchOrder,
+ final Map<TableKey, ItemSyncBox<Flow>> flowItemSyncTableMap) {
// process flow add+update
int order = batchOrder;
if (flowItemSyncTableMap != null) {
int itemOrder = 0;
for (ItemSyncBox.ItemUpdateTuple<Flow> flowUpdate : flowItemSyncBox.getItemsToUpdate()) {
flatBatchUpdateFlowBag.add(new FlatBatchUpdateFlowBuilder()
- .setBatchOrder(itemOrder++)
- .setFlowId(flowUpdate.getUpdated().getId())
- .setOriginalBatchedFlow(new OriginalBatchedFlowBuilder(flowUpdate.getOriginal()).build())
- .setUpdatedBatchedFlow(new UpdatedBatchedFlowBuilder(flowUpdate.getUpdated()).build())
- .build());
+ .setBatchOrder(itemOrder++)
+ .setFlowId(flowUpdate.getUpdated().getId())
+ .setOriginalBatchedFlow(new OriginalBatchedFlowBuilder(flowUpdate.getOriginal()).build())
+ .setUpdatedBatchedFlow(new UpdatedBatchedFlowBuilder(flowUpdate.getUpdated()).build())
+ .build());
}
final Batch batch = new BatchBuilder()
.setBatchChoice(new FlatBatchUpdateFlowCaseBuilder()
package org.opendaylight.openflowplugin.applications.frsync.impl.strategy;
import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
-import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
// TODO enable table-update when ready
//resultVehicle = updateTableFeatures(nodeIdent, configTree);
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- if (!input.isSuccessful()) {
- //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
- //final ListenableFuture<RpcResult<Void>> singleVoidUpdateResult = Futures.transform(
- // Futures.asList Arrays.asList(input, output),
- // ReconcileUtil.<UpdateFlowOutput>createRpcResultCondenser("TODO"));
- }
- return addMissingGroups(nodeId, nodeIdent, diffInput.getGroupsToAddOrUpdate(), counters);
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ if (!input.isSuccessful()) {
+ //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
+ //final ListenableFuture<RpcResult<Void>> singleVoidUpdateResult = Futures.transform(
+ // Futures.asList Arrays.asList(input, output),
+ // ReconcileUtil.<UpdateFlowOutput>createRpcResultCondenser("TODO"));
}
+ return addMissingGroups(nodeId, nodeIdent, diffInput.getGroupsToAddOrUpdate(), counters);
}, MoreExecutors.directExecutor());
Futures.addCallback(resultVehicle, FxChainUtil.logResultCallback(nodeId, "addMissingGroups"),
MoreExecutors.directExecutor());
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- if (!input.isSuccessful()) {
- //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
- }
- return addMissingMeters(nodeId, nodeIdent, diffInput.getMetersToAddOrUpdate(), counters);
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ if (!input.isSuccessful()) {
+ //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
}
+ return addMissingMeters(nodeId, nodeIdent, diffInput.getMetersToAddOrUpdate(), counters);
}, MoreExecutors.directExecutor());
Futures.addCallback(resultVehicle, FxChainUtil.logResultCallback(nodeId, "addMissingMeters"),
MoreExecutors.directExecutor());
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- if (!input.isSuccessful()) {
- //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
- }
- return addMissingFlows(nodeId, nodeIdent, diffInput.getFlowsToAddOrUpdate(), counters);
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ if (!input.isSuccessful()) {
+ //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
}
+ return addMissingFlows(nodeId, nodeIdent, diffInput.getFlowsToAddOrUpdate(), counters);
}, MoreExecutors.directExecutor());
Futures.addCallback(resultVehicle, FxChainUtil.logResultCallback(nodeId, "addMissingFlows"),
MoreExecutors.directExecutor());
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- if (!input.isSuccessful()) {
- //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
- }
- return removeRedundantFlows(nodeId, nodeIdent, diffInput.getFlowsToRemove(), counters);
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ if (!input.isSuccessful()) {
+ //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
}
+ return removeRedundantFlows(nodeId, nodeIdent, diffInput.getFlowsToRemove(), counters);
}, MoreExecutors.directExecutor());
Futures.addCallback(resultVehicle, FxChainUtil.logResultCallback(nodeId, "removeRedundantFlows"),
MoreExecutors.directExecutor());
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- if (!input.isSuccessful()) {
- //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
- }
- return removeRedundantMeters(nodeId, nodeIdent, diffInput.getMetersToRemove(), counters);
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ if (!input.isSuccessful()) {
+ //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
}
+ return removeRedundantMeters(nodeId, nodeIdent, diffInput.getMetersToRemove(), counters);
}, MoreExecutors.directExecutor());
Futures.addCallback(resultVehicle, FxChainUtil.logResultCallback(nodeId, "removeRedundantMeters"),
MoreExecutors.directExecutor());
- resultVehicle = Futures.transformAsync(resultVehicle, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input) throws Exception {
- if (!input.isSuccessful()) {
- //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
- }
- return removeRedundantGroups(nodeId, nodeIdent, diffInput.getGroupsToRemove(), counters);
+ resultVehicle = Futures.transformAsync(resultVehicle, input -> {
+ if (!input.isSuccessful()) {
+ //TODO chain errors but not skip processing on first error return Futures.immediateFuture(input);
}
+ return removeRedundantGroups(nodeId, nodeIdent, diffInput.getGroupsToRemove(), counters);
}, MoreExecutors.directExecutor());
Futures.addCallback(resultVehicle, FxChainUtil.logResultCallback(nodeId, "removeRedundantGroups"),
MoreExecutors.directExecutor());
final Flow existingFlow = flowUpdate.getOriginal();
final Flow updatedFlow = flowUpdate.getUpdated();
- final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent = tableIdent.child(Flow.class, updatedFlow.getKey());
+ final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent = tableIdent.child(Flow.class,
+ updatedFlow.getKey());
LOG.trace("flow {} in table {} - needs update on device {} match{}",
updatedFlow.getId(), tableKey, nodeId, updatedFlow.getMatch());
}
Collections.reverse(groupsRemovalPlan);
for (final ItemSyncBox<Group> groupsPortion : groupsRemovalPlan) {
- chainedResult =
- Futures.transformAsync(chainedResult, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input)
- throws Exception {
- final ListenableFuture<RpcResult<Void>> result;
- if (input.isSuccessful()) {
- result = flushRemoveGroupPortionAndBarrier(nodeIdent, groupsPortion);
- } else {
- // pass through original unsuccessful rpcResult
- result = Futures.immediateFuture(input);
- }
-
- return result;
- }
- }, MoreExecutors.directExecutor());
+ chainedResult = Futures.transformAsync(chainedResult, input -> {
+ final ListenableFuture<RpcResult<Void>> result;
+ if (input.isSuccessful()) {
+ result = flushRemoveGroupPortionAndBarrier(nodeIdent, groupsPortion);
+ } else {
+ // pass through original unsuccessful rpcResult
+ result = Futures.immediateFuture(input);
+ }
+
+ return result;
+ }, MoreExecutors.directExecutor());
}
} catch (IllegalStateException e) {
chainedResult = RpcResultBuilder.<Void>failed()
for (TableFeatures tableFeaturesItem : tableFeatures) {
// TODO uncomment java.lang.NullPointerException
// at
- // org.opendaylight.openflowjava.protocol.impl.serialization.match.AbstractOxmMatchEntrySerializer.serializeHeader(AbstractOxmMatchEntrySerializer.java:31
+ // org.opendaylight.openflowjava.protocol.impl.serialization.match.AbstractOxmMatchEntrySerializer
+ // .serializeHeader(AbstractOxmMatchEntrySerializer.java:31
// allResults.add(JdkFutureAdapters.listenInPoolThread(
// tableForwarder.update(tableFeaturesII, null, tableFeaturesItem, nodeIdent)));
}
chainedResult = flushAddGroupPortionAndBarrier(nodeIdent, groupsAddPlan.get(0));
for (final ItemSyncBox<Group> groupsPortion : Iterables.skip(groupsAddPlan, 1)) {
chainedResult =
- Futures.transformAsync(chainedResult, new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
- @Override
- public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input)
- throws Exception {
- final ListenableFuture<RpcResult<Void>> result;
- if (input.isSuccessful()) {
- result = flushAddGroupPortionAndBarrier(nodeIdent, groupsPortion);
- } else {
- // pass through original unsuccessful rpcResult
- result = Futures.immediateFuture(input);
- }
-
- return result;
- }
- }, MoreExecutors.directExecutor());
+ Futures.transformAsync(chainedResult, input -> {
+ final ListenableFuture<RpcResult<Void>> result;
+ if (input.isSuccessful()) {
+ result = flushAddGroupPortionAndBarrier(nodeIdent, groupsPortion);
+ } else {
+ // pass through original unsuccessful rpcResult
+ result = Futures.immediateFuture(input);
+ }
+
+ return result;
+ }, MoreExecutors.directExecutor());
}
} else {
chainedResult = RpcResultBuilder.<Void>success().buildFuture();
return this;
}
- public SyncPlanPushStrategyIncrementalImpl setTransactionService(final FlowCapableTransactionService transactionService) {
+ public SyncPlanPushStrategyIncrementalImpl setTransactionService(
+ final FlowCapableTransactionService transactionService) {
this.transactionService = transactionService;
return this;
}
}
@Override
- public boolean equals(Object o) {
- if (this == o) {
+ public boolean equals(Object obj) {
+ if (this == obj) {
return true;
}
- if (o == null || getClass() != o.getClass()) {
+ if (obj == null || getClass() != obj.getClass()) {
return false;
}
- FlowDescriptor that = (FlowDescriptor) o;
+ FlowDescriptor that = (FlowDescriptor) obj;
if (flowId != null ? !flowId.equals(that.flowId) : that.flowId != null) {
return false;
}
/**
* Util methods for {@link com.google.common.util.concurrent.ListenableFuture} chaining.
*/
-public class FxChainUtil {
+public final class FxChainUtil {
private static final Logger LOG = LoggerFactory.getLogger(FxChainUtil.class);
if (result.isSuccessful()) {
LOG.debug(prefix + " finished successfully: {}", nodeId.getValue());
} else {
- final Collection<RpcError> errors = MoreObjects.firstNonNull(result.getErrors(), ImmutableList.<RpcError>of());
+ final Collection<RpcError> errors = MoreObjects.firstNonNull(result.getErrors(),
+ ImmutableList.<RpcError>of());
LOG.debug(prefix + " failed: {} -> {}", nodeId.getValue(), Arrays.toString(errors.toArray()));
}
} else {
}
@Override
- public void onFailure(final Throwable t) {
- LOG.debug(prefix + " reconciliation failed seriously: {}", nodeId.getValue(), t);
+ public void onFailure(final Throwable failure) {
+ LOG.debug(prefix + " reconciliation failed seriously: {}", nodeId.getValue(), failure);
}
};
}
/**
* Basic {@link DataTreeModification} related tools.
*/
-public class ModificationUtil {
+public final class ModificationUtil {
private ModificationUtil() {
throw new IllegalStateException("This class should not be instantiated.");
/**
* Basic {@link InstanceIdentifier} related tools.
*/
-public class PathUtil {
+public final class PathUtil {
private PathUtil() {
throw new IllegalStateException("This class should not be instantiated.");
}
+
public static NodeId digNodeId(final InstanceIdentifier<?> nodeIdent) {
return nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId();
}
}
/**
+ * Creates a single rpc result of type Void honoring all partial rpc results.
+ *
* @param previousItemAction description for case when the triggering future contains failure
* @param <D> type of rpc output (gathered in list)
* @return single rpc result of type Void honoring all partial rpc results
*/
- public static <D> Function<List<RpcResult<D>>, RpcResult<Void>> createRpcResultCondenser(final String previousItemAction) {
+ public static <D> Function<List<RpcResult<D>>, RpcResult<Void>> createRpcResultCondenser(
+ final String previousItemAction) {
return input -> {
final RpcResultBuilder<Void> resultSink;
if (input != null) {
}
/**
+ * Creates a single rpc result of type Void honoring all partial rpc results.
+ *
* @param actionDescription description for case when the triggering future contains failure
* @param <D> type of rpc output (gathered in list)
* @return single rpc result of type Void honoring all partial rpc results
*/
- public static <D> Function<RpcResult<D>, RpcResult<Void>> createRpcResultToVoidFunction(final String actionDescription) {
+ public static <D> Function<RpcResult<D>, RpcResult<Void>> createRpcResultToVoidFunction(
+ final String actionDescription) {
return input -> {
final RpcResultBuilder<Void> resultSink;
if (input != null) {
}
/**
- * @param nodeIdent flow capable node path - target device for routed rpc
+ * Flushes a chain barrier.
+ *
+ * @param nodeIdent flow capable node path - target device for routed rpc
* @param flowCapableTransactionService barrier rpc service
* @return async barrier result
*/
}
/**
- * @param nodeId target node
+ * Returns a list of safe synchronization steps with updates.
+ *
+ * @param nodeId target node
* @param installedGroupsArg groups resent on device
* @param pendingGroups groups configured for device
* @return list of safe synchronization steps with updates
}
/**
+ * Returns a list of safe synchronization steps.
+ *
* @param nodeId target node
* @param installedGroupsArg groups resent on device
* @param pendingGroups groups configured for device
} else {
if (checkGroupPrecondition(installedGroups.keySet(), group)) {
iterator.remove();
- LOG.trace("Group {} on device {} differs - planned for update", group.getGroupId(), nodeId);
- stepPlan.getItemsToUpdate().add(new ItemSyncBox.ItemUpdateTuple<>(existingGroup, group));
+ LOG.trace("Group {} on device {} differs - planned for update", group.getGroupId(),
+ nodeId);
+ stepPlan.getItemsToUpdate().add(new ItemSyncBox.ItemUpdateTuple<>(existingGroup,
+ group));
}
}
}
installedGroups.putAll(installIncrement);
plan.add(stepPlan);
} else if (!pendingGroups.isEmpty()) {
- LOG.warn("Failed to resolve and divide groups into preconditions-match based ordered plan: {}, " +
- "resolving stuck at level {}", nodeId.getValue(), plan.size());
+ LOG.warn("Failed to resolve and divide groups into preconditions-match based ordered plan: {}, "
+ + "resolving stuck at level {}", nodeId.getValue(), plan.size());
throw new IllegalStateException("Failed to resolve and divide groups when matching preconditions");
}
}
for (Action action : bucket.getAction()) {
// if the output action is a group
if (GroupActionCase.class.equals(action.getAction().getImplementedInterface())) {
- Long groupId = ((GroupActionCase) (action.getAction())).getGroupAction().getGroupId();
+ Long groupId = ((GroupActionCase) action.getAction()).getGroupAction().getGroupId();
// see if that output group is installed
if (!installedGroupIds.contains(groupId)) {
// if not installed, we have missing dependencies and cannot install this pending group
}
/**
+ * Resolves meter differences.
+ *
* @param nodeId target node
* @param meterOperationalMap meters present on device
* @param metersConfigured meters configured for device
}
/**
+ * Resolves flow differences in a table.
+ *
* @param flowsConfigured flows resent on device
* @param flowOperationalMap flows configured for device
* @param gatherUpdates check content of pending item if present on device (and create update task eventually)
}
/**
+ * Resolves flow differences in all tables.
+ *
* @param nodeId target node
* @param tableOperationalMap flow-tables resent on device
* @param tablesConfigured flow-tables configured for device
* @return map : key={@link TableKey}, value={@link ItemSyncBox} of safe synchronization steps
*/
public static Map<TableKey, ItemSyncBox<Flow>> resolveFlowDiffsInAllTables(final NodeId nodeId,
- final Map<Short, Table> tableOperationalMap,
- final List<Table> tablesConfigured,
- final boolean gatherUpdates) {
+ final Map<Short, Table> tableOperationalMap, final List<Table> tablesConfigured,
+ final boolean gatherUpdates) {
LOG.trace("resolving flows in tables for {}", nodeId.getValue());
final Map<TableKey, ItemSyncBox<Flow>> tableFlowSyncBoxes = new HashMap<>();
for (final Table tableConfigured : tablesConfigured) {
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
/**
- * Data entry of before and after data for syncup in {@link org.opendaylight.openflowplugin.applications.frsync.SyncReactor}.
+ * Data entry of before and after data for syncup in
+ * {@link org.opendaylight.openflowplugin.applications.frsync.SyncReactor}.
*/
public class SyncupEntry {
private final FlowCapableNode after;
}
@Override
- public boolean equals(Object o) {
- if (this == o) {
+ public boolean equals(Object obj) {
+ if (this == obj) {
return true;
}
- if (o == null || getClass() != o.getClass()) {
+ if (obj == null || getClass() != obj.getClass()) {
return false;
}
- SyncupEntry that = (SyncupEntry) o;
+ SyncupEntry that = (SyncupEntry) obj;
if (after != null ? !after.equals(that.after) : that.after != null) {
return false;
* Provides create methods for dataObjects involved in
* {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener} by inventory.
*/
-public class DSInputFactory {
+public final class DSInputFactory {
+ private DSInputFactory() {
+ }
+
public static Group createGroup(final long groupIdValue) {
final Buckets buckets = new BucketsBuilder()
.setBucket(Collections.<Bucket>emptyList())
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
@Before
public void setUp() throws Exception {
Mockito.when(rpcRegistry.getRpcService(Matchers.<Class<? extends RpcService>>any()))
- .thenAnswer(new Answer<RpcService>() {
- @Override
- public RpcService answer(final InvocationOnMock invocation) throws Throwable {
- Class<? extends RpcService> serviceType = (Class<? extends RpcService>) invocation.getArguments()[0];
- return Mockito.mock(serviceType);
- }
+ .thenAnswer(invocation -> {
+ Class<? extends RpcService> serviceType =
+ (Class<? extends RpcService>) invocation.getArguments()[0];
+ return Mockito.mock(serviceType);
});
provider = new ForwardingRulesSyncProvider(broker, dataBroker, rpcRegistry, clusterSingletonService);
provider.close();
}
-}
\ No newline at end of file
+}
public void testOnDataTreeChangedAdd() {
Mockito.when(configModification.getDataBefore()).thenReturn(null);
Mockito.when(configModification.getDataAfter()).thenReturn(dataAfter);
- final SyncupEntry syncupEntry = loadOperationalDSAndPrepareSyncupEntry(dataAfter, confgDS, dataBefore, operationalDS);
+ final SyncupEntry syncupEntry =
+ loadOperationalDSAndPrepareSyncupEntry(dataAfter, confgDS, dataBefore, operationalDS);
nodeListenerConfig.onDataTreeChanged(Collections.singleton(dataTreeModification));
@Test
public void testOnDataTreeChangedSkip() {
- Mockito.when(roTx.read(LogicalDatastoreType.OPERATIONAL, fcNodePath)).
- thenReturn(Futures.immediateCheckedFuture(Optional.absent()));
+ Mockito.when(roTx.read(LogicalDatastoreType.OPERATIONAL, fcNodePath))
+ .thenReturn(Futures.immediateCheckedFuture(Optional.absent()));
nodeListenerConfig.onDataTreeChanged(Collections.singleton(dataTreeModification));
Mockito.verify(roTx).close();
}
- private SyncupEntry loadOperationalDSAndPrepareSyncupEntry(final FlowCapableNode after, final LogicalDatastoreType dsTypeAfter,
- final FlowCapableNode before, final LogicalDatastoreType dsTypeBefore) {
+ private SyncupEntry loadOperationalDSAndPrepareSyncupEntry(final FlowCapableNode after,
+ final LogicalDatastoreType dsTypeAfter, final FlowCapableNode before,
+ final LogicalDatastoreType dsTypeBefore) {
Mockito.when(roTx.read(LogicalDatastoreType.OPERATIONAL, fcNodePath))
.thenReturn(Futures.immediateCheckedFuture(Optional.of(dataBefore)));
final SyncupEntry syncupEntry = new SyncupEntry(after, dsTypeAfter, before, dsTypeBefore);
return syncupEntry;
}
-}
\ No newline at end of file
+}
private SimplifiedOperationalListener nodeListenerOperational;
private final LogicalDatastoreType configDS = LogicalDatastoreType.CONFIGURATION;
private final LogicalDatastoreType operationalDS = LogicalDatastoreType.OPERATIONAL;
- private final SimpleDateFormat simpleDateFormat = new SimpleDateFormat(SimplifiedOperationalListener.DATE_AND_TIME_FORMAT);
+ private final SimpleDateFormat simpleDateFormat =
+ new SimpleDateFormat(SimplifiedOperationalListener.DATE_AND_TIME_FORMAT);
@Mock
private SyncReactor reactor;
final FlowCapableNodeDao configDao = new FlowCapableNodeCachedDao(configSnapshot,
new FlowCapableNodeOdlDao(db, LogicalDatastoreType.CONFIGURATION));
- nodeListenerOperational = new SimplifiedOperationalListener(reactor, operationalSnapshot, configDao, reconciliationRegistry, deviceMastershipManager);
- InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(NODE_ID));
+ nodeListenerOperational = new SimplifiedOperationalListener(reactor, operationalSnapshot, configDao,
+ reconciliationRegistry, deviceMastershipManager);
+ InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class).child(Node.class,
+ new NodeKey(NODE_ID));
fcNodePath = nodePath.augmentation(FlowCapableNode.class);
final DataTreeIdentifier<Node> dataTreeIdentifier =
public void testOnDataTreeChangedReconcileButStaticsGatheringNotFinished() {
Mockito.when(reconciliationRegistry.isRegistered(NODE_ID)).thenReturn(true);
operationalUpdate();
- Mockito.when(operationalNode.getAugmentation(FlowCapableStatisticsGatheringStatus.class)).thenReturn(statisticsGatheringStatus);
+ Mockito.when(operationalNode.getAugmentation(FlowCapableStatisticsGatheringStatus.class))
+ .thenReturn(statisticsGatheringStatus);
Mockito.when(statisticsGatheringStatus.getSnapshotGatheringStatusEnd()).thenReturn(null);
nodeListenerOperational.onDataTreeChanged(Collections.singleton(dataTreeModification));
public void testOnDataTreeChangedReconcileButStaticsGatheringNotSuccessful() {
Mockito.when(reconciliationRegistry.isRegistered(NODE_ID)).thenReturn(true);
operationalUpdate();
- Mockito.when(operationalNode.getAugmentation(FlowCapableStatisticsGatheringStatus.class)).thenReturn(statisticsGatheringStatus);
+ Mockito.when(operationalNode.getAugmentation(FlowCapableStatisticsGatheringStatus.class))
+ .thenReturn(statisticsGatheringStatus);
Mockito.when(statisticsGatheringStatus.getSnapshotGatheringStatusEnd()).thenReturn(snapshotGatheringStatusEnd);
Mockito.when(snapshotGatheringStatusEnd.isSucceeded()).thenReturn(false);
Mockito.when(reconciliationRegistry.isRegistered(NODE_ID)).thenReturn(true);
operationalUpdate();
prepareFreshOperational(true);
- final SyncupEntry syncupEntry = loadConfigDSAndPrepareSyncupEntry(configNode, configDS, fcOperationalNode, operationalDS);
+ final SyncupEntry syncupEntry = loadConfigDSAndPrepareSyncupEntry(
+ configNode, configDS, fcOperationalNode, operationalDS);
nodeListenerOperational.onDataTreeChanged(Collections.singleton(dataTreeModification));
Mockito.when(reconciliationRegistry.isRegistered(NODE_ID)).thenReturn(true);
operationalAdd();
prepareFreshOperational(false);
- final SyncupEntry syncupEntry = loadConfigDSAndPrepareSyncupEntry(configNode, configDS, fcOperationalNode, operationalDS);
+ final SyncupEntry syncupEntry = loadConfigDSAndPrepareSyncupEntry(
+ configNode, configDS, fcOperationalNode, operationalDS);
nodeListenerOperational.onDataTreeChanged(Collections.singleton(dataTreeModification));
}
private void prepareFreshOperational(final boolean afterRegistration) throws ParseException {
- Mockito.when(operationalNode.getAugmentation(FlowCapableStatisticsGatheringStatus.class)).thenReturn(statisticsGatheringStatus);
+ Mockito.when(operationalNode.getAugmentation(FlowCapableStatisticsGatheringStatus.class))
+ .thenReturn(statisticsGatheringStatus);
Mockito.when(statisticsGatheringStatus.getSnapshotGatheringStatusEnd()).thenReturn(snapshotGatheringStatusEnd);
Mockito.when(snapshotGatheringStatusEnd.isSucceeded()).thenReturn(true);
Mockito.when(snapshotGatheringStatusEnd.getEnd()).thenReturn(Mockito.mock(DateAndTime.class));
final String timestampAfter = "9999-12-12T01:01:01.000-07:00";
if (afterRegistration) {
Mockito.when(snapshotGatheringStatusEnd.getEnd().getValue()).thenReturn(timestampAfter);
- Mockito.when(reconciliationRegistry.getRegistrationTimestamp(NODE_ID)).thenReturn(simpleDateFormat.parse(timestampBefore));
+ Mockito.when(reconciliationRegistry.getRegistrationTimestamp(NODE_ID))
+ .thenReturn(simpleDateFormat.parse(timestampBefore));
} else {
Mockito.when(snapshotGatheringStatusEnd.getEnd().getValue()).thenReturn(timestampBefore);
- Mockito.when(reconciliationRegistry.getRegistrationTimestamp(NODE_ID)).thenReturn(simpleDateFormat.parse(timestampAfter));
+ Mockito.when(reconciliationRegistry.getRegistrationTimestamp(NODE_ID))
+ .thenReturn(simpleDateFormat.parse(timestampAfter));
}
}
Mockito.when(operationalModification.getDataAfter()).thenReturn(operationalNode);
}
- private SyncupEntry loadConfigDSAndPrepareSyncupEntry(final FlowCapableNode after, final LogicalDatastoreType dsTypeAfter,
- final FlowCapableNode before, final LogicalDatastoreType dsTypeBefore) {
+ private SyncupEntry loadConfigDSAndPrepareSyncupEntry(final FlowCapableNode after,
+ final LogicalDatastoreType dsTypeAfter, final FlowCapableNode before,
+ final LogicalDatastoreType dsTypeBefore) {
Mockito.when(roTx.read(LogicalDatastoreType.CONFIGURATION, fcNodePath))
.thenReturn(Futures.immediateCheckedFuture(Optional.of(configNode)));
final SyncupEntry syncupEntry = new SyncupEntry(after, dsTypeAfter, before, dsTypeBefore);
public void setUp() {
reactor = new SyncReactorClusterDecorator(delegate, deviceMastershipManager);
- InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(NODE_ID));
+ InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(NODE_ID));
fcNodePath = nodePath.augmentation(FlowCapableNode.class);
}
Mockito.verifyZeroInteractions(delegate);
}
-}
\ No newline at end of file
+}
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.openflowplugin.applications.frsync.SyncReactor;
import org.opendaylight.openflowplugin.applications.frsync.util.SyncupEntry;
final ExecutorService executorService = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder()
.setDaemon(false)
.setNameFormat("frsync-test-%d")
- .setUncaughtExceptionHandler((thread, e) -> LOG.error("Uncaught exception {}", thread, e))
+ .setUncaughtExceptionHandler((thread, ex) -> LOG.error("Uncaught exception {}", thread, ex))
.build());
syncThreadPool = MoreExecutors.listeningDecorator(executorService);
reactor = new SyncReactorFutureZipDecorator(delegate, syncThreadPool);
final List<ListenableFuture<Boolean>> allResults = new ArrayList<>();
Mockito.when(delegate.syncup(Matchers.<InstanceIdentifier<FlowCapableNode>>any(), Mockito.eq(first)))
- .thenAnswer(new Answer<ListenableFuture<Boolean>>() {
- @Override
- public ListenableFuture<Boolean> answer(final InvocationOnMock invocationOnMock) throws Throwable {
- LOG.info("unlocking next configs");
- latchForNext.countDown();
- latchForFirst.await();
- LOG.info("unlocking first delegate");
- return Futures.immediateFuture(Boolean.TRUE);
- }
+ .thenAnswer(invocationOnMock -> {
+ LOG.info("unlocking next configs");
+ latchForNext.countDown();
+ latchForFirst.await();
+ LOG.info("unlocking first delegate");
+ return Futures.immediateFuture(Boolean.TRUE);
});
allResults.add(reactor.syncup(fcNodePath, first));
final SyncupEntry second = new SyncupEntry(dataAfter, configDS, dataBefore, configDS);
Mockito.when(delegate.syncup(Matchers.<InstanceIdentifier<FlowCapableNode>>any(), Mockito.eq(first)))
- .thenAnswer(new Answer<ListenableFuture<Boolean>>() {
- @Override
- public ListenableFuture<Boolean> answer(final InvocationOnMock invocationOnMock) throws Throwable {
- LOG.info("unlocking next config");
- latchForNext.countDown();
- return Futures.immediateFuture(Boolean.TRUE);
- }
- });
+ .thenAnswer(invocationOnMock -> {
+ LOG.info("unlocking next config");
+ latchForNext.countDown();
+ return Futures.immediateFuture(Boolean.TRUE);
+ });
reactor.syncup(fcNodePath, first);
latchForNext.await();
final SyncupEntry second = new SyncupEntry(configActual, configDS, freshOperational, operationalDS);
Mockito.when(delegate.syncup(Matchers.<InstanceIdentifier<FlowCapableNode>>any(), Mockito.eq(first)))
- .thenAnswer(new Answer<ListenableFuture<Boolean>>() {
- @Override
- public ListenableFuture<Boolean> answer(final InvocationOnMock invocationOnMock) throws Throwable {
- LOG.info("unlocking for fresh operational");
- latchForNext.countDown();
- latchForFirst.await();
- LOG.info("unlocking first delegate");
- return Futures.immediateFuture(Boolean.TRUE);
- }
- });
+ .thenAnswer(invocationOnMock -> {
+ LOG.info("unlocking for fresh operational");
+ latchForNext.countDown();
+ latchForFirst.await();
+ LOG.info("unlocking first delegate");
+ return Futures.immediateFuture(Boolean.TRUE);
+ });
reactor.syncup(fcNodePath, first);
latchForNext.await();
public void tearDown() {
syncThreadPool.shutdownNow();
}
-}
\ No newline at end of file
+}
@Before
public void setUp() throws Exception {
reactor = new SyncReactorGuardDecorator(delegate);
- InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(NODE_ID));
+ InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(NODE_ID));
fcNodePath = nodePath.augmentation(FlowCapableNode.class);
final Node operationalNode = Mockito.mock(Node.class);
Mockito.verify(delegate).syncup(fcNodePath, syncupEntry);
Mockito.verifyNoMoreInteractions(delegate);
}
-}
\ No newline at end of file
+}
.thenReturn(RpcResultBuilder.<Void>success().buildFuture());
final ListenableFuture<Boolean> syncupResult = reactor.syncup(NODE_IDENT, syncupEntry);
- try {
- Assert.assertTrue(syncupResult.isDone());
- final Boolean voidRpcResult = syncupResult.get(2, TimeUnit.SECONDS);
- Assert.assertTrue(voidRpcResult);
-
- Mockito.verify(syncPlanPushStrategy).executeSyncStrategy(
- Matchers.<ListenableFuture<RpcResult<Void>>>any(),
- syncDiffInputCaptor.capture(),
- Matchers.<SyncCrudCounters>any()
- );
-
- final SynchronizationDiffInput diffInput = syncDiffInputCaptor.getValue();
- Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getFlowsToAddOrUpdate().values()));
- Assert.assertEquals(0, ReconcileUtil.countTotalUpdated(diffInput.getFlowsToAddOrUpdate().values()));
- Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getFlowsToRemove().values()));
-
- Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getGroupsToAddOrUpdate()));
- Assert.assertEquals(0, ReconcileUtil.countTotalUpdated(diffInput.getGroupsToAddOrUpdate()));
- Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getGroupsToRemove()));
-
- Assert.assertEquals(1, diffInput.getMetersToAddOrUpdate().getItemsToPush().size());
- Assert.assertEquals(0, diffInput.getMetersToAddOrUpdate().getItemsToUpdate().size());
- Assert.assertEquals(1, diffInput.getMetersToRemove().getItemsToPush().size());
- } catch (Exception e) {
- LOG.warn("syncup failed", e);
- Assert.fail("syncup failed: " + e.getMessage());
- }
+ Assert.assertTrue(syncupResult.isDone());
+ final Boolean voidRpcResult = syncupResult.get(2, TimeUnit.SECONDS);
+ Assert.assertTrue(voidRpcResult);
+
+ Mockito.verify(syncPlanPushStrategy).executeSyncStrategy(
+ Matchers.<ListenableFuture<RpcResult<Void>>>any(),
+ syncDiffInputCaptor.capture(),
+ Matchers.<SyncCrudCounters>any());
+
+ final SynchronizationDiffInput diffInput = syncDiffInputCaptor.getValue();
+ Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getFlowsToAddOrUpdate().values()));
+ Assert.assertEquals(0, ReconcileUtil.countTotalUpdated(diffInput.getFlowsToAddOrUpdate().values()));
+ Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getFlowsToRemove().values()));
+
+ Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getGroupsToAddOrUpdate()));
+ Assert.assertEquals(0, ReconcileUtil.countTotalUpdated(diffInput.getGroupsToAddOrUpdate()));
+ Assert.assertEquals(1, ReconcileUtil.countTotalPushed(diffInput.getGroupsToRemove()));
+
+ Assert.assertEquals(1, diffInput.getMetersToAddOrUpdate().getItemsToPush().size());
+ Assert.assertEquals(0, diffInput.getMetersToAddOrUpdate().getItemsToUpdate().size());
+ Assert.assertEquals(1, diffInput.getMetersToRemove().getItemsToPush().size());
}
-}
\ No newline at end of file
+}
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * Test for {@link SyncReactorRetryDecorator}
+ * Test for {@link SyncReactorRetryDecorator}.
*/
@RunWith(MockitoJUnitRunner.class)
public class SyncReactorRetryDecoratorTest {
@Before
public void setUp() {
reactor = new SyncReactorRetryDecorator(delegate, reconciliationRegistry);
- InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(NODE_ID));
+ InstanceIdentifier<Node> nodePath = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(NODE_ID));
fcNodePath = nodePath.augmentation(FlowCapableNode.class);
}
Mockito.verifyZeroInteractions(delegate);
}
-}
\ No newline at end of file
+}
@Before
public void setUp() throws Exception {
- deviceMastership = new DeviceMastership(NODE_ID, reconciliationRegistry, Mockito.mock(ClusterSingletonServiceProvider.class));
+ deviceMastership = new DeviceMastership(NODE_ID, reconciliationRegistry,
+ Mockito.mock(ClusterSingletonServiceProvider.class));
}
@Test
Assert.assertFalse(deviceMastership.isDeviceMastered());
}
-}
\ No newline at end of file
+}
/**
* Provides create methods for data involved in {@link SynchronizationDiffInput}.
*/
-public class DiffInputFactory {
+public final class DiffInputFactory {
+ private DiffInputFactory() {
+ }
+
static ItemSyncBox<Group> createGroupSyncBox(final long... groupIDs) {
final ItemSyncBox<Group> groupBox = new ItemSyncBox<>();
.setMatch(new MatchBuilder().build())
.build();
- final Future<RpcResult<UpdateFlowOutput>> updateResult = flowForwarder.update(flowPath, flow, flowUpdated, flowCapableNodePath);
+ final Future<RpcResult<UpdateFlowOutput>> updateResult = flowForwarder.update(flowPath, flow,
+ flowUpdated, flowCapableNodePath);
Mockito.verify(salFlowService).updateFlow(Matchers.<UpdateFlowInput>any());
final UpdateFlowInput updateFlowInput = updateFlowInputCpt.getValue();
.build()).buildFuture());
final Flow removeFlow = new FlowBuilder(flow).build();
- final Future<RpcResult<RemoveFlowOutput>> removeResult = flowForwarder.remove(flowPath, removeFlow, flowCapableNodePath);
+ final Future<RpcResult<RemoveFlowOutput>> removeResult = flowForwarder.remove(flowPath,
+ removeFlow, flowCapableNodePath);
Mockito.verify(salFlowService).removeFlow(Matchers.<RemoveFlowInput>any());
final RemoveFlowInput flowInput = removeFlowInputCpt.getValue();
.buildFuture()
);
- final Future<RpcResult<RemoveGroupOutput>> addResult = groupForwarder.remove(groupPath, group, flowCapableNodePath);
+ final Future<RpcResult<RemoveGroupOutput>> addResult =
+ groupForwarder.remove(groupPath, group, flowCapableNodePath);
Mockito.verify(salGroupService).removeGroup(Matchers.<RemoveGroupInput>any());
.setGroupName("another-test")
.build();
- final Future<RpcResult<UpdateGroupOutput>> addResult = groupForwarder.update(groupPath, groupOriginal, groupUpdate,
- flowCapableNodePath);
+ final Future<RpcResult<UpdateGroupOutput>> addResult =
+ groupForwarder.update(groupPath, groupOriginal, groupUpdate, flowCapableNodePath);
Mockito.verify(salGroupService).updateGroup(Matchers.<UpdateGroupInput>any());
Assert.assertNotNull(addGroupInput.getBuckets());
Assert.assertEquals("test-group", addGroupInput.getGroupName());
}
-}
\ No newline at end of file
+}
Meter removeMeter = new MeterBuilder(meter).build();
- final Future<RpcResult<RemoveMeterOutput>> removeResult = meterForwarder.remove(meterPath, removeMeter, flowCapableNodePath);
+ final Future<RpcResult<RemoveMeterOutput>> removeResult =
+ meterForwarder.remove(meterPath, removeMeter, flowCapableNodePath);
Mockito.verify(salMeterService).removeMeter(Matchers.<RemoveMeterInput>any());
Assert.assertTrue(removeResult.isDone());
.setMeterName("another-test")
.build();
- final Future<RpcResult<UpdateMeterOutput>> updateResult = meterForwarder.update(meterPath, meterOriginal, meterUpdate,
+ final Future<RpcResult<UpdateMeterOutput>> updateResult =
+ meterForwarder.update(meterPath, meterOriginal, meterUpdate,
flowCapableNodePath);
Mockito.verify(salMeterService).updateMeter(Matchers.<UpdateMeterInput>any());
Assert.assertEquals(nodePath, addMeterInput.getNode().getValue());
Assert.assertEquals("test-meter", addMeterInput.getMeterName());
}
-}
\ No newline at end of file
+}
@Test
public void testExecuteSyncStrategy() throws Exception {
final SynchronizationDiffInput diffInput = new SynchronizationDiffInput(NODE_IDENT,
- groupsToAddOrUpdate, metersToAddOrUpdate, flowsToAddOrUpdate, flowsToRemove, metersToRemove, groupsToRemove);
+ groupsToAddOrUpdate, metersToAddOrUpdate, flowsToAddOrUpdate,
+ flowsToRemove, metersToRemove, groupsToRemove);
Mockito.when(flatBatchService.processFlatBatch(Matchers.<ProcessFlatBatchInput>any()))
.thenReturn(RpcResultBuilder.success(new ProcessFlatBatchOutputBuilder().build()).buildFuture());
@Test
public void testAssembleAddOrUpdateGroups() throws Exception {
- final int lastOrder = SyncPlanPushStrategyFlatBatchImpl.assembleAddOrUpdateGroups(batchBag, 0, groupsToAddOrUpdate);
+ final int lastOrder = SyncPlanPushStrategyFlatBatchImpl.assembleAddOrUpdateGroups(
+ batchBag, 0, groupsToAddOrUpdate);
Assert.assertEquals(9, lastOrder);
Assert.assertEquals(3, batchBag.size());
@Test
public void testAssembleAddOrUpdateMeters() throws Exception {
- final int lastOrder = SyncPlanPushStrategyFlatBatchImpl.assembleAddOrUpdateMeters(batchBag, 0, metersToAddOrUpdate);
+ final int lastOrder = SyncPlanPushStrategyFlatBatchImpl.assembleAddOrUpdateMeters(
+ batchBag, 0, metersToAddOrUpdate);
Assert.assertEquals(6, lastOrder);
Assert.assertEquals(2, batchBag.size());
@Test
public void testAssembleAddOrUpdateFlows() throws Exception {
- final int lastOrder = SyncPlanPushStrategyFlatBatchImpl.assembleAddOrUpdateFlows(batchBag, 0, flowsToAddOrUpdate);
+ final int lastOrder = SyncPlanPushStrategyFlatBatchImpl.assembleAddOrUpdateFlows(
+ batchBag, 0, flowsToAddOrUpdate);
Assert.assertEquals(9, lastOrder);
Assert.assertEquals(3, batchBag.size());
new BatchBuilder().setBatchOrder(9).build(),
new BatchBuilder().setBatchOrder(15).build()
);
- final Map<Range<Integer>, Batch> rangeBatchMap = SyncPlanPushStrategyFlatBatchImpl.mapBatchesToRanges(inputBatchBag, 42);
+ final Map<Range<Integer>, Batch> rangeBatchMap =
+ SyncPlanPushStrategyFlatBatchImpl.mapBatchesToRanges(inputBatchBag, 42);
Assert.assertEquals(4, rangeBatchMap.size());
int idx = 0;
idx++;
}
}
-}
\ No newline at end of file
+}
@Test
public void testExecuteSyncStrategy() throws Exception {
final SynchronizationDiffInput diffInput = new SynchronizationDiffInput(NODE_IDENT,
- groupsToAddOrUpdate, metersToAddOrUpdate, flowsToAddOrUpdate, flowsToRemove, metersToRemove, groupsToRemove);
+ groupsToAddOrUpdate, metersToAddOrUpdate, flowsToAddOrUpdate, flowsToRemove,
+ metersToRemove, groupsToRemove);
- final SyncCrudCounters counters = new SyncCrudCounters();
+ final SyncCrudCounters syncCounters = new SyncCrudCounters();
final ListenableFuture<RpcResult<Void>> rpcResult = syncPlanPushStrategy.executeSyncStrategy(
- RpcResultBuilder.<Void>success().buildFuture(), diffInput, counters);
-
- Mockito.verify(groupCommitter, Mockito.times(6)).add(Matchers.<InstanceIdentifier<Group>>any(),Matchers.<Group>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(groupCommitter, Mockito.times(3)).update(Matchers.<InstanceIdentifier<Group>>any(),Matchers.<Group>any(),
- Matchers.<Group>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(groupCommitter, Mockito.times(6)).remove(Matchers.<InstanceIdentifier<Group>>any(),Matchers.<Group>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(flowCommitter, Mockito.times(6)).add(Matchers.<InstanceIdentifier<Flow>>any(),Matchers.<Flow>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(flowCommitter, Mockito.times(3)).update(Matchers.<InstanceIdentifier<Flow>>any(),Matchers.<Flow>any(),
- Matchers.<Flow>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(flowCommitter, Mockito.times(6)).remove(Matchers.<InstanceIdentifier<Flow>>any(),Matchers.<Flow>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(meterCommitter, Mockito.times(3)).add(Matchers.<InstanceIdentifier<Meter>>any(), Matchers.<Meter>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(meterCommitter, Mockito.times(3)).update(Matchers.<InstanceIdentifier<Meter>>any(), Matchers.<Meter>any(),
+ RpcResultBuilder.<Void>success().buildFuture(), diffInput, syncCounters);
+
+ Mockito.verify(groupCommitter, Mockito.times(6)).add(Matchers.<InstanceIdentifier<Group>>any(),
+ Matchers.<Group>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(groupCommitter, Mockito.times(3)).update(Matchers.<InstanceIdentifier<Group>>any(),
+ Matchers.<Group>any(), Matchers.<Group>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(groupCommitter, Mockito.times(6)).remove(Matchers.<InstanceIdentifier<Group>>any(),
+ Matchers.<Group>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(flowCommitter, Mockito.times(6)).add(Matchers.<InstanceIdentifier<Flow>>any(),
+ Matchers.<Flow>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(flowCommitter, Mockito.times(3)).update(Matchers.<InstanceIdentifier<Flow>>any(),
+ Matchers.<Flow>any(), Matchers.<Flow>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(flowCommitter, Mockito.times(6)).remove(Matchers.<InstanceIdentifier<Flow>>any(),
+ Matchers.<Flow>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(meterCommitter, Mockito.times(3)).add(Matchers.<InstanceIdentifier<Meter>>any(),
+ Matchers.<Meter>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(meterCommitter, Mockito.times(3)).update(Matchers.<InstanceIdentifier<Meter>>any(),
+ Matchers.<Meter>any(), Matchers.<Meter>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Mockito.verify(meterCommitter, Mockito.times(3)).remove(Matchers.<InstanceIdentifier<Meter>>any(),
Matchers.<Meter>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
- Mockito.verify(meterCommitter, Mockito.times(3)).remove(Matchers.<InstanceIdentifier<Meter>>any(), Matchers.<Meter>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
Assert.assertTrue(rpcResult.isDone());
Assert.assertTrue(rpcResult.get().isSuccessful());
- Assert.assertEquals(6, counters.getFlowCrudCounts().getAdded());
- Assert.assertEquals(3, counters.getFlowCrudCounts().getUpdated());
- Assert.assertEquals(6, counters.getFlowCrudCounts().getRemoved());
+ Assert.assertEquals(6, syncCounters.getFlowCrudCounts().getAdded());
+ Assert.assertEquals(3, syncCounters.getFlowCrudCounts().getUpdated());
+ Assert.assertEquals(6, syncCounters.getFlowCrudCounts().getRemoved());
- Assert.assertEquals(6, counters.getGroupCrudCounts().getAdded());
- Assert.assertEquals(3, counters.getGroupCrudCounts().getUpdated());
- Assert.assertEquals(6, counters.getGroupCrudCounts().getRemoved());
+ Assert.assertEquals(6, syncCounters.getGroupCrudCounts().getAdded());
+ Assert.assertEquals(3, syncCounters.getGroupCrudCounts().getUpdated());
+ Assert.assertEquals(6, syncCounters.getGroupCrudCounts().getRemoved());
- Assert.assertEquals(3, counters.getMeterCrudCounts().getAdded());
- Assert.assertEquals(3, counters.getMeterCrudCounts().getUpdated());
- Assert.assertEquals(3, counters.getMeterCrudCounts().getRemoved());
+ Assert.assertEquals(3, syncCounters.getMeterCrudCounts().getAdded());
+ Assert.assertEquals(3, syncCounters.getMeterCrudCounts().getUpdated());
+ Assert.assertEquals(3, syncCounters.getMeterCrudCounts().getRemoved());
}
@Before
Matchers.<InstanceIdentifier<FlowCapableNode>>any());
Mockito.doAnswer(createSalServiceFutureAnswer()).when(tableCommitter).update(
- Matchers.<InstanceIdentifier<TableFeatures>>any(), Matchers.<TableFeatures>any(), Matchers.<TableFeatures>any(),
- Matchers.<InstanceIdentifier<FlowCapableNode>>any());
+ Matchers.<InstanceIdentifier<TableFeatures>>any(), Matchers.<TableFeatures>any(),
+ Matchers.<TableFeatures>any(), Matchers.<InstanceIdentifier<FlowCapableNode>>any());
syncPlanPushStrategy = new SyncPlanPushStrategyIncrementalImpl()
.setMeterForwarder(meterCommitter)
Mockito.verify(flowCapableTxService).sendBarrier(Matchers.<SendBarrierInput>any());
}
-}
\ No newline at end of file
+}
public class ReconcileUtilTest {
private static final NodeId NODE_ID = new NodeId("unit-node-id");
- private final InstanceIdentifier<Node> NODE_IDENT = InstanceIdentifier.create(Nodes.class)
+ private static final InstanceIdentifier<Node> NODE_IDENT = InstanceIdentifier.create(Nodes.class)
.child(Node.class, new NodeKey(NODE_ID));
private static final Splitter COMMA_SPLITTER = Splitter.on(",");
}
/**
- * add one missing group
- *
- * @throws Exception
+ * Adds one missing group.
*/
@Test
public void testResolveAndDivideGroupDiffs1() throws Exception {
Assert.assertEquals(1, plan.size());
Assert.assertEquals(1, plan.get(0).getItemsToPush().size());
- Assert.assertEquals(4L, plan.get(0).getItemsToPush().iterator().next().getKey().getGroupId().getValue().longValue());
+ Assert.assertEquals(4L, plan.get(0).getItemsToPush().iterator().next().getKey()
+ .getGroupId().getValue().longValue());
Assert.assertEquals(0, plan.get(0).getItemsToUpdate().size());
}
/**
- * add 3 groups with dependencies - 3 steps involved
- *
- * @throws Exception
+ * Adds 3 groups with dependencies - 3 steps involved.
*/
@Test
public void testResolveAndDivideGroupDiffs2() throws Exception {
Assert.assertEquals(3, plan.size());
Assert.assertEquals(1, plan.get(0).getItemsToPush().size());
- Assert.assertEquals(2L, plan.get(0).getItemsToPush().iterator().next().getKey().getGroupId().getValue().longValue());
+ Assert.assertEquals(2L, plan.get(0).getItemsToPush().iterator().next().getKey()
+ .getGroupId().getValue().longValue());
Assert.assertEquals(0, plan.get(0).getItemsToUpdate().size());
Assert.assertEquals(1, plan.get(1).getItemsToPush().size());
- Assert.assertEquals(4L, plan.get(1).getItemsToPush().iterator().next().getKey().getGroupId().getValue().longValue());
+ Assert.assertEquals(4L, plan.get(1).getItemsToPush().iterator().next().getKey()
+ .getGroupId().getValue().longValue());
Assert.assertEquals(0, plan.get(1).getItemsToUpdate().size());
Assert.assertEquals(1, plan.get(2).getItemsToPush().size());
- Assert.assertEquals(3L, plan.get(2).getItemsToPush().iterator().next().getKey().getGroupId().getValue().longValue());
+ Assert.assertEquals(3L, plan.get(2).getItemsToPush().iterator().next().getKey()
+ .getGroupId().getValue().longValue());
Assert.assertEquals(0, plan.get(2).getItemsToUpdate().size());
}
/**
- * no actions taken - installed and pending groups are the same
- *
- * @throws Exception
+ * No actions taken - installed and pending groups are the same.
*/
@Test
public void testResolveAndDivideGroupDiffs3() throws Exception {
}
/**
- * update 1 group
- *
- * @throws Exception
+ * Updates 1 group.
*/
@Test
public void testResolveAndDivideGroupDiffs4() throws Exception {
Assert.assertEquals(1, plan.size());
Assert.assertEquals(0, plan.get(0).getItemsToPush().size());
Assert.assertEquals(1, plan.get(0).getItemsToUpdate().size());
- final ItemSyncBox.ItemUpdateTuple<Group> firstItemUpdateTuple = plan.get(0).getItemsToUpdate().iterator().next();
+ final ItemSyncBox.ItemUpdateTuple<Group> firstItemUpdateTuple =
+ plan.get(0).getItemsToUpdate().iterator().next();
Assert.assertEquals(1L, firstItemUpdateTuple.getOriginal().getGroupId().getValue().longValue());
Assert.assertEquals(1L, firstItemUpdateTuple.getUpdated().getGroupId().getValue().longValue());
}
/**
- * no action taken - update 1 group will be ignored
- *
- * @throws Exception
+ * No action taken - the update of 1 group will be ignored.
*/
@Test
public void testResolveAndDivideGroupDiffs5() throws Exception {
}
/**
- * should add 1 group but preconditions are not met
- *
- * @throws Exception
+ * Should add 1 group but preconditions are not met.
*/
@Test
public void testResolveAndDivideGroupDiffs_negative1() throws Exception {
}
/**
- * should update 1 group but preconditions are not met
- *
- * @throws Exception
+ * Should update 1 group but preconditions are not met.
*/
@Test
public void testResolveAndDivideGroupDiffs_negative2() throws Exception {
}
/**
- * covers {@link ReconcileUtil#countTotalUpdated(Iterable)} too
- *
- * @throws Exception
+ * Covers {@link ReconcileUtil#countTotalUpdated(Iterable)} too.
*/
@Test
public void testCountTotalAdds() throws Exception {
}
return syncBox1;
}
-}
\ No newline at end of file
+}
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>()) {
@Override
- protected void afterExecute(final Runnable r, final Throwable t) {
- super.afterExecute(r, t);
- if (t != null) {
- LOG.error("pool thread crashed", t);
+ protected void afterExecute(final Runnable task, final Throwable failure) {
+ super.afterExecute(task, failure);
+ if (failure != null) {
+ LOG.error("pool thread crashed", failure);
}
}
};
private final ConcurrentMap<Integer, Integer> counter = new ConcurrentHashMap<>();
private volatile int index = 0;
- public Worker(SemaphoreKeeper<String> keeper, final String key) {
+ Worker(SemaphoreKeeper<String> keeper, final String key) {
this.keeper = keeper;
this.key = key;
}
@Override
+ @SuppressWarnings("checkstyle:IllegalCatch")
public void run() {
try {
final Semaphore guard = keeper.summonGuard(key);
return counter.size();
}
}
-}
\ No newline at end of file
+}
package org.opendaylight.openflowplugin.openflow.ofswitch.config;
import java.util.Collection;
-import java.util.concurrent.Callable;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
this.dataBroker = dataBroker;
}
+ @SuppressWarnings("checkstyle:IllegalCatch")
public void start() {
try {
- final InstanceIdentifier<FlowCapableNode> path = InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class);
- final DataTreeIdentifier<FlowCapableNode> identifier = new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, path);
+ final InstanceIdentifier<FlowCapableNode> path = InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class);
+ final DataTreeIdentifier<FlowCapableNode> identifier = new DataTreeIdentifier<>(
+ LogicalDatastoreType.OPERATIONAL, path);
final SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(STARTUP_LOOP_TICK, STARTUP_LOOP_MAX_RETRIES);
- listenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<DataTreeChangeListener>>() {
- @Override
- public ListenerRegistration<DataTreeChangeListener> call() throws Exception {
- return dataBroker.registerDataTreeChangeListener(identifier, DefaultConfigPusher.this);
- }
- });
+ listenerRegistration = looper.loopUntilNoException(
+ () -> dataBroker.registerDataTreeChangeListener(identifier, DefaultConfigPusher.this));
} catch (Exception e) {
LOG.error("DataTreeChangeListener registration failed: {}", e);
throw new IllegalStateException("DefaultConfigPusher startup failed!", e);
@Override
public void close() {
- if(listenerRegistration != null) {
+ if (listenerRegistration != null) {
listenerRegistration.close();
}
}
@Override
public void onDataTreeChanged(@Nonnull final Collection<DataTreeModification<FlowCapableNode>> modifications) {
- for (DataTreeModification modification : modifications) {
+ for (DataTreeModification<FlowCapableNode> modification : modifications) {
if (modification.getRootNode().getModificationType() == ModificationType.WRITE) {
SetConfigInputBuilder setConfigInputBuilder = new SetConfigInputBuilder();
setConfigInputBuilder.setFlag(SwitchConfigFlag.FRAGNORMAL.toString());
setConfigInputBuilder.setMissSearchLength(OFConstants.OFPCML_NO_BUFFER);
- setConfigInputBuilder.setNode(new NodeRef(modification.getRootPath().getRootIdentifier().firstIdentifierOf(Node.class)));
+ setConfigInputBuilder.setNode(new NodeRef(modification.getRootPath()
+ .getRootIdentifier().firstIdentifierOf(Node.class)));
nodeConfigService.setConfig(setConfigInputBuilder.build());
}
}
@RunWith(MockitoJUnitRunner.class)
public class DefaultConfigPusherTest {
private DefaultConfigPusher defaultConfigPusher;
- private final static InstanceIdentifier<Node> nodeIID = InstanceIdentifier.create(Nodes.class)
+ private static final InstanceIdentifier<Node> NODE_IID = InstanceIdentifier.create(Nodes.class)
.child(Node.class, new NodeKey(new NodeId("testnode:1")));
@Mock
private NodeConfigService nodeConfigService;
@Before
public void setUp() throws Exception {
defaultConfigPusher = new DefaultConfigPusher(nodeConfigService, Mockito.mock(DataBroker.class));
- final DataTreeIdentifier<FlowCapableNode> identifier = new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, nodeIID);
+ final DataTreeIdentifier<FlowCapableNode> identifier =
+ new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, NODE_IID);
Mockito.when(dataTreeModification.getRootPath()).thenReturn(identifier);
Mockito.when(dataTreeModification.getRootNode()).thenReturn(Mockito.mock(DataObjectModification.class));
Mockito.when(dataTreeModification.getRootNode().getModificationType()).thenReturn(ModificationType.WRITE);
final SetConfigInput captured = setConfigInputCaptor.getValue();
Assert.assertEquals(SwitchConfigFlag.FRAGNORMAL.toString(), captured.getFlag());
Assert.assertEquals(OFConstants.OFPCML_NO_BUFFER, captured.getMissSearchLength());
- Assert.assertEquals(nodeIID, captured.getNode().getValue());
+ Assert.assertEquals(NODE_IID, captured.getNode().getValue());
}
@After
defaultConfigPusher.close();
}
-}
\ No newline at end of file
+}
@Override
public void onPacketReceived(PacketReceived lldp) {
NodeConnectorRef src = LLDPDiscoveryUtils.lldpToNodeConnectorRef(lldp.getPayload(), true);
- if(src != null) {
+ if (src != null) {
LinkDiscoveredBuilder ldb = new LinkDiscoveredBuilder();
ldb.setDestination(lldp.getIngress());
ldb.setSource(new NodeConnectorRef(src));
lldpLinkAger.put(ld);
}
}
-}
\ No newline at end of file
+}
private final AutoCloseable configurationServiceRegistration;
/**
- * default ctor - start timer
+ * Default constructor - starts the timer.
*/
public LLDPLinkAger(final TopologyLldpDiscoveryConfig topologyLldpDiscoveryConfig,
final NotificationProviderService notificationService,
LinkDiscovered link = entry.getKey();
Date expires = entry.getValue();
Date now = new Date();
- if(now.after(expires)) {
+ if (now.after(expires)) {
if (notificationService != null) {
LinkRemovedBuilder lrb = new LinkRemovedBuilder(link);
notificationService.publish(lrb.build());
}
});
}
-}
\ No newline at end of file
+}
import org.opendaylight.openflowplugin.libraries.liblldp.LLDP;
import org.opendaylight.openflowplugin.libraries.liblldp.LLDPTLV;
import org.opendaylight.openflowplugin.libraries.liblldp.NetUtils;
+import org.opendaylight.openflowplugin.libraries.liblldp.PacketException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-public class LLDPDiscoveryUtils {
+public final class LLDPDiscoveryUtils {
private static final Logger LOG = LoggerFactory.getLogger(LLDPDiscoveryUtils.class);
private static final short MINIMUM_LLDP_SIZE = 61;
private static final short ETHERNET_TYPE_OFFSET = 12;
private static final short ETHERNET_VLAN_OFFSET = ETHERNET_TYPE_OFFSET + 4;
+ private LLDPDiscoveryUtils() {
+ }
+
public static String macToString(byte[] mac) {
- StringBuilder b = new StringBuilder();
+ StringBuilder builder = new StringBuilder();
for (int i = 0; i < mac.length; i++) {
- b.append(String.format("%02X%s", mac[i], i < mac.length - 1 ? ":" : ""));
+ builder.append(String.format("%02X%s", mac[i], i < mac.length - 1 ? ":" : ""));
}
- return b.toString();
+ return builder.toString();
}
/**
- * @param payload
+ * Returns the nodeConnectorId encoded in the custom TLV of the given LLDP packet.
+ *
+ * @param payload lldp payload
* @return nodeConnectorId - encoded in custom TLV of given lldp
* @see LLDPDiscoveryUtils#lldpToNodeConnectorRef(byte[], boolean)
*/
}
/**
- * @param payload
+ * Returns the nodeConnectorId encoded in the custom TLV of the given LLDP packet.
+ *
+ * @param payload lldp payload
* @param useExtraAuthenticatorCheck make it more secure (CVE-2015-1611 CVE-2015-1612)
* @return nodeConnectorId - encoded in custom TLV of given lldp
*/
+ @SuppressWarnings("checkstyle:IllegalCatch")
public static NodeConnectorRef lldpToNodeConnectorRef(byte[] payload, boolean useExtraAuthenticatorCheck) {
NodeConnectorRef nodeConnectorRef = null;
Ethernet ethPkt = new Ethernet();
try {
ethPkt.deserialize(payload, 0, payload.length * NetUtils.NUM_BITS_IN_A_BYTE);
- } catch (Exception e) {
+ } catch (PacketException e) {
LOG.warn("Failed to decode LLDP packet {}", e);
return nodeConnectorRef;
}
throw new Exception("Node id wasn't specified via systemNameId in LLDP packet.");
}
- final LLDPTLV nodeConnectorIdLldptlv = lldp.getCustomTLV(
- new CustomTLVKey(BitBufferHelper.getInt(LLDPTLV.OFOUI), LLDPTLV.CUSTOM_TLV_SUB_TYPE_NODE_CONNECTOR_ID[0]));
+ final LLDPTLV nodeConnectorIdLldptlv = lldp.getCustomTLV(new CustomTLVKey(
+ BitBufferHelper.getInt(LLDPTLV.OFOUI), LLDPTLV.CUSTOM_TLV_SUB_TYPE_NODE_CONNECTOR_ID[0]));
if (nodeConnectorIdLldptlv != null) {
srcNodeConnectorId = new NodeConnectorId(LLDPTLV.getCustomString(
nodeConnectorIdLldptlv.getValue(), nodeConnectorIdLldptlv.getLength()));
boolean secure = checkExtraAuthenticator(lldp, srcNodeConnectorId);
if (!secure) {
LOG.warn("SECURITY ALERT: there is probably a LLDP spoofing attack in progress.");
- throw new Exception("Attack. LLDP packet with inconsistent extra authenticator field was received.");
+ throw new Exception(
+ "Attack. LLDP packet with inconsistent extra authenticator field was received.");
}
}
.toInstance();
nodeConnectorRef = new NodeConnectorRef(srcInstanceId);
} catch (Exception e) {
- LOG.debug("Caught exception while parsing out lldp optional and custom fields: {}", e.getMessage(), e);
+ LOG.debug("Caught exception while parsing out lldp optional and custom fields", e);
}
}
return nodeConnectorRef;
}
/**
- * @param nodeConnectorId
+ * Gets an extra authenticator for lldp security.
+ *
+ * @param nodeConnectorId the NodeConnectorId
* @return extra authenticator for lldp security
- * @throws NoSuchAlgorithmException
*/
- public static byte[] getValueForLLDPPacketIntegrityEnsuring(final NodeConnectorId nodeConnectorId) throws NoSuchAlgorithmException {
+ public static byte[] getValueForLLDPPacketIntegrityEnsuring(final NodeConnectorId nodeConnectorId)
+ throws NoSuchAlgorithmException {
String finalKey;
- if(LLDPActivator.getLldpSecureKey() !=null && !LLDPActivator.getLldpSecureKey().isEmpty()) {
+ if (LLDPActivator.getLldpSecureKey() != null && !LLDPActivator.getLldpSecureKey().isEmpty()) {
finalKey = LLDPActivator.getLldpSecureKey();
} else {
finalKey = ManagementFactory.getRuntimeMXBean().getName();
return hashedValue.asBytes();
}
- /**
- * @param lldp
- * @param srcNodeConnectorId
- * @throws NoSuchAlgorithmException
- */
- private static boolean checkExtraAuthenticator(LLDP lldp, NodeConnectorId srcNodeConnectorId) throws NoSuchAlgorithmException {
+ private static boolean checkExtraAuthenticator(LLDP lldp, NodeConnectorId srcNodeConnectorId)
+ throws NoSuchAlgorithmException {
final LLDPTLV hashLldptlv = lldp.getCustomTLV(
new CustomTLVKey(BitBufferHelper.getInt(LLDPTLV.OFOUI), LLDPTLV.CUSTOM_TLV_SUB_TYPE_CUSTOM_SEC[0]));
boolean secAuthenticatorOk = false;
private static final Logger LOG = LoggerFactory.getLogger(LLDPLinkAgerTest.class);
- private LLDPLinkAger lldpLinkAger;
- private final long LLDP_INTERVAL = 5L;
- private final long LINK_EXPIRATION_TIME = 10L;
+ private static final long LLDP_INTERVAL = 5L;
+ private static final long LINK_EXPIRATION_TIME = 10L;
+
/**
- * We need to wait while other tasks are finished before we can check anything
- * in LLDPAgingTask
+ * We need to wait until other tasks are finished before we can check anything in LLDPAgingTask.
*/
- private final int SLEEP = 100;
+ private static final int SLEEP = 100;
+
+ private LLDPLinkAger lldpLinkAger;
@Mock
private LinkDiscovered link;
}
/**
- * Inner class LLDPAgingTask removes all expired records from linkToDate if any (in constructor of LLDPLinkAger)
+ * Inner class LLDPAgingTask removes all expired records from linkToDate if any (in constructor of LLDPLinkAger).
*/
@Test
public void testLLDPAgingTask() throws InterruptedException {
return configurationService;
}
-}
\ No newline at end of file
+}
@Test
public void testLldpToNodeConnectorRefLLDP() throws Exception {
byte[] packetLLDP = {
- 0x01, 0x23, 0x00, 0x00, 0x00, 0x01, (byte) 0x8a, (byte) 0x8e,
- (byte) 0xcc, (byte) 0x85, (byte) 0xeb, 0x27,
- /* ethernet type LLDP 0x88cc */(byte) 0x88, (byte) 0xcc,
- 0x02, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04,
- 0x02, 0x07, 0x32, 0x06, 0x02, 0x13, 0x37, 0x0a, 0x0a,
- /* openflow:2 */0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x3a, 0x32,
- (byte) 0xfe, 0x10, 0x00, 0x26, (byte) 0xe1, 0x00,
- /* openflow:2:2 */0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x3a, 0x32, 0x3a, 0x32,
- (byte) 0xfe, 0x14, 0x00, 0x26, (byte) 0xe1, 0x01, 0x62, (byte) 0xc8, 0x2b, 0x67, (byte) 0xce,
- (byte) 0xbe, 0x7c, 0x2b, 0x47, (byte) 0xbe, 0x2b, (byte) 0xe7, (byte) 0xbc,
- (byte) 0xe9, 0x75, 0x3d, 0x00, 0x00
+ 0x01, 0x23, 0x00, 0x00, 0x00, 0x01, (byte) 0x8a, (byte) 0x8e,
+ (byte) 0xcc, (byte) 0x85, (byte) 0xeb, 0x27,
+ /* ethernet type LLDP 0x88cc */(byte) 0x88, (byte) 0xcc,
+ 0x02, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04,
+ 0x02, 0x07, 0x32, 0x06, 0x02, 0x13, 0x37, 0x0a, 0x0a,
+ /* openflow:2 */0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x3a, 0x32,
+ (byte) 0xfe, 0x10, 0x00, 0x26, (byte) 0xe1, 0x00,
+ /* openflow:2:2 */0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x3a, 0x32, 0x3a, 0x32,
+ (byte) 0xfe, 0x14, 0x00, 0x26, (byte) 0xe1, 0x01, 0x62, (byte) 0xc8, 0x2b, 0x67, (byte) 0xce,
+ (byte) 0xbe, 0x7c, 0x2b, 0x47, (byte) 0xbe, 0x2b, (byte) 0xe7, (byte) 0xbc,
+ (byte) 0xe9, 0x75, 0x3d, 0x00, 0x00
};
NodeConnectorRef nodeConnectorRef = LLDPDiscoveryUtils.lldpToNodeConnectorRef(packetLLDP, false);
@Test
public void testLldpToNodeConnectorRefNotLLDP() throws Exception {
byte[] packetNotLLDP = {
- 0x01, 0x23, 0x00, 0x00, 0x00, 0x01, (byte) 0x8a, (byte) 0x8e,
- (byte) 0xcc, (byte) 0x85, (byte) 0xeb, 0x27,
- /* ethernet type IPv4 0x0800 */(byte) 0x08, (byte) 0x00,
- 0x02, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04,
- 0x02, 0x07, 0x32, 0x06, 0x02, 0x13, 0x37, 0x0a,
- 0x0a, 0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f,
- 0x77, 0x3a, 0x32, (byte) 0xfe, 0x10, 0x00, 0x26, (byte) 0xe1,
- 0x00, 0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f,
- 0x77, 0x3a, 0x32, 0x3a, 0x32, (byte) 0xfe, 0x14, 0x00,
- 0x26, (byte) 0xe1, 0x01, 0x62, (byte) 0xc8, 0x2b, 0x67, (byte) 0xce,
- (byte) 0xbe, 0x7c, 0x2b, 0x47, (byte) 0xbe, 0x2b, (byte) 0xe7, (byte) 0xbc,
- (byte) 0xe9, 0x75, 0x3d, 0x00, 0x00
+ 0x01, 0x23, 0x00, 0x00, 0x00, 0x01, (byte) 0x8a, (byte) 0x8e,
+ (byte) 0xcc, (byte) 0x85, (byte) 0xeb, 0x27,
+ /* ethernet type IPv4 0x0800 */(byte) 0x08, (byte) 0x00,
+ 0x02, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04,
+ 0x02, 0x07, 0x32, 0x06, 0x02, 0x13, 0x37, 0x0a,
+ 0x0a, 0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f,
+ 0x77, 0x3a, 0x32, (byte) 0xfe, 0x10, 0x00, 0x26, (byte) 0xe1,
+ 0x00, 0x6f, 0x70, 0x65, 0x6e, 0x66, 0x6c, 0x6f,
+ 0x77, 0x3a, 0x32, 0x3a, 0x32, (byte) 0xfe, 0x14, 0x00,
+ 0x26, (byte) 0xe1, 0x01, 0x62, (byte) 0xc8, 0x2b, 0x67, (byte) 0xce,
+ (byte) 0xbe, 0x7c, 0x2b, 0x47, (byte) 0xbe, 0x2b, (byte) 0xe7, (byte) 0xbc,
+ (byte) 0xe9, 0x75, 0x3d, 0x00, 0x00
};
NodeConnectorRef nodeConnectorRef = LLDPDiscoveryUtils.lldpToNodeConnectorRef(packetNotLLDP, false);
assertNull(nodeConnectorRef);
}
-}
\ No newline at end of file
+}