package org.opendaylight.openflowplugin.impl.registry.flow;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
-import javax.annotation.Nonnull;
-import javax.annotation.concurrent.ThreadSafe;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.ReadTransaction;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.general.rev140714.GeneralAugMatchNodesNodeTableFlow;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Uint8;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@ThreadSafe
+/*
+ * This class is thread-safe.
+ */
public class DeviceFlowRegistryImpl implements DeviceFlowRegistry {
private static final Logger LOG = LoggerFactory.getLogger(DeviceFlowRegistryImpl.class);
private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
final InstanceIdentifier<FlowCapableNode> path = instanceIdentifier.augmentation(FlowCapableNode.class);
// First, try to fill registry with flows from DS/Configuration
- final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> configFuture =
+ final FluentFuture<Optional<FlowCapableNode>> configFuture =
fillFromDatastore(LogicalDatastoreType.CONFIGURATION, path);
// Now, try to fill registry with flows from DS/Operational
// in case of cluster fail over, when clients are not using DS/Configuration
// for adding flows, but only RPCs
- final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> operationalFuture =
+ final FluentFuture<Optional<FlowCapableNode>> operationalFuture =
fillFromDatastore(LogicalDatastoreType.OPERATIONAL, path);
// And at last, chain and return futures created above.
return lastFillFuture;
}
- private CheckedFuture<Optional<FlowCapableNode>, ReadFailedException>
- fillFromDatastore(final LogicalDatastoreType logicalDatastoreType,
+ private FluentFuture<Optional<FlowCapableNode>> fillFromDatastore(final LogicalDatastoreType logicalDatastoreType,
final InstanceIdentifier<FlowCapableNode> path) {
// Reads the FlowCapableNode at 'path' from the requested datastore and, on success, feeds every
// flow that carries a non-null id into this registry via flowConsumer. Returns the read future so
// the caller can chain completion on it.
// NOTE(review): the post-migration 'return future;' statement is not visible in this hunk —
// presumably it follows the callback registration; confirm against the full file. 'MoreExecutors'
// (used below) must also be imported in the final file.
- // Create new read-only transaction
- final ReadOnlyTransaction transaction = dataBroker.newReadOnlyTransaction();
-
- // Bail out early if transaction is null
- if (transaction == null) {
- return Futures.immediateFailedCheckedFuture(
- new ReadFailedException("Read transaction is null"));
- }
-
// Prepare read operation from datastore for path
- final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> future =
- transaction.read(logicalDatastoreType, path);
-
- // Bail out early if future is null
- if (future == null) {
- return Futures.immediateFailedCheckedFuture(
- new ReadFailedException("Future from read transaction is null"));
// try-with-resources replaces the old manual transaction.close() calls in both callbacks:
// a read-only transaction can be closed as soon as the read has been submitted.
+ final FluentFuture<Optional<FlowCapableNode>> future;
+ try (ReadTransaction transaction = dataBroker.newReadOnlyTransaction()) {
+ future = transaction.read(logicalDatastoreType, path);
}
- Futures.addCallback(future, new FutureCallback<Optional<FlowCapableNode>>() {
+ future.addCallback(new FutureCallback<Optional<FlowCapableNode>>() {
@Override
- public void onSuccess(@Nonnull Optional<FlowCapableNode> result) {
- result.asSet().stream()
- .filter(Objects::nonNull)
- .filter(flowCapableNode -> Objects.nonNull(flowCapableNode.getTable()))
- .flatMap(flowCapableNode -> flowCapableNode.getTable().stream())
- .filter(Objects::nonNull)
- .filter(table -> Objects.nonNull(table.getFlow()))
- .flatMap(table -> table.getFlow().stream())
- .filter(Objects::nonNull)
- .filter(flow -> Objects.nonNull(flow.getId()))
- .forEach(flowConsumer);
-
- // After we are done with reading from datastore, close the transaction
- transaction.close()
// nonnullTable()/nonnullFlow() return empty maps instead of null, replacing the explicit
// null-filters of the pre-migration stream pipeline.
+ public void onSuccess(final Optional<FlowCapableNode> result) {
+ result.ifPresent(flowCapableNode -> {
+ flowCapableNode.nonnullTable().values().stream()
+ .filter(Objects::nonNull)
+ .flatMap(table -> table.nonnullFlow().values().stream())
+ .filter(Objects::nonNull)
+ .filter(flow -> flow.getId() != null)
+ .forEach(flowConsumer);
+ });
}
@Override
- public void onFailure(Throwable throwable) {
- // Even when read operation failed, close the transaction
- transaction.close();
// Best-effort read: a failed fill is only logged, not propagated from the callback.
+ public void onFailure(final Throwable throwable) {
+ LOG.debug("Failed to read {} path {}", logicalDatastoreType, path, throwable);
}
}, MoreExecutors.directExecutor());
}
@Override
- public FlowDescriptor retrieveDescriptor(@Nonnull final FlowRegistryKey flowRegistryKey) {
+ public FlowDescriptor retrieveDescriptor(@NonNull final FlowRegistryKey flowRegistryKey) {
if (LOG.isTraceEnabled()) {
LOG.trace("Retrieving flow descriptor for flow registry : {}", flowRegistryKey.toString());
}
}
@Override
- public void storeDescriptor(@Nonnull final FlowRegistryKey flowRegistryKey,
- @Nonnull final FlowDescriptor flowDescriptor) {
+ public void storeDescriptor(@NonNull final FlowRegistryKey flowRegistryKey,
+ @NonNull final FlowDescriptor flowDescriptor) {
try {
if (LOG.isTraceEnabled()) {
LOG.trace("Storing flowDescriptor with table ID : {} and flow ID : {} for flow hash : {}",
// Ensures the given registry key has a descriptor: when none is registered yet, generates a
// synthetic "alien" flow id for the key's table and stores a fresh descriptor under it.
@Override
public void store(final FlowRegistryKey flowRegistryKey) {
- if (Objects.isNull(retrieveDescriptor(flowRegistryKey))) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Flow descriptor for flow hash : {} not found, generating alien flow ID",
- flowRegistryKey.toString());
- }
// SLF4J parameterized logging makes the explicit isDebugEnabled() guard unnecessary.
+ if (retrieveDescriptor(flowRegistryKey) == null) {
+ LOG.debug("Flow descriptor for flow hash : {} not found, generating alien flow ID", flowRegistryKey);
// We did not find the flow in the flow registry, which means it does not have an ID
// assigned yet, so we need to generate a new alien flow ID here.
+ final Uint8 tableId = Uint8.valueOf(flowRegistryKey.getTableId());
+ storeDescriptor(flowRegistryKey, FlowDescriptorFactory.create(tableId, createAlienFlowId(tableId)));
}
}
}
@VisibleForTesting
// Builds a synthetic ("alien") FlowId of the form "#UF$TABLE*<tableId>-<n>", where <n> comes from
// the shared UNACCOUNTED_FLOWS_COUNTER, for flows observed on the device without a client-assigned id.
- static FlowId createAlienFlowId(final short tableId) {
+ static FlowId createAlienFlowId(final Uint8 tableId) {
final String alienId = ALIEN_SYSTEM_FLOW_ID + tableId + '-' + UNACCOUNTED_FLOWS_COUNTER.incrementAndGet();
LOG.debug("Created alien flow id {} for table id {}", alienId, tableId);
return new FlowId(alienId);
}
private FlowRegistryKey getExistingKey(final FlowRegistryKey flowRegistryKey) {
- if (flowRegistryKey.getMatch().getAugmentation(GeneralAugMatchNodesNodeTableFlow.class) == null) {
+ if (flowRegistryKey.getMatch().augmentation(GeneralAugMatchNodesNodeTableFlow.class) == null) {
if (flowRegistry.containsKey(flowRegistryKey)) {
return flowRegistryKey;
}