/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* their locations for rendering. The endpoint manager will maintain
* appropriate indexes only for switches that are attached to the current
* controller node.
- *
* In order to render the policy, we need to be able to efficiently enumerate
* all endpoints on a particular switch and also all the switches containing
* each particular endpoint group
*
* @author readams
*/
-public class EndpointManager implements AutoCloseable, DataChangeListener
-{
- private static final Logger LOG =
- LoggerFactory.getLogger(EndpointManager.class);
- private final static InstanceIdentifier<Nodes> nodesIid = InstanceIdentifier
- .builder(Nodes.class).build();
- private static final InstanceIdentifier<Endpoint> endpointsIid =
- InstanceIdentifier.builder(Endpoints.class)
- .child(Endpoint.class).build();
+public class EndpointManager implements AutoCloseable, DataChangeListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(EndpointManager.class);
+ private final static InstanceIdentifier<Nodes> nodesIid = InstanceIdentifier.builder(Nodes.class).build();
+ private static final InstanceIdentifier<Endpoint> endpointsIid = InstanceIdentifier.builder(Endpoints.class)
+ .child(Endpoint.class)
+ .build();
final ListenerRegistration<DataChangeListener> listenerReg;
- private final ConcurrentHashMap<EpKey, Endpoint> endpoints =
- new ConcurrentHashMap<>();
- private final ConcurrentHashMap<NodeId, ConcurrentMap<EgKey, Set<EpKey>>> endpointsByGroupByNode =
- new ConcurrentHashMap<>();
- private final ConcurrentHashMap<NodeId, Set<EpKey>> endpointsByNode =
- new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<EpKey, Endpoint> endpoints = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<NodeId, ConcurrentMap<EgKey, Set<EpKey>>> endpointsByGroupByNode = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<NodeId, Set<EpKey>> endpointsByNode = new ConcurrentHashMap<>();
- private final ConcurrentHashMap<EgKey, Set<EpKey>> endpointsByGroup =
- new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<EgKey, Set<EpKey>> endpointsByGroup = new ConcurrentHashMap<>();
private List<EndpointListener> listeners = new CopyOnWriteArrayList<>();
final private DataBroker dataProvider;
- public EndpointManager(DataBroker dataProvider,
- RpcProviderRegistry rpcRegistry,
- ScheduledExecutorService executor,
+ public EndpointManager(DataBroker dataProvider, RpcProviderRegistry rpcRegistry, ScheduledExecutorService executor,
SwitchManager switchManager) {
this.executor = executor;
this.dataProvider = dataProvider;
EndpointRpcRegistry.register(dataProvider, rpcRegistry, endpointRpcAug);
if (dataProvider != null) {
- listenerReg = dataProvider
- .registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
- endpointsIid,
- this,
- DataChangeScope.ONE);
+ listenerReg = dataProvider.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL, endpointsIid, this,
+ DataChangeScope.ONE);
} else
listenerReg = null;
* Add a {@link EndpointListener} to get notifications of switch events
*
* @param listener
- * the {@link EndpointListener} to add
+ * the {@link EndpointListener} to add
*/
public void registerListener(EndpointListener listener) {
listeners.add(listener);
* Get a collection of endpoints attached to a particular switch
*
* @param nodeId
- * the nodeId of the switch to get endpoints for
+ * the nodeId of the switch to get endpoints for
* @return a collection of {@link Endpoint} objects.
*/
public synchronized Set<EgKey> getGroupsForNode(NodeId nodeId) {
* Get the set of nodes
*
* @param egKey
- * the egKey of the endpointgroup to get nodes for
+ * the egKey of the endpointgroup to get nodes for
* @return a collection of {@link NodeId} objects.
*/
public synchronized Set<NodeId> getNodesForGroup(final EgKey egKey) {
+ // Filter the per-node index: keep only nodes whose EPG map currently contains egKey.
- return ImmutableSet.copyOf(Sets.filter(endpointsByGroupByNode.keySet(),
- new Predicate<NodeId>() {
- @Override
- public boolean apply(NodeId input) {
- Map<EgKey, Set<EpKey>> nodeEps =
- endpointsByGroupByNode.get(input);
- return (nodeEps != null &&
- nodeEps.containsKey(egKey));
- }
+ return ImmutableSet.copyOf(Sets.filter(endpointsByGroupByNode.keySet(), new Predicate<NodeId>() {
- }));
+ @Override
+ public boolean apply(NodeId input) {
+ Map<EgKey, Set<EpKey>> nodeEps = endpointsByGroupByNode.get(input);
+ return (nodeEps != null && nodeEps.containsKey(egKey));
+ }
+
+ }));
}
/**
* Get the endpoints in a particular group on a particular node
*
* @param nodeId
- * the node ID to look up
+ * the node ID to look up
* @param eg
- * the group to look up
+ * the group to look up
* @return the endpoints
*/
public synchronized Collection<Endpoint> getEndpointsForNode(NodeId nodeId, EgKey eg) {
Collection<EpKey> ebn = nodeEps.get(eg);
if (ebn == null)
return Collections.emptyList();
- return ImmutableList.copyOf(Collections2.transform(ebn,indexTransform));
+ return ImmutableList.copyOf(Collections2.transform(ebn, indexTransform));
}
/**
* Get the endpoints on a particular node
*
* @param nodeId
- * the node ID to look up
+ * the node ID to look up
* @return the endpoints
*/
public synchronized Collection<Endpoint> getEndpointsForNode(final NodeId nodeId) {
* Get the endpoint object for the given key
*
* @param epKey
- * the key
+ * the key
* @return the {@link Endpoint} corresponding to the key
*/
public Endpoint getEndpoint(EpKey epKey) {
* Set the learning mode to the specified value
*
* @param learningMode
- * the learning mode to set
+ * the learning mode to set
*/
public void setLearningMode(LearningMode learningMode) {
// No-op for now
* Get a collection of endpoints in a particular endpoint group
*
* @param eg
- * endpoint group ID
+ * endpoint group ID
* @return a collection of {@link Endpoint} objects.
*/
public synchronized Collection<Endpoint> getEndpointsForGroup(EgKey eg) {
* directly represented in the endpoint object
*
* @param endpoint
- * the {@link Endpoint} to resolve
+ * the {@link Endpoint} to resolve
* @return the list of {@link ConditionName}
*/
public List<ConditionName> getCondsForEndpoint(Endpoint endpoint) {
Optional<Nodes> result;
try {
- result = dataProvider
- .newReadOnlyTransaction().read(
- LogicalDatastoreType.OPERATIONAL, nodesIid).get();
+ result = dataProvider.newReadOnlyTransaction()
+ .read(LogicalDatastoreType.OPERATIONAL, nodesIid)
+ .get();
if (result.isPresent()) {
Nodes nodes = result.get();
for (Node node : nodes.getNode()) {
if (node.getNodeConnector() != null) {
boolean found = false;
for (NodeConnector nc : node.getNodeConnector()) {
- FlowCapableNodeConnector fcnc = nc
- .getAugmentation(FlowCapableNodeConnector.class);
+ FlowCapableNodeConnector fcnc = nc.getAugmentation(FlowCapableNodeConnector.class);
if (fcnc.getName().equals(portName)) {
nodeInfo = new NodeInfo();
nodeInfo.setNode(node);
}
}
- private Function<EpKey, Endpoint> indexTransform =
- new Function<EpKey, Endpoint>() {
- @Override
- public Endpoint apply(EpKey input) {
- return endpoints.get(input);
- }
- };
+ // Resolves an EpKey to its cached Endpoint via the endpoints index (null if unknown).
+ private Function<EpKey, Endpoint> indexTransform = new Function<EpKey, Endpoint>() {
+
+ @Override
+ public Endpoint apply(EpKey input) {
+ return endpoints.get(input);
+ }
+ };
private boolean validEp(Endpoint endpoint) {
- return (endpoint != null && endpoint.getTenant() != null &&
- (endpoint.getEndpointGroup() != null || endpoint.getEndpointGroups() != null) &&
- endpoint.getL2Context() != null && endpoint.getMacAddress() != null);
+ // An endpoint is usable only when it has a tenant, at least one EPG
+ // reference (single or list form), an L2 context and a MAC address.
+ return (endpoint != null && endpoint.getTenant() != null
+ && (endpoint.getEndpointGroup() != null || endpoint.getEndpointGroups() != null)
+ && endpoint.getL2Context() != null && endpoint.getMacAddress() != null);
}
private NodeId getLocation(Endpoint endpoint) {
if (!validEp(endpoint))
return null;
- OfOverlayContext context =
- endpoint.getAugmentation(OfOverlayContext.class);
+ OfOverlayContext context = endpoint.getAugmentation(OfOverlayContext.class);
if (context != null)
return context.getNodeId();
ConcurrentMap<EgKey, Set<EpKey>> map = endpointsByGroupByNode.get(location);
if (map == null) {
map = new ConcurrentHashMap<>();
- ConcurrentMap<EgKey, Set<EpKey>> old =
- endpointsByGroupByNode.putIfAbsent(location, map);
+ ConcurrentMap<EgKey, Set<EpKey>> old = endpointsByGroupByNode.putIfAbsent(location, map);
if (old != null)
map = old;
}
return SetUtils.getNestedSet(eg, map);
}
- private static final ConcurrentMap<EgKey, Set<EpKey>> EMPTY_MAP =
- new ConcurrentHashMap<>();
+ private static final ConcurrentMap<EgKey, Set<EpKey>> EMPTY_MAP = new ConcurrentHashMap<>();
private Set<EpKey> getEpGSet(EgKey eg) {
return SetUtils.getNestedSet(eg, endpointsByGroup);
Set<EpKey> epsNode = new HashSet<EpKey>();
epsNode.add(newEpKey);
endpointsByNode.put(newLoc, epsNode);
+ SwitchManager.activateEndpoint(newLoc);
+
} else {
Set<EpKey> epsNode = endpointsByNode.get(newLoc);
epsNode.add(newEpKey);
Set<EpKey> epsNode = endpointsByNode.get(oldLoc);
if (epsNode != null) {
epsNode.remove(oldEpKey);
- if (epsNode.isEmpty())
+ if (epsNode.isEmpty()) {
endpointsByNode.remove(oldLoc);
+ SwitchManager.deactivateEndpoint(oldLoc);
+ }
}
// Update endpointsByGroupByNode
// Update endpointsByGroup
// Get map of EPGs and their Endpoints for Node
- ConcurrentMap<EgKey, Set<EpKey>> map =
- endpointsByGroupByNode.get(oldLoc);
+ ConcurrentMap<EgKey, Set<EpKey>> map = endpointsByGroupByNode.get(oldLoc);
// For each EPG in the removed endpoint...
for (EndpointGroupId oldEpgId : newEpgIds) {
EgKey oldEgKey = new EgKey(oldEp.getTenant(), oldEpgId);
/*
* Moved endpoint (from node to node or from NULL to node)
*/
- if ((oldEp != null && newEp != null && oldEpKey != null && newEpKey != null) &&
- (oldEpKey.toString().equals(newEpKey.toString()))) {
+ if ((oldEp != null && newEp != null && oldEpKey != null && newEpKey != null)
+ && (oldEpKey.toString().equals(newEpKey.toString()))) {
// old and new Endpoints have same key. (same endpoint)
/*
Set<EpKey> epsNode = endpointsByNode.get(oldLoc);
if (epsNode != null) {
epsNode.remove(oldEpKey);
- if (epsNode.isEmpty())
+ if (epsNode.isEmpty()) {
endpointsByNode.remove(oldLoc);
+ SwitchManager.deactivateEndpoint(oldLoc);
+ }
}
// Update endpointsByGroupByNode
// Get map of EPGs and their Endpoints for Node
- ConcurrentMap<EgKey, Set<EpKey>> map =
- endpointsByGroupByNode.get(oldLoc);
+ ConcurrentMap<EgKey, Set<EpKey>> map = endpointsByGroupByNode.get(oldLoc);
// For each EPG in the removed endpoint...
for (EndpointGroupId oldEpgId : oldEpgIds) {
EgKey oldEgKey = new EgKey(oldEp.getTenant(), oldEpgId);
}
// endpointsByGroup
Set<EpKey> geps = endpointsByGroup.get(oldEgKey);
- if (geps != null)
- {
+ if (geps != null) {
geps.remove(oldEpKey);
if (geps.isEmpty())
endpointsByGroup.remove(oldEgKey);
Set<EpKey> newEpsNode = new HashSet<EpKey>();
newEpsNode.add(newEpKey);
endpointsByNode.put(newLoc, newEpsNode);
+ SwitchManager.activateEndpoint(newLoc);
} else {
Set<EpKey> newEpsNode = endpointsByNode.get(newLoc);
newEpsNode.add(newEpKey);
// A wrapper class around node, nodeConnector info so we can pass a final
// object inside OnSuccess anonymous inner class
private static class NodeInfo {
+
NodeConnector nodeConnector;
Node node;
private static final Logger LOG = LoggerFactory.getLogger(SwitchManager.class);
- protected Map<NodeId, SwitchState> switches = new HashMap<>();
+ protected static Map<NodeId, SwitchState> switches = new HashMap<>();
protected List<SwitchListener> listeners = new CopyOnWriteArrayList<>();
private final FlowCapableNodeListener nodeListener;
LOG.debug("Initialized OFOverlay switch manager");
}
+ // When first endpoint is attached to switch, it can be ready
+ public static void activateEndpoint(NodeId nodeId) {
+ switches.get(nodeId).hasEndpoints=true;
+ switches.get(nodeId).updateStatus();
+ }
+
+ // When last endpoint is removed from switch, it is no longer ready
+ public static void deactivateEndpoint(NodeId nodeId) {
+ switches.get(nodeId).hasEndpoints=false;
+ switches.get(nodeId).updateStatus();
+ }
+
/**
* Get the collection of switches that are in the "ready" state. Note
* that the collection is immutable.
return ImmutableSet.copyOf(state.externalPorts);
}
- public synchronized NodeConnectorId getTunnelPort(NodeId nodeId, Class<? extends TunnelTypeBase> tunnelType) {
+ public static synchronized NodeConnectorId getTunnelPort(NodeId nodeId, Class<? extends TunnelTypeBase> tunnelType) {
SwitchState state = switches.get(nodeId);
if (state == null) {
return null;
return tunnel.getNodeConnectorId();
}
- public synchronized IpAddress getTunnelIP(NodeId nodeId, Class<? extends TunnelTypeBase> tunnelType) {
+ public static synchronized IpAddress getTunnelIP(NodeId nodeId, Class<? extends TunnelTypeBase> tunnelType) {
SwitchState state = switches.get(nodeId);
if (state == null) {
return null;
private FlowCapableNode fcNode;
private OfOverlayNodeConfig nodeConfig;
private Map<InstanceIdentifier<NodeConnector>, FlowCapableNodeConnector> fcncByNcIid = Maps.newHashMap();
+ private boolean hasEndpoints=false;
Map<Class<? extends TunnelTypeBase>, TunnelBuilder> tunnelBuilderByType = new HashMap<>();
Set<NodeConnectorId> externalPorts = new HashSet<>();
private void updateStatus() {
boolean tunnelWithIpAndNcExists = tunnelWithIpAndNcExists();
if (fcNode != null) {
- if (tunnelWithIpAndNcExists) {
+ if (tunnelWithIpAndNcExists && hasEndpoints) {
setStatus(SwitchStatus.READY);
} else {
setStatus(SwitchStatus.PREPARING);
LOG.trace("Iterating over tunnel till tunnel with IP and node-connector is not found.");
for (TunnelBuilder tb : tunnelBuilderByType.values()) {
if (tb.getIp() != null && tb.getNodeConnectorId() != null) {
- LOG.trace("Tunnel found. Type: {} IP: {} Port: {} Node-connector: {}", tb.getTunnelType()
- .getSimpleName(), tb.getIp(), tb.getPort(), tb.getNodeConnectorId());
+// LOG.trace("Tunnel found. Type: {} IP: {} Port: {} Node-connector: {}", tb.getTunnelType()
+// .getSimpleName(), tb.getIp(), tb.getPort(), tb.getNodeConnectorId());
+ LOG.trace("Tunnel found.");
return true;
} else {
- LOG.trace("Tunnel which is not completed: Type: {} IP: {} Port: {} Node-connector: {}",
- tb.getTunnelType().getSimpleName(), tb.getIp(), tb.getPort(), tb.getNodeConnectorId());
+// LOG.trace("Tunnel which is not completed: Type: {} IP: {} Port: {} Node-connector: {}",
+// tb.getTunnelType().getSimpleName(), tb.getIp(), tb.getPort(), tb.getNodeConnectorId());
+ LOG.trace("Tunnel which is not completed");
}
}
return false;
package org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow;\r
\r
import java.math.BigInteger;\r
+import java.util.Collections;\r
import java.util.HashMap;\r
import java.util.List;\r
import java.util.Objects;\r
import org.opendaylight.groupbasedpolicy.resolver.ConditionGroup;\r
import org.opendaylight.groupbasedpolicy.resolver.EgKey;\r
import org.opendaylight.groupbasedpolicy.resolver.PolicyInfo;\r
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;\r
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Address;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActionsCase;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.ConditionName;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.NetworkDomainId;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.Endpoint;\r
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayNodeConfigBuilder;\r
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.nodes.node.TunnelBuilder;\r
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg0;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg1;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg4;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg5;\r
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg6;\r
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.overlay.rev150105.TunnelTypeVxlan;\r
import org.slf4j.Logger;\r
import org.slf4j.LoggerFactory;\r
\r
\r
@Test\r
public void testMap() throws Exception {\r
+ switchManager.addSwitch(\r
+ nodeId,\r
+ tunnelId,\r
+ Collections.<NodeConnectorId>emptySet(),\r
+ new OfOverlayNodeConfigBuilder().setTunnel(\r
+ ImmutableList.of(new TunnelBuilder().setIp(new IpAddress(new Ipv4Address("1.2.3.4")))\r
+ .setTunnelType(TunnelTypeVxlan.class)\r
+ .setNodeConnectorId(tunnelId)\r
+ .build())).build());\r
Endpoint ep = localEP().build();\r
endpointManager.addEndpoint(ep);\r
policyResolver.addTenant(baseTenant().build());\r
\r
FlowMap fm = dosync(null);\r
- assertEquals(2, fm.getTableForNode(nodeId, (short) 1).getFlow().size());\r
+ assertEquals(4, fm.getTableForNode(nodeId, (short) 1).getFlow().size());\r
\r
int count = 0;\r
HashMap<String, Flow> flowMap = new HashMap<>();\r
if (f.getMatch() == null) {\r
assertEquals(FlowUtils.dropInstructions(), f.getInstructions());\r
count += 1;\r
- } else if (Objects.equals(ep.getMacAddress(), f.getMatch()\r
+ } else if ((f.getMatch() !=null && f.getMatch().getEthernetMatch() != null)\r
+ && (Objects.equals(ep.getMacAddress(), f.getMatch()\r
.getEthernetMatch()\r
.getEthernetSource()\r
- .getAddress())) {\r
+ .getAddress()))) {\r
PolicyInfo pi = policyResolver.getCurrentPolicy();\r
List<ConditionName> cset = endpointManager.getCondsForEndpoint(ep);\r
ConditionGroup cg = pi.getEgCondGroup(new EgKey(tid, eg), cset);\r