-module opendaylight-flow {
- namespace "urn:opendaylight:flow:service";
- prefix flow;
+module opendaylight-flow-base {
+ namespace "urn:opendaylight:flow:base";
+ prefix "flowbase";
import yang-ext {prefix ext;}
import ietf-inet-types {prefix inet;}
import ietf-yang-types {prefix yang;}
import opendaylight-inventory {prefix inv;}
+ import opendaylight-l2-types {prefix l2t;}
revision "2013-08-19" {
description "Initial revision of flow service";
}
- /** Base structure **/
- container flows {
- list flow {
- leaf node {
- type inv:node-id;
- }
- container match {
- // Match is empty
- leaf input-node-connector {
- type inv:node-connector-id; //
- }
- }
- list action {
- key id;
- leaf id {
- type string;
- }
- choice action {
-
- }
- }
- }
- }
-
- /** Matches **/
- augment "/flows/flow/match" {
- ext:augment-identifier "ethernet-match";
+ /** Match Groupings **/
+ grouping "ethernet-match-fields" {
container ethernet-source {
- description "Ethernet source address.";
+ description "Ethernet source address.";
+ presence "Match field is active and set";
leaf address {
+ mandatory true;
type yang:mac-address;
}
leaf mask {
+ type binary;
}
}
container ethernet-destination {
description "Ethernet destination address.";
+ presence "Match field is active and set";
leaf address {
+ mandatory true;
type yang:mac-address;
}
}
container ethernet-type {
description "Ethernet frame type.";
+ presence "Match field is active and set";
+
leaf type {
- type uint16; // Needs to define that as general model
+ mandatory true;
+ type l2t:ether-type;
}
leaf mask {
type binary;
}
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "vlan-match";
-
+ grouping "vlan-match-fields" {
container vlan-id {
description "VLAN id.";
+ presence "Match field is active and set";
+
leaf vlan-id {
- type uint16; // TODO: Define proper vlan id type.
+ mandatory true;
+ type l2t:vlan-id;
}
leaf mask {
type binary;
}
}
leaf vlan-pcp {
description "VLAN priority.";
- type uint8; // TODO: Define PCP type
+ type l2t:vlan-pcp;
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "ip-match";
-
+ grouping "ip-match-fields" {
leaf ip-protocol {
description "IP protocol.";
type uint8; // TODO define IP protocol number
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "ipv4-match";
+ grouping "ipv4-match-fields" {
leaf ipv4-source {
description "IPv4 source address.";
type inet:ipv4-prefix;
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "ipv6-match";
+ grouping "ipv6-match-fields" {
leaf ipv6-source {
description "IPv6 source address.";
type inet:ipv6-prefix;
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "udp-match";
-
+ grouping "udp-match-fields" {
leaf udp-source-port {
description "UDP source port.";
type inet:port-number;
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "tcp-match";
+ grouping "tcp-match-fields" {
leaf tcp-source-port {
description "TCP source port.";
type inet:port-number;
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "sctp-match";
+ grouping "sctp-match-fields" {
leaf sctp-source-port {
description "SCTP source port.";
type inet:port-number;
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "icmpv4-match";
+ grouping "icmpv4-match-fields" {
leaf icmpv4-type {
description "ICMP type.";
type uint8; // Define ICMP Type
}
}
- augment "/flows/flow/match" {
- ext:augment-identifier "arp-match";
-
+ grouping "arp-match-fields" {
leaf arp-source-transport-address {
description "ARP source IPv4 address.";
type inet:ipv4-prefix;
}
container arp-source-hardware-address {
description "ARP source hardware address.";
+ presence "Match field is active and set";
leaf address {
+ mandatory true;
type yang:mac-address;
}
leaf mask {
+ type binary;
}
}
container arp-target-hardware-address {
description "ARP target hardware address.";
+ presence "Match field is active and set";
leaf address {
+ mandatory true;
type yang:mac-address;
}
leaf mask {
+ type binary;
}
}
}
- /** Actions **/
- augment "/flows/flow/action/action" {
- case output-action {
- leaf output-node-connector {
- type string;
+ /** Action Groupings **/
+ grouping action {
+ choice action {
+ case output-action {
+ leaf output-node-connector {
+ type string;
+ }
}
- }
- case controller-action {
- leaf max-length {
- type uint16 {
- range "0..65294";
+ case controller-action {
+ leaf max-length {
+ type uint16 {
+ range "0..65294";
+ }
}
}
- }
- case set-queue-action {
- leaf queue {
- type string; // TODO: define queues
+ case set-queue-action {
+ leaf queue {
+ type string; // TODO: define queues
+ }
}
- }
- case pop-mpls-action {
- container pop-mpls {
- leaf ethernet-type {
- type uint16; // TODO: define ethertype type
+ case pop-mpls-action {
+ container pop-mpls {
+ leaf ethernet-type {
+ type l2t:ether-type;
+ }
}
}
- }
- case set-mpls-ttl-action {
- leaf mpls-ttl {
- type uint8;
+ case set-mpls-ttl-action {
+ leaf mpls-ttl {
+ type uint8;
+ }
}
- }
- case set-nw-ttl-action {
- leaf nw-ttl {
- type uint8;
+ case set-nw-ttl-action {
+ leaf nw-ttl {
+ type uint8;
+ }
}
- }
- case push-pbb-action {
+ case push-pbb-action {
- }
+ }
- case push-mpls-action {
+ case push-mpls-action {
- }
+ }
- case push-vlan-action {
+ case push-vlan-action {
+ }
}
}
+ grouping flow {
+ leaf node {
+ type inv:node-id;
+ }
+ container match {
+ container "ethernet-match" {
+ uses "ethernet-match-fields";
+ }
+ container "vlan-match" {
+ uses "vlan-match-fields";
+ }
+ container "ip-match" {
+ uses "ip-match-fields";
+ }
+ container "ipv4-match" {
+ uses "ipv4-match-fields";
+ }
+ container "ipv6-match" {
+ uses "ipv6-match-fields";
+ }
+ container "udp-match" {
+ uses "udp-match-fields";
+ }
+ container "tcp-match" {
+ uses "tcp-match-fields";
+ }
+ container "sctp-match" {
+ uses "sctp-match-fields";
+ }
+ container "icmpv4-match" {
+ uses "icmpv4-match-fields";
+ }
+ container "arp-match" {
+ uses "arp-match-fields";
+ }
+ }
+ list action {
+ key "order";
+ leaf order {
+ type int32;
+ }
+ uses action;
+ }
+ }
}
\ No newline at end of file
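
Since the augments above are now plain groupings, they only take effect where another module instantiates them with `uses`. Below is a minimal sketch of such a consumer; the module name, namespace, and the `flows`/`flow` layout are hypothetical illustrations, with only the `uses flowbase:flow` line reflecting this change:

```yang
module example-flow-config {
    // Hypothetical namespace and prefix, for illustration only.
    namespace "urn:example:flow:config";
    prefix "exflow";

    import opendaylight-flow-base { prefix flowbase; }

    revision "2013-08-19" {
        description "Illustrative consumer of opendaylight-flow-base.";
    }

    container flows {
        list flow {
            key "id";
            leaf id {
                type string;
            }
            // Pulls in the node leaf, the match container with all of the
            // *-match-fields groupings, and the ordered action list.
            uses flowbase:flow;
        }
    }
}
```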
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-
+/**
+ * This class implements basic L3 forwarding within the managed devices.
+ * Forwarding is only done within configured subnets.<br/>
+ * <br/>
+ * The basic flow is that the module listens for new hosts from the
+ * {@link org.opendaylight.controller.hosttracker.IfIptoHost HostTracker}
+ * service and on discovering a new host it first calls
+ * <tt>preparePerHostRules()</tt> to create a set of new rules that must be
+ * installed in the network. This is done by repeatedly calling
+ * <tt>updatePerHostRuleInSW()</tt> for each switch in the network. Then it
+ * installs those rules using <tt>installPerHostRules()</tt>.
+ */
public class SimpleForwardingImpl implements IfNewHostNotify,
IListenRoutingUpdates, IInventoryListener {
private static Logger log = LoggerFactory
.getLogger(SimpleForwardingImpl.class);
private static short DEFAULT_IPSWITCH_PRIORITY = 1;
+ private static final String FORWARDING_RULES_CACHE_NAME = "forwarding.ipswitch.rules";
private IfIptoHost hostTracker;
private IForwardingRulesManager frm;
private ITopologyManager topologyManager;
private IRouting routing;
+
+ /**
+ * The set of all forwarding rules: (host, switch) -> (in-port -> flowmod).
+ * Note that the host includes an attachment point, and that the inner key,
+ * while it looks like an ordinary switch port, may be a special connector
+ * that represents the switch itself rather than any one port.
+ */
private ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>> rulesDB;
private Map<Node, List<FlowEntry>> tobePrunedPos = new HashMap<Node, List<FlowEntry>>();
private IClusterContainerServices clusterContainerService = null;
/**
* Return codes from the programming of the perHost rules in HW
- *
*/
public enum RulesProgrammingReturnCode {
SUCCESS, FAILED_FEW_SWITCHES, FAILED_ALL_SWITCHES, FAILED_WRONG_PARAMS
}
try {
- clusterContainerService.createCache("forwarding.ipswitch.rules",
+ clusterContainerService.createCache(FORWARDING_RULES_CACHE_NAME,
EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
} catch (CacheExistException cee) {
log.error("\nCache already exists - destroy and recreate if needed");
}
rulesDB = (ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>>) clusterContainerService
- .getCache("forwarding.ipswitch.rules");
+ .getCache(FORWARDING_RULES_CACHE_NAME);
if (rulesDB == null) {
log.error("\nFailed to get rulesDB handle");
}
return;
}
- clusterContainerService.destroyCache("forwarding.ipswitch.rules");
+ clusterContainerService.destroyCache(FORWARDING_RULES_CACHE_NAME);
}
- @SuppressWarnings("unused")
+ /**
+ * Populates <tt>rulesDB</tt> with rules specifying how to reach
+ * <tt>host</tt> from <tt>currNode</tt> assuming that:
+ * <ul>
+ * <li><tt>host</tt> is attached to <tt>rootNode</tt>
+ * <li><tt>link</tt> is the next part of the path to reach <tt>rootNode</tt>
+ * from <tt>currNode</tt>
+ * <li><tt>rulesDB.get(key)</tt> represents the list of rules stored about
+ * <tt>host</tt> at <tt>currNode</tt>
+ * </ul>
+ *
+ * @param host
+ * The host to be reached.
+ * @param currNode
+ * The current node being processed.
+ * @param rootNode
+ * The node to be reached. Really, the switch which host is
+ * attached to.
+ * @param link
+ * The link to follow from currNode to get to rootNode.
+ * @param key
+ * The key to store computed rules at in the rulesDB. For now,
+ * this is a {@link HostNodePair} of host and currNode.
+ */
private void updatePerHostRuleInSW(HostNodeConnector host, Node currNode,
- Node rootNode, Edge link, HostNodePair key,
- Set<NodeConnector> passedPorts) {
+ Node rootNode, Edge link, HostNodePair key) {
- // link parameter it's optional
+ // only the link parameter is optional
if (host == null || key == null || currNode == null || rootNode == null) {
return;
}
- Set<NodeConnector> ports = passedPorts;
- // TODO: Replace this with SAL equivalent when available
- //if (container == null) {
- ports = new HashSet<NodeConnector>();
+
+ Set<NodeConnector> ports = new HashSet<NodeConnector>();
+ // add a special port of type ALL and port 0 to represent the node
+ // without specifying a port on that node
ports.add(NodeConnectorCreator.createNodeConnector(
NodeConnectorIDType.ALL, NodeConnector.SPECIALNODECONNECTORID,
currNode));
- //}
HashMap<NodeConnector, FlowEntry> pos = this.rulesDB.get(key);
if (pos == null) {
pos = new HashMap<NodeConnector, FlowEntry>();
}
- if (ports == null) {
- log.debug("Empty port list, nothing to do");
- return;
- }
+
for (NodeConnector inPort : ports) {
- /*
- * skip the port connected to the target host
- */
+ // skip the port connected to the target host
if (currNode.equals(rootNode)
&& (host.getnodeConnector().equals(inPort))) {
continue;
}
+
+ // remove the current rule, if any
FlowEntry removed_po = pos.remove(inPort);
Match match = new Match();
List<Action> actions = new ArrayList<Action>();
- // IP destination based forwarding
- //on /32 entries only!
+
+ // IP destination based forwarding on /32 entries only!
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_DST, host.getNetworkAddress());
- //Action for the policy if to
- //forward to a port except on the
- //switch where the host sits,
- //which is to rewrite also the MAC
- //and to forward on the Host port
+ /* Action for the policy is to forward to a port except on the
+ * switch where the host sits, which is to rewrite also the MAC
+ * and to forward on the Host port */
NodeConnector outPort = null;
if (currNode.equals(rootNode)) {
+ /* If we're at the root node, then rewrite the DL addr and
+ * possibly pop the VLAN tag. This allows for MAC rewriting
+ * in the core of the network assuming we can uniquely ID
+ * packets based on IP address. */
+
outPort = host.getnodeConnector();
if (inPort.equals(outPort)) {
- /*
- * skip the host port
- */
+ // TODO: isn't this code skipped already by the above continue?
+ // skip the host port
continue;
}
actions.add(new SetDlDst(host.getDataLayerAddressBytes()));
- if (!inPort.getType().equals(
- NodeConnectorIDType.ALL)) {
- /*
- * Container mode: at the destination switch, we need to strip out the tag (VLAN)
- */
+ if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
+ // Container mode: at the destination switch, we need to strip out the tag (VLAN)
actions.add(new PopVlan());
}
} else {
- /*
- * currNode is NOT the rootNode
- */
+ // currNode is NOT the rootNode, find the next hop and create a rule
if (link != null) {
outPort = link.getTailNodeConnector();
if (inPort.equals(outPort)) {
- /*
- * skip the outgoing port
- */
+ // skip the outgoing port
continue;
}
- /*
- * If outPort is network link, add VLAN tag
- */
+
+ // If outPort is network link, add VLAN tag
if (topologyManager.isInternal(outPort)) {
log.debug("outPort {}/{} is internal uplink port",
currNode, outPort);
}
- if ((!inPort.getType().equals(
- NodeConnectorIDType.ALL))
- && (topologyManager.isInternal(outPort))) {
+ if ((!inPort.getType().equals(NodeConnectorIDType.ALL))
+ && (topologyManager.isInternal(outPort))) {
Node nextNode = link.getHeadNodeConnector()
- .getNode();
+ .getNode();
// TODO: Replace this with SAL equivalent
//short tag = container.getTag((Long)nextNode.getNodeID());
short tag = 0;
actions.add(new Output(outPort));
}
if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
- /*
- * include input port in the flow match field
- */
+ // include input port in the flow match field
match.setField(MatchType.IN_PORT, inPort);
if (topologyManager.isInternal(inPort)) {
log.debug("inPort {}/{} is host facing port", currNode,
inPort);
}
- /*
- * for incoming network link; if the VLAN tag is defined, include it for incoming flow matching
- */
+
+ // for incoming network link; if the VLAN tag is defined, include it for incoming flow matching
if (topologyManager.isInternal(inPort)) {
// TODO: Replace this with SAL equivalent
//short tag = container.getTag((Long)currNode.getNodeID());
+ currNode + "]";
FlowEntry po = new FlowEntry(policyName, flowName, flow, currNode);
- // Now save the rule in the DB rule,
- // so on updates from topology we can
- // selectively
+ /* Now save the rule in the DB so that on updates from topology
+ * we can selectively update it */
pos.put(inPort, po);
this.rulesDB.put(key, pos);
if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
if (host == null) {
return null;
}
+
+ //TODO: race condition! unset* functions can make these null.
if (this.routing == null) {
return null;
}
HashMap<NodeConnector, FlowEntry> pos;
FlowEntry po;
+ // for all nodes in the system
for (Node node : nodes) {
if (node.equals(rootNode)) {
// We skip it because for the node with host attached
List<Edge> links;
Path res = this.routing.getRoute(node, rootNode);
if ((res == null) || ((links = res.getEdges()) == null)) {
- // Still the path that connect node to rootNode
- // doesn't exists
+ // No route from node to rootNode can be found; back out any
+ // forwarding rules we may have installed earlier.
log.debug("NO Route/Path between SW[{}] --> SW[{}] cleaning " +
"potentially existing entries", node, rootNode);
key = new HostNodePair(host, node);
for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
po = e.getValue();
if (po != null) {
- //Uninstall the policy
+ // uninstall any existing rules we put in the
+ // ForwardingRulesManager
this.frm.uninstallFlowEntry(po);
}
}
}
log.debug("Route between SW[{}] --> SW[{}]", node, rootNode);
- Integer curr;
Node currNode = node;
key = new HostNodePair(host, currNode);
- Edge link;
- for (curr = 0; curr < links.size(); curr++) {
- link = links.get(curr);
+
+ // for each link in the route from here to there
+ for (Edge link : links) {
if (link == null) {
log.error("Could not retrieve the Link");
+ // TODO: should we keep going?
continue;
}
log.debug(link.toString());
// Index all the switches to be programmed
- // switchesToProgram.add(currNode);
- Set<NodeConnector> ports = null;
- ports = switchManager.getUpNodeConnectors(currNode);
- updatePerHostRuleInSW(host, currNode, rootNode, link, key,
- ports);
+ updatePerHostRuleInSW(host, currNode, rootNode, link, key);
if ((this.rulesDB.get(key)) != null) {
- /*
- * Calling updatePerHostRuleInSW() doesn't guarantee that rules will be
- * added in currNode (e.g, there is only one link from currNode to rootNode
- * This check makes sure that there are some rules in the rulesDB for the
- * given key prior to adding switch to switchesToProgram
+ /* Calling updatePerHostRuleInSW() doesn't guarantee that
+ * rules will be added in currNode (e.g., there may be only one
+ * link from currNode to rootNode). This check makes sure that
+ * there are some rules in the rulesDB for the given key
+ * prior to adding the switch to switchesToProgram.
*/
switchesToProgram.add(currNode);
}
// multiple hosts attached to it but not yet connected to the
// rest of the world
switchesToProgram.add(rootNode);
- Set<NodeConnector> ports = switchManager
- .getUpNodeConnectors(rootNode);
- updatePerHostRuleInSW(host, rootNode, rootNode, null, new HostNodePair(
- host, rootNode), ports);
+ updatePerHostRuleInSW(host, rootNode, rootNode, null,
+ new HostNodePair(host, rootNode));
// log.debug("Getting out at the end!");
return switchesToProgram;
/**
* Calculate the per-Host rules to be installed in the rulesDB
- * from a specific switch when a host facing port comes up.
+ * from a specific switch when a host facing port comes up.
* These rules will later on be installed in HW. This routine
* will implicitly calculate the shortest path from the switch
* where the port has come up to the switch where the host is,
HostNodePair key;
Map<NodeConnector, FlowEntry> pos;
FlowEntry po;
- Set<NodeConnector> ports = new HashSet<NodeConnector>();
- ports.add(swport);
List<Edge> links;
Path res = this.routing.getRoute(node, rootNode);
if ((res == null) || ((links = res.getEdges()) == null)) {
- // Still the path that connect node to rootNode
- // doesn't exists
+ // the routing service doesn't know how to get there from here
log.debug("NO Route/Path between SW[{}] --> SW[{}] cleaning " +
"potentially existing entries", node, rootNode);
key = new HostNodePair(host, node);
// Index all the switches to be programmed
switchesToProgram.add(currNode);
- updatePerHostRuleInSW(host, currNode, rootNode, link, key, ports);
+ updatePerHostRuleInSW(host, currNode, rootNode, link, key);
break; // come out of the loop for port up case, interested only in programming one switch
}
public void recalculateDone() {
if (this.hostTracker == null) {
//Not yet ready to process all the updates
+ //TODO: we should make sure that this call is executed eventually
return;
}
Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
}
}
- /*
+ /**
* A Host facing port has come up in a container. Add rules on the switch where this
* port has come up, for all hosts known to the controller.
* @param swId switch id of the port where port came up
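
To make the lifecycle described in the class javadoc concrete, here is a hedged sketch of how the IfNewHostNotify callback might chain these pieces together. The signatures of preparePerHostRules() and installPerHostRules() are assumed from the fragments above (the `return switchesToProgram` statement and the RulesProgrammingReturnCode enum), not confirmed by this diff:

```java
// Hedged sketch only: assumes preparePerHostRules() returns the set of
// switches whose rulesDB entries were recomputed, and that
// installPerHostRules() pushes those entries through the
// IForwardingRulesManager, reporting a RulesProgrammingReturnCode.
public void notifyHTClient(HostNodeConnector host) {
    if (host == null) {
        return;
    }
    // Compute the per-host rules into rulesDB for every reachable switch.
    Set<Node> switchesToProgram = preparePerHostRules(host);
    if (switchesToProgram != null && !switchesToProgram.isEmpty()) {
        // Install the computed rules on those switches.
        RulesProgrammingReturnCode ret =
                installPerHostRules(host, switchesToProgram);
        if (ret != RulesProgrammingReturnCode.SUCCESS) {
            log.error("Failed to install per-host rules for host {}", host);
        }
    }
}
```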