3 * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
5 * This program and the accompanying materials are made available under the
6 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
7 * and is available at http://www.eclipse.org/legal/epl-v10.html
10 package org.opendaylight.controller.samples.simpleforwarding.internal;
12 import java.util.ArrayList;
13 import java.util.EnumSet;
14 import java.util.HashMap;
15 import java.util.HashSet;
16 import java.util.Iterator;
17 import java.util.LinkedList;
18 import java.util.List;
21 import java.util.concurrent.ConcurrentMap;
23 import org.opendaylight.controller.clustering.services.CacheConfigException;
24 import org.opendaylight.controller.clustering.services.CacheExistException;
25 import org.opendaylight.controller.clustering.services.IClusterContainerServices;
26 import org.opendaylight.controller.clustering.services.IClusterServices;
27 import org.opendaylight.controller.forwardingrulesmanager.FlowEntry;
28 import org.opendaylight.controller.forwardingrulesmanager.IForwardingRulesManager;
29 import org.opendaylight.controller.hosttracker.IfIptoHost;
30 import org.opendaylight.controller.hosttracker.IfNewHostNotify;
31 import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
32 import org.opendaylight.controller.sal.action.Action;
33 import org.opendaylight.controller.sal.action.Output;
34 import org.opendaylight.controller.sal.action.PopVlan;
35 import org.opendaylight.controller.sal.action.SetDlDst;
36 import org.opendaylight.controller.sal.action.SetVlanId;
37 import org.opendaylight.controller.sal.core.Edge;
38 import org.opendaylight.controller.sal.core.Node;
39 import org.opendaylight.controller.sal.core.NodeConnector;
40 import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
41 import org.opendaylight.controller.sal.core.Path;
42 import org.opendaylight.controller.sal.core.Property;
43 import org.opendaylight.controller.sal.core.State;
44 import org.opendaylight.controller.sal.core.UpdateType;
45 import org.opendaylight.controller.sal.flowprogrammer.Flow;
46 import org.opendaylight.controller.sal.match.Match;
47 import org.opendaylight.controller.sal.match.MatchType;
48 import org.opendaylight.controller.sal.routing.IListenRoutingUpdates;
49 import org.opendaylight.controller.sal.routing.IRouting;
50 import org.opendaylight.controller.sal.utils.EtherTypes;
51 import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
52 import org.opendaylight.controller.sal.utils.Status;
53 import org.opendaylight.controller.samples.simpleforwarding.HostNodePair;
54 import org.opendaylight.controller.switchmanager.IInventoryListener;
55 import org.opendaylight.controller.switchmanager.ISwitchManager;
56 import org.opendaylight.controller.topologymanager.ITopologyManager;
57 import org.slf4j.Logger;
58 import org.slf4j.LoggerFactory;
61 * This class implements basic L3 forwarding within the managed devices.
62 * Forwarding is only done within configured subnets.<br>
64 * The basic flow is that the module listens for new hosts from the
65 * {@link org.opendaylight.controller.hosttracker.IfIptoHost HostTracker}
66 * service and on discovering a new host it first calls
67 * <tt>preparePerHostRules()</tt> to create a set of new rules that must be
68 * installed in the network. This is done by repeatedly calling
69 * <tt>updatePerHostRuleInSW()</tt> for each switch in the network. Then it
70 * installs those rules using <tt>installPerHostRules()</tt>.
72 public class SimpleForwardingImpl implements IfNewHostNotify,
73         IListenRoutingUpdates, IInventoryListener {
74     private static Logger log = LoggerFactory
75             .getLogger(SimpleForwardingImpl.class);
    // Priority for the /32 IP flows installed by this module; kept just above
    // the default drop entries (see updatePerHostRuleInSW()).
76     private static short DEFAULT_IPSWITCH_PRIORITY = 1;
    // Name of the clustered cache backing rulesDB (see allocateCaches()).
77     private static String FORWARDING_RULES_CACHE_NAME = "forwarding.ipswitch.rules";
    // Services injected by the dependency manager via the set*/unset* methods below.
78     private IfIptoHost hostTracker;
79     private IForwardingRulesManager frm;
80     private ITopologyManager topologyManager;
81     private IRouting routing;
84      * The set of all forwarding rules: (host) -> (switch -> flowmod). Note that
85      * the host includes an attachment point and that while the switch appears
86      * to be a switch's port, in actuality it is a special port which just
87      * represents the switch.
89     private ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>> rulesDB;
    // Per-switch list of superseded FlowEntry objects awaiting HW removal
    // (populated by addTobePrunedPolicy(), drained by pruneExcessRules()).
90     private Map<Node, List<FlowEntry>> tobePrunedPos = new HashMap<Node, List<FlowEntry>>();
91     private IClusterContainerServices clusterContainerService = null;
92     private ISwitchManager switchManager;
95      * Return codes from the programming of the perHost rules in HW
    // SUCCESS: all switches programmed; FAILED_FEW_SWITCHES: at least one
    // install failed; FAILED_ALL_SWITCHES: nothing programmed;
    // FAILED_WRONG_PARAMS: caller passed null/invalid arguments.
97     public enum RulesProgrammingReturnCode {
98         SUCCESS, FAILED_FEW_SWITCHES, FAILED_ALL_SWITCHES, FAILED_WRONG_PARAMS
    // Dependency-manager injection points. Each unset* method clears the field
    // only when it still references the instance being withdrawn, so a newer
    // binding is not clobbered by a late unset of an old one.
101     public void setRouting(IRouting routing) {
102         this.routing = routing;
105     public void unsetRouting(IRouting routing) {
106         if (this.routing == routing) {
111     public ITopologyManager getTopologyManager() {
112         return topologyManager;
115     public void setTopologyManager(ITopologyManager topologyManager) {
116         log.debug("Setting topologyManager");
117         this.topologyManager = topologyManager;
120     public void unsetTopologyManager(ITopologyManager topologyManager) {
121         if (this.topologyManager == topologyManager) {
122             this.topologyManager = null;
126     public void setHostTracker(IfIptoHost hostTracker) {
127         log.debug("Setting HostTracker");
128         this.hostTracker = hostTracker;
131     public void setForwardingRulesManager(
132             IForwardingRulesManager forwardingRulesManager) {
133         log.debug("Setting ForwardingRulesManager");
134         this.frm = forwardingRulesManager;
137     public void unsetHostTracker(IfIptoHost hostTracker) {
138         if (this.hostTracker == hostTracker) {
139             this.hostTracker = null;
143     public void unsetForwardingRulesManager(
144             IForwardingRulesManager forwardingRulesManager) {
145         if (this.frm == forwardingRulesManager) {
151      * Function called when the bundle gets activated
154     public void startUp() {
160      * Function called when the bundle gets stopped
163     public void shutDown() {
164         log.debug("Destroy all the host Rules given we are shutting down");
        // Passing no host removes the per-host rules for every known host
        // (uninstallPerHostRules(null) matches all hosts).
165         uninstallPerHostRules();
    // Lifecycle helpers for the clustered cache that backs rulesDB.
    // allocateCaches() creates it, retrieveCaches() obtains the handle,
    // destroyCaches() tears it down; all are no-ops (with a log) when the
    // cluster service has not been injected yet.
169     @SuppressWarnings("deprecation")
170     private void allocateCaches() {
171         if (this.clusterContainerService == null) {
172             log.info("un-initialized clusterContainerService, can't create cache");
177             clusterContainerService.createCache(FORWARDING_RULES_CACHE_NAME,
178                     EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
        // An already-existing cache is treated as non-fatal: log and keep going.
179         } catch (CacheExistException cee) {
180             log.error("\nCache already exists - destroy and recreate if needed");
181         } catch (CacheConfigException cce) {
182             log.error("\nCache configuration invalid - check cache mode");
186     @SuppressWarnings({ "unchecked", "deprecation" })
187     private void retrieveCaches() {
188         if (this.clusterContainerService == null) {
189             log.info("un-initialized clusterContainerService, can't retrieve cache");
        // Unchecked cast is unavoidable here: the cluster service returns an
        // untyped ConcurrentMap handle for the named cache.
193         rulesDB = (ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>>) clusterContainerService
194                 .getCache(FORWARDING_RULES_CACHE_NAME);
195         if (rulesDB == null) {
196             log.error("\nFailed to get rulesDB handle");
200     @SuppressWarnings("deprecation")
201     private void destroyCaches() {
202         if (this.clusterContainerService == null) {
203             log.info("un-initialized clusterContainerService, can't destroy cache");
207         clusterContainerService.destroyCache(FORWARDING_RULES_CACHE_NAME);
211 * Populates <tt>rulesDB</tt> with rules specifying how to reach
212 * <tt>host</tt> from <tt>currNode</tt> assuming that:
214 * <li><tt>host</tt> is attached to <tt>rootNode</tt>
215 * <li><tt>link</tt> is the next part of the path to reach <tt>rootNode</tt>
216 * from <tt>currNode</tt>
217 * <li><tt>rulesDB.get(key)</tt> represents the list of rules stored about
218 * <tt>host</tt> at <tt>currNode</tt>
222 * The host to be reached.
224 * The current node being processed.
226 * The node to be reached. Really, the switch which host is
229 * The link to follow from curNode to get to rootNode
231 * The key to store computed rules at in the rulesDB. For now,
232 * this is a {@link HostNodePair} of host and currNode.
234     private void updatePerHostRuleInSW(HostNodeConnector host, Node currNode,
235             Node rootNode, Edge link, HostNodePair key) {
237         // only the link parameter is optional
238         if (host == null || key == null || currNode == null || rootNode == null) {
242         Set<NodeConnector> ports = new HashSet<NodeConnector>();
243         // add a special port of type ALL and port 0 to represent the node
244         // without specifying a port on that node
245         ports.add(NodeConnectorCreator.createNodeConnector(
246                 NodeConnectorIDType.ALL, NodeConnector.SPECIALNODECONNECTORID,
        // pos: existing per-inPort rules recorded for this (host, node) key;
        // created fresh below when the DB has none yet.
249         HashMap<NodeConnector, FlowEntry> pos = this.rulesDB.get(key);
251             pos = new HashMap<NodeConnector, FlowEntry>();
254         for (NodeConnector inPort : ports) {
255             // skip the port connected to the target host
256             if (currNode.equals(rootNode)
257                     && (host.getnodeConnector().equals(inPort))) {
261             // remove the current rule, if any; kept so it can be diffed
            // against the new rule and pruned from HW if the match changed.
262             FlowEntry removed_po = pos.remove(inPort);
263             Match match = new Match();
264             List<Action> actions = new ArrayList<Action>();
266             // IP destination based forwarding on /32 entries only!
267             match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
268             match.setField(MatchType.NW_DST, host.getNetworkAddress());
270             /* Action for the policy is to forward to a port except on the
271              * switch where the host sits, which is to rewrite also the MAC
272              * and to forward on the Host port */
273             NodeConnector outPort = null;
275             if (currNode.equals(rootNode)) {
276                 /* If we're at the root node, then rewrite the DL addr and
277                  * possibly pop the VLAN tag. This allows for MAC rewriting
278                  * in the core of the network assuming we can uniquely ID
279                  * packets based on IP address. */
281                 outPort = host.getnodeConnector();
282                 if (inPort.equals(outPort)) {
283                     // TODO: isn't this code skipped already by the above continue?
284                     // skip the host port
287                 actions.add(new SetDlDst(host.getDataLayerAddressBytes()));
289                 if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
290                     // Container mode: at the destination switch, we need to strip out the tag (VLAN)
291                     actions.add(new PopVlan());
294                 // currNode is NOT the rootNode, find the next hop and create a rule
            // NOTE(review): link may be null only when currNode == rootNode
            // (see "only the link parameter is optional" above) — confirm
            // callers uphold this before dereferencing link here.
296                 outPort = link.getTailNodeConnector();
297                 if (inPort.equals(outPort)) {
298                     // skip the outgoing port
302                 // If outPort is network link, add VLAN tag
303                 if (topologyManager.isInternal(outPort)) {
304                     log.debug("outPort {}/{} is internal uplink port",
307                     log.debug("outPort {}/{} is host facing port",
311                 if ((!inPort.getType().equals(NodeConnectorIDType.ALL))
312                         && (topologyManager.isInternal(outPort))) {
313                     Node nextNode = link.getHeadNodeConnector()
315                     // TODO: Replace this with SAL equivalent
316                     //short tag = container.getTag((Long)nextNode.getNodeID());
319                         log.debug("adding SET_VLAN {} for traffic " +
320                                 "leaving {}/{} toward switch {}",
321                                 new Object[] { tag, currNode, outPort,
323                         actions.add(new SetVlanId(tag));
325                         log.debug("No tag assigned to switch {}", nextNode);
330             if (outPort != null) {
331                 actions.add(new Output(outPort));
333             if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
334                 // include input port in the flow match field
335                 match.setField(MatchType.IN_PORT, inPort);
337                 if (topologyManager.isInternal(inPort)) {
338                     log.debug("inPort {}/{} is internal uplink port", currNode,
341                     log.debug("inPort {}/{} is host facing port", currNode,
345                 // for incoming network link; if the VLAN tag is defined, include it for incoming flow matching
346                 if (topologyManager.isInternal(inPort)) {
347                     // TODO: Replace this with SAL equivalent
348                     //short tag = container.getTag((Long)currNode.getNodeID());
351                         log.debug("adding MATCH VLAN {} for traffic entering" +
353                                 new Object[] {tag, currNode, inPort});
354                         match.setField(MatchType.DL_VLAN, tag);
356                         log.debug("No tag assigned to switch {}", currNode);
360             // Make sure the priority for IP switch entries is
361             // set to a level just above default drop entries
362             Flow flow = new Flow(match, actions);
363             flow.setIdleTimeout((short) 0);
364             flow.setHardTimeout((short) 0);
365             flow.setPriority(DEFAULT_IPSWITCH_PRIORITY);
            // Policy/flow names are derived from the host's /32 address (and
            // the input port when one is specified), for readable FRM entries.
367             String policyName = host.getNetworkAddress().getHostAddress()
369             String flowName = "["
370                     + (!inPort.getType().equals(NodeConnectorIDType.ALL) ?
371                             (inPort.getID()).toString()
373                     + host.getNetworkAddress().getHostAddress() + "/32 on N "
375             FlowEntry po = new FlowEntry(policyName, flowName, flow, currNode);
377             /* Now save the rule in the DB rule, so on updates from topology we
380             this.rulesDB.put(key, pos);
381             if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
382                 log.debug("Adding Match(inPort = {} , DIP = {})" +
383                         " Action(outPort= {}) to node {}",
384                         new Object[] { inPort,
385                         host.getNetworkAddress().getHostAddress(),
                // If we replaced a rule whose match differs from the new one,
                // queue the old rule for HW removal via pruneExcessRules().
387                 if ((removed_po != null)
388                         && (!po.getFlow().getMatch().equals(
389                                 removed_po.getFlow().getMatch()))) {
390                     log.debug("Old Flow match: {}, New Flow match: {}",
391                             removed_po.getFlow().getMatch(), po.getFlow()
393                     addTobePrunedPolicy(currNode, removed_po, po);
397                 log.debug("Adding policyMatch(DIP = {}) Action(outPort= {}) " +
398                         "to node {}", new Object[] {
399                         host.getNetworkAddress().getHostAddress(), outPort,
406 * Calculate the per-Host rules to be installed in the rulesDB,
407 * and that will later on be installed in HW, this routine will
408 * implicitly calculate the shortest path tree among the switch
409 * to which the host is attached and all the other switches in the
410 * network and will automatically create all the rules that allow
411 * a /32 destination IP based forwarding, as in traditional IP
414 * @param host Host for which we are going to prepare the rules in the rulesDB
416 * @return A set of switches touched by the calculation
418     private Set<Node> preparePerHostRules(HostNodeConnector host) {
        // Bail out defensively if required services/state are missing (see the
        // race-condition TODO below: unset* callbacks can null these at any time).
423         //TODO: race condition! unset* functions can make these null.
424         if (this.routing == null) {
427         if (this.switchManager == null) {
430         if (this.rulesDB == null) {
434         Node rootNode = host.getnodeconnectorNode();
435         Set<Node> nodes = this.switchManager.getNodes();
436         Set<Node> switchesToProgram = new HashSet<Node>();
438         HashMap<NodeConnector, FlowEntry> pos;
441         // for all nodes in the system
442         for (Node node : nodes) {
443             if (node.equals(rootNode)) {
444                 // We skip it because for the node with host attached
445                 // we will process in every case even if there are no
450             Path res = this.routing.getRoute(node, rootNode);
451             if ((res == null) || ((links = res.getEdges()) == null)) {
452                 // No route from node to rootNode can be found, back out any
453                 // existing forwarding rules if they exist.
454                 log.debug("NO Route/Path between SW[{}] --> SW[{}] cleaning " +
455                         "potentially existing entries", node, rootNode);
456                 key = new HostNodePair(host, node);
457                 pos = this.rulesDB.get(key);
459                     for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
462                         // uninstall any existing rules we put in the
463                         // ForwardingRulesManager
464                         this.frm.uninstallFlowEntry(po);
467                     this.rulesDB.remove(key);
472             log.debug("Route between SW[{}] --> SW[{}]", node, rootNode);
473             Node currNode = node;
474             key = new HostNodePair(host, currNode);
476             // for each link in the route from here to there
477             for (Edge link : links) {
479                     log.error("Could not retrieve the Link");
480                     // TODO: should we keep going?
484                 log.debug(link.toString());
486                 // Index all the switches to be programmed
487                 updatePerHostRuleInSW(host, currNode, rootNode, link, key);
488                 if ((this.rulesDB.get(key)) != null) {
489                     /* Calling updatePerHostRuleInSW() doesn't guarantee that
490                      * rules will be added in currNode (e.g., there is only one
491                      * link from currNode to rootNode). This check makes sure that
492                      * there are some rules in the rulesDB for the given key
493                      * prior to adding switch to switchesToProgram
495                     switchesToProgram.add(currNode);
497                 currNode = link.getHeadNodeConnector().getNode();
498                 key = new HostNodePair(host, currNode);
502         // This rule will be added no matter whether any topology is built
503         // or not; it serves as a way to handle the case of a node with
504         // multiple hosts attached to it but not yet connected to the
506         switchesToProgram.add(rootNode);
507         updatePerHostRuleInSW(host, rootNode, rootNode, null,
508                 new HostNodePair(host, rootNode));
510         // log.debug("Getting out at the end!");
511         return switchesToProgram;
515 * Calculate the per-Host rules to be installed in the rulesDB
516 * from a specific switch when a host facing port comes up.
517 * These rules will later on be installed in HW. This routine
518 * will implicitly calculate the shortest path from the switch
519      * where the port has come up to the switch where the host is
520 * attached and will automatically create all the rules that allow
521 * a /32 destination IP based forwarding, as in traditional IP
524 * @param host Host for which we are going to prepare the rules in the rulesDB
525 * @param swId Switch ID where the port has come up
527 * @return A set of switches touched by the calculation
529     private Set<Node> preparePerHostPerSwitchRules(HostNodeConnector host,
530             Node node, NodeConnector swport) {
531         if ((host == null) || (node == null)) {
534         if (this.routing == null) {
537         if (this.switchManager == null) {
540         if (this.rulesDB == null) {
544         Node rootNode = host.getnodeconnectorNode();
545         Set<Node> switchesToProgram = new HashSet<Node>();
547         Map<NodeConnector, FlowEntry> pos;
551         Path res = this.routing.getRoute(node, rootNode);
552         if ((res == null) || ((links = res.getEdges()) == null)) {
553             // the routing service doesn't know how to get there from here:
            // back out any rules previously stored for this (host, node) pair.
554             log.debug("NO Route/Path between SW[{}] --> SW[{}] cleaning " +
555                     "potentially existing entries", node, rootNode);
556             key = new HostNodePair(host, node);
557             pos = this.rulesDB.get(key);
559                 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
562                     //Uninstall the policy
563                     this.frm.uninstallFlowEntry(po);
566                 this.rulesDB.remove(key);
571         log.debug("Route between SW[{}] --> SW[{}]", node, rootNode);
573         Node currNode = node;
574         key = new HostNodePair(host, currNode);
        // Unlike preparePerHostRules(), only the first switch on the path is
        // programmed here (note the unconditional break below).
576         for (curr = 0; curr < links.size(); curr++) {
577             link = links.get(curr);
579                 log.error("Could not retrieve the Link");
583             log.debug("Link [{}/{}] --> [{}/{}]", new Object[] {
584                     currNode, link.getHeadNodeConnector(),
585                     link.getHeadNodeConnector().getNode(),
586                     link.getTailNodeConnector()});
588             // Index all the switches to be programmed
589             switchesToProgram.add(currNode);
590             updatePerHostRuleInSW(host, currNode, rootNode, link, key);
591             break; // come out of the loop for port up case, interested only in programming one switch
594         // This rule will be added no matter if any topology is built
595         // or no, it serve as a way to handle the case of a node with
596         // multiple hosts attached to it but not yet connected to the
598         // switchesToProgram.add(rootNode);
599         //updatePerHostRuleInSW(host, rootNode,
601         //                    new HostNodePair(host, rootNode),ports);
603         // log.debug("Getting out at the end!");
604         return switchesToProgram;
608 * Routine that fetch the per-Host rules from the rulesDB and
609 * install in HW, the one having the same match rules will be
610 * overwritten silently.
612 * @param host host for which we want to install in HW the per-Host rules
613 * @param switchesToProgram list of switches to be programmed in
614 * HW, usually are them all, but better to be explicit, that list
615 * may change with time based on new switch addition/removal
617 * @return a return code that convey the programming status of the HW
619     private RulesProgrammingReturnCode installPerHostRules(
620             HostNodeConnector host, Set<Node> switchesToProgram) {
621         RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
622         if (host == null || switchesToProgram == null) {
623             return RulesProgrammingReturnCode.FAILED_WRONG_PARAMS;
625         Map<NodeConnector, FlowEntry> pos;
627         // Now program every single switch
628         log.debug("Inside installPerHostRules");
629         for (Node swId : switchesToProgram) {
630             HostNodePair key = new HostNodePair(host, swId);
631             pos = this.rulesDB.get(key);
635                 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
638                     // Populate the Policy field now
639                     Status poStatus = this.frm.installFlowEntry(po);
640                     if (!poStatus.isSuccess()) {
641                         log.error("Failed to install policy: "
642                                 + po.getGroupName() + " ("
643                                 + poStatus.getDescription() + ")");
                    // A single failed install downgrades the overall result and
                    // drops the entry from the DB so it is not considered installed.
645                         retCode = RulesProgrammingReturnCode.FAILED_FEW_SWITCHES;
646                         // Remove the entry from the DB, it was not installed!
647                         this.rulesDB.remove(key);
649                         log.debug("Successfully installed policy "
650                                 + po.toString() + " on switch " + swId);
653                 log.error("Cannot find a policy for SW:({}) Host: ({})",
655         /* // Now dump every single rule */
656         /* for (HostNodePair dumpkey : this.rulesDB.keySet()) { */
657         /*     po = this.rulesDB.get(dumpkey); */
658         /*     log.debug("Dumping entry H{" + dumpkey.getHost() + "} S{" + dumpkey.getSwitchId() + "} = {" + (po == null ? "null policy" : po)); */
663         log.debug("Leaving installPerHostRules");
668 * Cleanup all the host rules for a given host
670 * @param host Host for which the host rules need to be cleaned
671 * up, the host could be null in that case it match all the hosts
673 * @return a return code that convey the programming status of the HW
675     private RulesProgrammingReturnCode uninstallPerHostRules(
676             HostNodeConnector host) {
677         RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
678         Map<NodeConnector, FlowEntry> pos;
680         // Walk every (host, switch) pair in the DB; a null host argument
        // matches all hosts (see the javadoc above).
681         for (HostNodePair key : this.rulesDB.keySet()) {
682             if (host == null || key.getHost().equals(host)) {
683                 pos = this.rulesDB.get(key);
684                 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
687                     // Uninstall the policy
688                     this.frm.uninstallFlowEntry(po);
691                 this.rulesDB.remove(key);
698 * Cleanup all the host rules for a given node, triggered when the
699 * switch disconnects, so there is no reason for Hw cleanup
700 * because it's disconnected anyhow
701 * TBD - Revisit above stmt in light of CSCus88743
702 * @param targetNode Node for which we want to do cleanup
705     private void uninstallPerNodeRules(Node targetNode) {
706         //RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
707         Map<NodeConnector, FlowEntry> pos;
710         // Now program every single switch
        // A null targetNode matches every node in the DB.
711         for (HostNodePair key : this.rulesDB.keySet()) {
712             Node node = key.getNode();
713             if (targetNode == null || node.equals(targetNode)) {
714                 log.debug("Work on {} host {}", node, key.getHost());
715                 pos = this.rulesDB.get(key);
716                 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
719                     // Uninstall the policy
                    // NOTE(review): the javadoc above says no HW cleanup is
                    // needed for a disconnected switch, yet uninstallFlowEntry()
                    // is still invoked here — confirm intent (see CSCus88743).
720                     this.frm.uninstallFlowEntry(po);
723                 log.debug("Remove {}", key);
724                 this.rulesDB.remove(key);
730 * Cleanup all the host rules currently present in the rulesDB
732 * @return a return code that convey the programming status of the HW
    // Convenience overload: a null host matches every host in the rulesDB.
734     private RulesProgrammingReturnCode uninstallPerHostRules() {
735         return uninstallPerHostRules(null);
    // IListenRoutingUpdates callback: routing has recomputed its paths, so
    // re-derive, re-install, and prune the per-host rules for every known host.
739     public void recalculateDone() {
740         if (this.hostTracker == null) {
741             //Not yet ready to process all the updates
742             //TODO: we should make sure that this call is executed eventually
745         Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
746         for (HostNodeConnector host : allHosts) {
747             Set<Node> switches = preparePerHostRules(host);
748             if (switches != null) {
749                 // This will refresh existing rules, by overwriting
751                 installPerHostRules(host, switches);
752                 pruneExcessRules(switches);
    // Queues a superseded FlowEntry (po) for later HW removal on switch swId;
    // new_po is logged for context only. Drained by pruneExcessRules().
757     void addTobePrunedPolicy(Node swId, FlowEntry po, FlowEntry new_po) {
758         List<FlowEntry> pl = tobePrunedPos.get(swId);
            // Lazily create the per-switch pruning list.
760             pl = new LinkedList<FlowEntry>();
761             tobePrunedPos.put(swId, pl);
764         log.debug("Adding Pruned Policy for SwId: {}", swId);
765         log.debug("Old Policy: {}", po);
766         log.debug("New Policy: {}", new_po);
    // Removes from HW all queued stale FlowEntry objects for each switch in
    // the given set (entries queued by addTobePrunedPolicy()).
769     private void pruneExcessRules(Set<Node> switches) {
770         for (Node swId : switches) {
771             List<FlowEntry> pl = tobePrunedPos.get(swId);
775                         "Policies for Switch: {} in the list to be deleted: {}",
777                 Iterator<FlowEntry> plIter = pl.iterator();
778                 //for (Policy po: pl) {
779                 while (plIter.hasNext()) {
780                     FlowEntry po = plIter.next();
781                     log.error("Removing Policy, Switch: {} Policy: {}", swId,
783                     this.frm.uninstallFlowEntry(po);
787                 // tobePrunedPos.remove(swId);
792 * A Host facing port has come up in a container. Add rules on the switch where this
793 * port has come up for all the known hosts to the controller.
794 * @param swId switch id of the port where port came up
795 * @param swPort port which came up
797     private void updateRulesforHIFup(Node node, NodeConnector swPort) {
798         if (this.hostTracker == null) {
799             //Not yet ready to process all the updates
802         log.debug("Host Facing Port in a container came up, install the rules for all hosts from this port !");
803         Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
804         for (HostNodeConnector host : allHosts) {
805             if (node.equals(host.getnodeconnectorNode())
806                     && swPort.equals(host.getnodeConnector())) {
808                  * This host resides behind the same switch and port for which a port up
809                  * message is received. Ideally this should not happen, but if it does,
810                  * don't program any rules for this host
            // Program only the switch where the port came up toward each host
            // (preparePerHostPerSwitchRules programs a single switch).
814             Set<Node> switches = preparePerHostPerSwitchRules(host, node,
816             if (switches != null) {
817                 // This will refresh existing rules, by overwriting
819                 installPerHostRules(host, switches);
    // IfNewHostNotify callback: a new host was discovered — compute and
    // install the per-host rules across the touched switches.
826     public void notifyHTClient(HostNodeConnector host) {
830         Set<Node> switches = preparePerHostRules(host);
831         if (switches != null) {
832             installPerHostRules(host, switches);
    // IfNewHostNotify callback: a host disappeared — remove its rules.
837     public void notifyHTClientHostRemoved(HostNodeConnector host) {
841         uninstallPerHostRules(host);
    // IInventoryListener callback: clean up rules for a node that went away.
845     public void notifyNode(Node node, UpdateType type,
846             Map<String, Property> propMap) {
852             log.debug("Node {} gone, doing a cleanup", node);
853             uninstallPerNodeRules(node);
    // IInventoryListener callback: dispatch port up/down handling based on
    // the connector's State property.
861     public void notifyNodeConnector(NodeConnector nodeConnector,
862             UpdateType type, Map<String, Property> propMap) {
863         if (nodeConnector == null)
874             State state = (State) propMap.get(State.StatePropName);
875             if ((state != null) && (state.getValue() == State.EDGE_UP)) {
884             handleNodeConnectorStatusUp(nodeConnector);
886             handleNodeConnectorStatusDown(nodeConnector);
    // A port came up: if it is host-facing (not an internal topology link),
    // install rules for any hosts reachable through it.
890     private void handleNodeConnectorStatusUp(NodeConnector nodeConnector) {
891         if (topologyManager == null) {
892             log.debug("topologyManager is not set yet");
896         if (topologyManager.isInternal(nodeConnector)) {
897             log.debug("{} is not a host facing link", nodeConnector);
901         log.debug("{} is up", nodeConnector);
902         updateRulesforHIFup(nodeConnector.getNode(), nodeConnector);
    // A port went down: currently only logged; no rule cleanup is visible here.
905     private void handleNodeConnectorStatusDown(NodeConnector nodeConnector) {
906         log.debug("{} is down", nodeConnector);
    // Dependency-manager injection of the cluster container service; the
    // unset variant clears the field only if it still holds the same instance.
909     void setClusterContainerService(IClusterContainerServices s) {
910         log.debug("Cluster Service set");
911         this.clusterContainerService = s;
914     void unsetClusterContainerService(IClusterContainerServices s) {
915         if (this.clusterContainerService == s) {
916             log.debug("Cluster Service removed!");
917             this.clusterContainerService = null;
922 * Function called by the dependency manager when all the required
923 * dependencies are satisfied
931 * Function called by the dependency manager when at least one
932 * dependency become unsatisfied or when the component is shutting
933 * down because for example bundle is being stopped.
940 * Function called by dependency manager after "init ()" is called
941 * and after the services provided by the class are registered in
942 * the service registry
949 * Function called by the dependency manager before the services
950 * exported by the component are unregistered, this will be
951      * followed by a "destroy ()" call
    // Dependency-manager injection of the switch manager; the unset variant
    // clears the field only if it still references the withdrawn instance.
957     public void setSwitchManager(ISwitchManager switchManager) {
958         this.switchManager = switchManager;
961     public void unsetSwitchManager(ISwitchManager switchManager) {
962         if (this.switchManager == switchManager) {
963             this.switchManager = null;