3 * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
5 * This program and the accompanying materials are made available under the
6 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
7 * and is available at http://www.eclipse.org/legal/epl-v10.html
10 package org.opendaylight.controller.samples.simpleforwarding.internal;
12 import java.util.ArrayList;
13 import java.util.EnumSet;
14 import java.util.HashMap;
15 import java.util.HashSet;
16 import java.util.Iterator;
17 import java.util.LinkedList;
18 import java.util.List;
21 import java.util.concurrent.ConcurrentMap;
23 import org.opendaylight.controller.clustering.services.CacheConfigException;
24 import org.opendaylight.controller.clustering.services.CacheExistException;
25 import org.opendaylight.controller.clustering.services.IClusterContainerServices;
26 import org.opendaylight.controller.clustering.services.IClusterServices;
27 import org.opendaylight.controller.forwardingrulesmanager.FlowEntry;
28 import org.opendaylight.controller.forwardingrulesmanager.IForwardingRulesManager;
29 import org.opendaylight.controller.hosttracker.IfIptoHost;
30 import org.opendaylight.controller.hosttracker.IfNewHostNotify;
31 import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
32 import org.opendaylight.controller.sal.action.Action;
33 import org.opendaylight.controller.sal.action.Output;
34 import org.opendaylight.controller.sal.action.PopVlan;
35 import org.opendaylight.controller.sal.action.SetDlDst;
36 import org.opendaylight.controller.sal.action.SetVlanId;
37 import org.opendaylight.controller.sal.core.Edge;
38 import org.opendaylight.controller.sal.core.Node;
39 import org.opendaylight.controller.sal.core.NodeConnector;
40 import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
41 import org.opendaylight.controller.sal.core.Path;
42 import org.opendaylight.controller.sal.core.Property;
43 import org.opendaylight.controller.sal.core.State;
44 import org.opendaylight.controller.sal.core.UpdateType;
45 import org.opendaylight.controller.sal.flowprogrammer.Flow;
46 import org.opendaylight.controller.sal.match.Match;
47 import org.opendaylight.controller.sal.match.MatchType;
48 import org.opendaylight.controller.sal.routing.IListenRoutingUpdates;
49 import org.opendaylight.controller.sal.routing.IRouting;
50 import org.opendaylight.controller.sal.utils.EtherTypes;
51 import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
52 import org.opendaylight.controller.sal.utils.Status;
53 import org.opendaylight.controller.switchmanager.IInventoryListener;
54 import org.opendaylight.controller.switchmanager.ISwitchManager;
55 import org.opendaylight.controller.topologymanager.ITopologyManager;
56 import org.slf4j.Logger;
57 import org.slf4j.LoggerFactory;
// Implements simple /32 destination-IP forwarding: reacts to new-host
// notifications, routing recalculations and inventory changes, and programs
// per-host flow rules on switches through the ForwardingRulesManager.
59 public class SimpleForwardingImpl implements IfNewHostNotify,
60 IListenRoutingUpdates, IInventoryListener {
// NOTE(review): logger is not declared final; convention would be
// "private static final Logger".
61 private static Logger log = LoggerFactory
62 .getLogger(SimpleForwardingImpl.class);
// Priority for every installed IP-switch flow; kept just above the default
// drop entries (see comment inside updatePerHostRuleInSW()).
63 private static short DEFAULT_IPSWITCH_PRIORITY = 1;
// Collaborating services, injected/removed by the dependency manager.
64 private IfIptoHost hostTracker;
65 private IForwardingRulesManager frm;
66 private ITopologyManager topologyManager;
67 private IRouting routing;
// Clustered cache: (host, switch) pair -> per-input-port flow entries
// prepared/programmed for that pair (bound in retrieveCaches()).
68 private ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>> rulesDB;
// Per-switch list of superseded flow entries awaiting HW removal
// (filled by addTobePrunedPolicy(), drained by pruneExcessRules()).
69 private Map<Node, List<FlowEntry>> tobePrunedPos = new HashMap<Node, List<FlowEntry>>();
70 private IClusterContainerServices clusterContainerService = null;
71 private ISwitchManager switchManager;
74 * Return codes from the programming of the perHost rules in HW
77 public enum RulesProgrammingReturnCode {
78 SUCCESS, FAILED_FEW_SWITCHES, FAILED_ALL_SWITCHES, FAILED_WRONG_PARAMS
81 public void setRouting(IRouting routing) {
82 this.routing = routing;
85 public void unsetRouting(IRouting routing) {
86 if (this.routing == routing) {
91 public ITopologyManager getTopologyManager() {
92 return topologyManager;
95 public void setTopologyManager(ITopologyManager topologyManager) {
96 log.debug("Setting topologyManager");
97 this.topologyManager = topologyManager;
100 public void unsetTopologyManager(ITopologyManager topologyManager) {
101 if (this.topologyManager == topologyManager) {
102 this.topologyManager = null;
106 public void setHostTracker(IfIptoHost hostTracker) {
107 log.debug("Setting HostTracker");
108 this.hostTracker = hostTracker;
111 public void setForwardingRulesManager(
112 IForwardingRulesManager forwardingRulesManager) {
113 log.debug("Setting ForwardingRulesManager");
114 this.frm = forwardingRulesManager;
117 public void unsetHostTracker(IfIptoHost hostTracker) {
118 if (this.hostTracker == hostTracker) {
119 this.hostTracker = null;
123 public void unsetForwardingRulesManager(
124 IForwardingRulesManager forwardingRulesManager) {
125 if (this.frm == forwardingRulesManager) {
131 * Function called when the bundle gets activated
// NOTE(review): method body is not visible in this excerpt — presumably it
// allocates and retrieves the clustered caches (see allocateCaches() /
// retrieveCaches()); confirm against the full source.
134 public void startUp() {
140 * Function called when the bundle gets stopped
// Tears down every per-host rule (HW + rulesDB) before the bundle stops.
143 public void shutDown() {
144 log.debug("Destroy all the host Rules given we are shutting down");
145 uninstallPerHostRules();
// Creates the clustered "forwarding.ipswitch.rules" cache in
// NON_TRANSACTIONAL mode. Logs and does nothing when the cluster container
// service has not been injected yet.
149 @SuppressWarnings("deprecation")
150 private void allocateCaches() {
151 if (this.clusterContainerService == null) {
152 log.info("un-initialized clusterContainerService, can't create cache");
157 clusterContainerService.createCache("forwarding.ipswitch.rules",
158 EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
// A pre-existing cache is left in place; a config error most likely means
// an unsupported cache mode was requested.
159 } catch (CacheExistException cee) {
160 log.error("\nCache already exists - destroy and recreate if needed");
161 } catch (CacheConfigException cce) {
162 log.error("\nCache configuration invalid - check cache mode");
// Looks up the clustered rules cache and binds it to rulesDB. The unchecked
// cast is unavoidable: the cache API returns an untyped ConcurrentMap.
166 @SuppressWarnings({ "unchecked", "deprecation" })
167 private void retrieveCaches() {
168 if (this.clusterContainerService == null) {
169 log.info("un-initialized clusterContainerService, can't retrieve cache");
173 rulesDB = (ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>>) clusterContainerService
174 .getCache("forwarding.ipswitch.rules");
// A null handle means allocateCaches() did not run or the cache was destroyed.
175 if (rulesDB == null) {
176 log.error("\nFailed to get rulesDB handle");
// Destroys the clustered rules cache; logs and does nothing when the cluster
// container service is absent.
180 @SuppressWarnings("deprecation")
181 private void destroyCaches() {
182 if (this.clusterContainerService == null) {
183 log.info("un-initialized clusterContainerService, can't destroy cache");
187 clusterContainerService.destroyCache("forwarding.ipswitch.rules");
/*
 * Computes and stores in rulesDB the flow entry that switch "currNode" needs
 * in order to forward traffic toward "host" (attached to "rootNode").  For
 * each candidate input port it builds a /32 destination-IP match
 * (DL_TYPE=IPv4, NW_DST=host address) plus the actions: on rootNode itself
 * the destination MAC is rewritten and, in container mode, the VLAN tag is
 * popped before output on the host port; on transit switches traffic goes
 * out of link.getTailNodeConnector(), with a VLAN tag pushed on internal
 * (switch-to-switch) links.  "link" is optional (null in the rootNode-only
 * call); host, key, currNode and rootNode are mandatory.  When an existing
 * entry for the same input port is replaced by one with a different match,
 * the old entry is queued via addTobePrunedPolicy() for later HW cleanup.
 */
190 @SuppressWarnings("unused")
191 private void updatePerHostRuleInSW(HostNodeConnector host, Node currNode,
192 Node rootNode, Edge link, HostNodePair key,
193 Set<NodeConnector> passedPorts) {
195 // link parameter it's optional
196 if (host == null || key == null || currNode == null || rootNode == null) {
// NOTE(review): the passed-in port set is overwritten below with the single
// wildcard ALL connector — the SAL container equivalent is still a TODO.
199 Set<NodeConnector> ports = passedPorts;
200 // TODO: Replace this with SAL equivalent when available
201 //if (container == null) {
202 ports = new HashSet<NodeConnector>();
203 ports.add(NodeConnectorCreator.createNodeConnector(
204 NodeConnectorIDType.ALL, NodeConnector.SPECIALNODECONNECTORID,
// Existing per-port entries for this (host, switch) pair, if any.
208 HashMap<NodeConnector, FlowEntry> pos = this.rulesDB.get(key);
210 pos = new HashMap<NodeConnector, FlowEntry>();
213 log.debug("Empty port list, nothing to do");
216 for (NodeConnector inPort : ports) {
218 * skip the port connected to the target host
220 if (currNode.equals(rootNode)
221 && (host.getnodeConnector().equals(inPort))) {
// Pull out the entry previously programmed for this input port (may be null);
// used at the end to detect a match change that requires pruning.
224 FlowEntry removed_po = pos.remove(inPort);
225 Match match = new Match();
226 List<Action> actions = new ArrayList<Action>();
227 // IP destination based forwarding
228 //on /32 entries only!
229 match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
230 match.setField(MatchType.NW_DST, host.getNetworkAddress());
232 //Action for the policy if to
233 //forward to a port except on the
234 //switch where the host sits,
235 //which is to rewrite also the MAC
236 //and to forward on the Host port
237 NodeConnector outPort = null;
// Destination switch: rewrite the MAC and send straight to the host port.
239 if (currNode.equals(rootNode)) {
240 outPort = host.getnodeConnector();
241 if (inPort.equals(outPort)) {
247 actions.add(new SetDlDst(host.getDataLayerAddressBytes()));
249 if (!inPort.getType().equals(
250 NodeConnectorIDType.ALL)) {
252 * Container mode: at the destination switch, we need to strip out the tag (VLAN)
254 actions.add(new PopVlan());
258 * currNode is NOT the rootNode
// Transit switch: forward along the path edge toward rootNode.
261 outPort = link.getTailNodeConnector();
262 if (inPort.equals(outPort)) {
264 * skip the outgoing port
269 * If outPort is network link, add VLAN tag
271 if (topologyManager.isInternal(outPort)) {
272 log.debug("outPort {}/{} is internal uplink port",
275 log.debug("outPort {}/{} is host facing port",
279 if ((!inPort.getType().equals(
280 NodeConnectorIDType.ALL))
281 && (topologyManager.isInternal(outPort))) {
282 Node nextNode = link.getHeadNodeConnector()
284 // TODO: Replace this with SAL equivalent
285 //short tag = container.getTag((Long)nextNode.getNodeID());
288 log.debug("adding SET_VLAN {} for traffic " +
289 "leaving {}/{} toward switch {}",
290 new Object[] { tag, currNode, outPort,
292 actions.add(new SetVlanId(tag));
294 log.debug("No tag assigned to switch {}", nextNode);
299 if (outPort != null) {
300 actions.add(new Output(outPort));
302 if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
304 * include input port in the flow match field
306 match.setField(MatchType.IN_PORT, inPort);
308 if (topologyManager.isInternal(inPort)) {
309 log.debug("inPort {}/{} is internal uplink port", currNode,
312 log.debug("inPort {}/{} is host facing port", currNode,
316 * for incoming network link; if the VLAN tag is defined, include it for incoming flow matching
318 if (topologyManager.isInternal(inPort)) {
319 // TODO: Replace this with SAL equivalent
320 //short tag = container.getTag((Long)currNode.getNodeID());
323 log.debug("adding MATCH VLAN {} for traffic entering" +
325 new Object[] {tag, currNode, inPort});
326 match.setField(MatchType.DL_VLAN, tag);
328 log.debug("No tag assigned to switch {}", currNode);
332 // Make sure the priority for IP switch entries is
333 // set to a level just above default drop entries
// Permanent flow: both timeouts are 0 (never expires on its own).
334 Flow flow = new Flow(match, actions);
335 flow.setIdleTimeout((short) 0);
336 flow.setHardTimeout((short) 0);
337 flow.setPriority(DEFAULT_IPSWITCH_PRIORITY);
339 String policyName = host.getNetworkAddress().getHostAddress()
341 String flowName = "["
342 + (!inPort.getType().equals(NodeConnectorIDType.ALL) ?
343 (inPort.getID()).toString()
345 + host.getNetworkAddress().getHostAddress() + "/32 on N "
347 FlowEntry po = new FlowEntry(policyName, flowName, flow, currNode);
349 // Now save the rule in the DB rule,
350 // so on updates from topology we can
353 this.rulesDB.put(key, pos);
354 if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
355 log.debug("Adding Match(inPort = {} , DIP = {})" +
356 " Action(outPort= {}) to node {}",
357 new Object[] { inPort,
358 host.getNetworkAddress().getHostAddress(),
// If the replaced entry had a different match, the old rule would linger in
// HW (different match = different flow) — queue it for pruning.
360 if ((removed_po != null)
361 && (!po.getFlow().getMatch().equals(
362 removed_po.getFlow().getMatch()))) {
363 log.debug("Old Flow match: {}, New Flow match: {}",
364 removed_po.getFlow().getMatch(), po.getFlow()
366 addTobePrunedPolicy(currNode, removed_po, po);
370 log.debug("Adding policyMatch(DIP = {}) Action(outPort= {}) " +
371 "to node {}", new Object[] {
372 host.getNetworkAddress().getHostAddress(), outPort,
379 * Calculate the per-Host rules to be installed in the rulesDB,
380 * and that will later on be installed in HW, this routine will
381 * implicitly calculate the shortest path tree among the switch
382 * to which the host is attached and all the other switches in the
383 * network and will automatically create all the rules that allow
384 * a /32 destination IP based forwarding, as in traditional IP
387 * @param host Host for which we are going to prepare the rules in the rulesDB
389 * @return A set of switches touched by the calculation
391 private Set<Node> preparePerHostRules(HostNodeConnector host) {
// Guard clauses: all collaborating services and the rules cache must be set.
395 if (this.routing == null) {
398 if (this.switchManager == null) {
401 if (this.rulesDB == null) {
405 Node rootNode = host.getnodeconnectorNode();
406 Set<Node> nodes = this.switchManager.getNodes();
407 Set<Node> switchesToProgram = new HashSet<Node>();
409 HashMap<NodeConnector, FlowEntry> pos;
// Walk every switch except rootNode, following its shortest path to rootNode.
412 for (Node node : nodes) {
413 if (node.equals(rootNode)) {
414 // We skip it because for the node with host attached
415 // we will process in every case even if there are no
420 Path res = this.routing.getRoute(node, rootNode);
// No path to rootNode: uninstall and forget any rules previously computed
// for this (host, node) pair.
421 if ((res == null) || ((links = res.getEdges()) == null)) {
422 // Still the path that connect node to rootNode
424 log.debug("NO Route/Path between SW[{}] --> SW[{}] cleaning " +
425 "potentially existing entries", node, rootNode);
426 key = new HostNodePair(host, node);
427 pos = this.rulesDB.get(key);
429 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
432 //Uninstall the policy
433 this.frm.uninstallFlowEntry(po);
436 this.rulesDB.remove(key);
441 log.debug("Route between SW[{}] --> SW[{}]", node, rootNode);
// Walk the path edge by edge, computing the rule for each hop.
443 Node currNode = node;
444 key = new HostNodePair(host, currNode);
446 for (curr = 0; curr < links.size(); curr++) {
447 link = links.get(curr);
449 log.error("Could not retrieve the Link");
453 log.debug(link.toString());
455 // Index all the switches to be programmed
456 // switchesToProgram.add(currNode);
457 Set<NodeConnector> ports = null;
458 ports = switchManager.getUpNodeConnectors(currNode);
459 updatePerHostRuleInSW(host, currNode, rootNode, link, key,
461 if ((this.rulesDB.get(key)) != null) {
463 * Calling updatePerHostRuleInSW() doesn't guarantee that rules will be
464 * added in currNode (e.g, there is only one link from currNode to rootNode
465 * This check makes sure that there are some rules in the rulesDB for the
466 * given key prior to adding switch to switchesToProgram
468 switchesToProgram.add(currNode);
// Advance to the next hop on the path.
470 currNode = link.getHeadNodeConnector().getNode();
471 key = new HostNodePair(host, currNode);
475 // This rule will be added no matter if any topology is built
476 // or no, it serve as a way to handle the case of a node with
477 // multiple hosts attached to it but not yet connected to the
479 switchesToProgram.add(rootNode);
480 Set<NodeConnector> ports = switchManager
481 .getUpNodeConnectors(rootNode);
// rootNode rule: link is null (local delivery to the host port).
482 updatePerHostRuleInSW(host, rootNode, rootNode, null, new HostNodePair(
483 host, rootNode), ports);
485 // log.debug("Getting out at the end!");
486 return switchesToProgram;
490 * Calculate the per-Host rules to be installed in the rulesDB
491 * from a specific switch when a host facing port comes up.
492 * These rules will later on be installed in HW. This routine
493 * will implicitly calculate the shortest path from the switch
494 * where the port has come up to the switch where host is ,
495 * attached and will automatically create all the rules that allow
496 * a /32 destination IP based forwarding, as in traditional IP
499 * @param host Host for which we are going to prepare the rules in the rulesDB
500 * @param node Switch where the port has come up
502 * @return A set of switches touched by the calculation
504 private Set<Node> preparePerHostPerSwitchRules(HostNodeConnector host,
505 Node node, NodeConnector swport) {
// Guard clauses: arguments, services and the rules cache must all be present.
506 if ((host == null) || (node == null)) {
509 if (this.routing == null) {
512 if (this.switchManager == null) {
515 if (this.rulesDB == null) {
519 Node rootNode = host.getnodeconnectorNode();
520 Set<Node> switchesToProgram = new HashSet<Node>();
522 Map<NodeConnector, FlowEntry> pos;
524 Set<NodeConnector> ports = new HashSet<NodeConnector>();
528 Path res = this.routing.getRoute(node, rootNode);
// No path to the host's switch: drop any previously computed rules for
// this (host, node) pair, both from HW and from the rulesDB.
529 if ((res == null) || ((links = res.getEdges()) == null)) {
530 // Still the path that connect node to rootNode
532 log.debug("NO Route/Path between SW[{}] --> SW[{}] cleaning " +
533 "potentially existing entries", node, rootNode);
534 key = new HostNodePair(host, node);
535 pos = this.rulesDB.get(key);
537 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
540 //Uninstall the policy
541 this.frm.uninstallFlowEntry(po);
544 this.rulesDB.remove(key);
549 log.debug("Route between SW[{}] --> SW[{}]", node, rootNode);
551 Node currNode = node;
552 key = new HostNodePair(host, currNode);
554 for (curr = 0; curr < links.size(); curr++) {
555 link = links.get(curr);
557 log.error("Could not retrieve the Link");
561 log.debug("Link [{}/{}] --> [{}/{}]", new Object[] {
562 currNode, link.getHeadNodeConnector(),
563 link.getHeadNodeConnector().getNode(),
564 link.getTailNodeConnector()});
566 // Index all the switches to be programmed
567 switchesToProgram.add(currNode);
568 updatePerHostRuleInSW(host, currNode, rootNode, link, key, ports);
569 break; // come out of the loop for port up case, interested only in programming one switch
572 // This rule will be added no matter if any topology is built
573 // or no, it serve as a way to handle the case of a node with
574 // multiple hosts attached to it but not yet connected to the
576 // switchesToProgram.add(rootNode);
577 //updatePerHostRuleInSW(host, rootNode,
579 // new HostNodePair(host, rootNode),ports);
581 // log.debug("Getting out at the end!");
582 return switchesToProgram;
586 * Routine that fetch the per-Host rules from the rulesDB and
587 * install in HW, the one having the same match rules will be
588 * overwritten silently.
590 * @param host host for which we want to install in HW the per-Host rules
591 * @param switchesToProgram list of switches to be programmed in
592 * HW, usually are them all, but better to be explicit, that list
593 * may change with time based on new switch addition/removal
595 * @return a return code that convey the programming status of the HW
597 private RulesProgrammingReturnCode installPerHostRules(
598 HostNodeConnector host, Set<Node> switchesToProgram) {
599 RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
600 if (host == null || switchesToProgram == null) {
601 return RulesProgrammingReturnCode.FAILED_WRONG_PARAMS;
603 Map<NodeConnector, FlowEntry> pos;
605 // Now program every single switch
606 log.debug("Inside installPerHostRules");
607 for (Node swId : switchesToProgram) {
608 HostNodePair key = new HostNodePair(host, swId);
609 pos = this.rulesDB.get(key);
613 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
616 // Populate the Policy field now
617 Status poStatus = this.frm.installFlowEntry(po);
// On failure: downgrade the return code and forget the rule, since it is
// not actually in HW. Note the overall code reports partial failure only.
618 if (!poStatus.isSuccess()) {
619 log.error("Failed to install policy: "
620 + po.getGroupName() + " ("
621 + poStatus.getDescription() + ")");
623 retCode = RulesProgrammingReturnCode.FAILED_FEW_SWITCHES;
624 // Remove the entry from the DB, it was not installed!
625 this.rulesDB.remove(key);
627 log.debug("Successfully installed policy "
628 + po.toString() + " on switch " + swId);
631 log.error("Cannot find a policy for SW:({}) Host: ({})",
633 /* // Now dump every single rule */
634 /* for (HostNodePair dumpkey : this.rulesDB.keySet()) { */
635 /* po = this.rulesDB.get(dumpkey); */
636 /* log.debug("Dumping entry H{" + dumpkey.getHost() + "} S{" + dumpkey.getSwitchId() + "} = {" + (po == null ? "null policy" : po)); */
641 log.debug("Leaving installPerHostRules");
646 * Cleanup all the host rules for a given host
648 * @param host Host for which the host rules need to be cleaned
649 * up, the host could be null in that case it match all the hosts
651 * @return a return code that convey the programming status of the HW
653 private RulesProgrammingReturnCode uninstallPerHostRules(
654 HostNodeConnector host) {
655 RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
656 Map<NodeConnector, FlowEntry> pos;
658 // Now program every single switch
// Iterate every (host, switch) pair; removal during iteration is safe here
// because rulesDB is a ConcurrentMap.
659 for (HostNodePair key : this.rulesDB.keySet()) {
660 if (host == null || key.getHost().equals(host)) {
661 pos = this.rulesDB.get(key);
662 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
665 // Uninstall the policy
666 this.frm.uninstallFlowEntry(po);
669 this.rulesDB.remove(key);
676 * Cleanup all the host rules for a given node, triggered when the
677 * switch disconnects, so there is no reason for Hw cleanup
678 * because it's disconnected anyhow
679 * TBD - Revisit above stmt in light of CSCus88743
680 * @param targetNode Node for which we want to do cleanup
683 private void uninstallPerNodeRules(Node targetNode) {
684 //RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
685 Map<NodeConnector, FlowEntry> pos;
688 // Now program every single switch
// targetNode == null matches every node; removal during iteration is safe
// because rulesDB is a ConcurrentMap.
689 for (HostNodePair key : this.rulesDB.keySet()) {
690 Node node = key.getNode();
691 if (targetNode == null || node.equals(targetNode)) {
692 log.debug("Work on {} host {}", node, key.getHost());
693 pos = this.rulesDB.get(key);
694 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
697 // Uninstall the policy
698 this.frm.uninstallFlowEntry(po);
701 log.debug("Remove {}", key);
702 this.rulesDB.remove(key);
708 * Cleanup all the host rules currently present in the rulesDB
710 * @return a return code that convey the programming status of the HW
712 private RulesProgrammingReturnCode uninstallPerHostRules() {
713 return uninstallPerHostRules(null);
// IListenRoutingUpdates callback: after a routing recalculation, recompute,
// reinstall and prune the per-host rules for every known host.
717 public void recalculateDone() {
718 if (this.hostTracker == null) {
719 //Not yet ready to process all the updates
722 Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
723 for (HostNodeConnector host : allHosts) {
724 Set<Node> switches = preparePerHostRules(host);
725 if (switches != null) {
726 // This will refresh existing rules, by overwriting
728 installPerHostRules(host, switches);
// Remove stale entries whose match changed during the recalculation.
729 pruneExcessRules(switches);
// Queues a superseded flow entry (po), replaced by new_po, on the per-switch
// to-be-pruned list; the list is created lazily on first use for swId.
734 void addTobePrunedPolicy(Node swId, FlowEntry po, FlowEntry new_po) {
735 List<FlowEntry> pl = tobePrunedPos.get(swId);
737 pl = new LinkedList<FlowEntry>();
738 tobePrunedPos.put(swId, pl);
741 log.debug("Adding Pruned Policy for SwId: {}", swId);
742 log.debug("Old Policy: {}", po);
743 log.debug("New Policy: {}", new_po);
// Uninstalls from HW every queued stale flow entry for each switch in the
// given set (entries queued earlier by addTobePrunedPolicy()).
746 private void pruneExcessRules(Set<Node> switches) {
747 for (Node swId : switches) {
748 List<FlowEntry> pl = tobePrunedPos.get(swId);
752 "Policies for Switch: {} in the list to be deleted: {}",
754 Iterator<FlowEntry> plIter = pl.iterator();
755 //for (Policy po: pl) {
756 while (plIter.hasNext()) {
757 FlowEntry po = plIter.next();
// NOTE(review): logged at error level although this is routine cleanup —
// debug would seem more appropriate; confirm intent.
758 log.error("Removing Policy, Switch: {} Policy: {}", swId,
760 this.frm.uninstallFlowEntry(po);
764 // tobePrunedPos.remove(swId);
769 * A Host facing port has come up in a container. Add rules on the switch where this
770 * port has come up for all the known hosts to the controller.
771 * @param node switch where the port came up
772 * @param swPort port which came up
774 private void updateRulesforHIFup(Node node, NodeConnector swPort) {
775 if (this.hostTracker == null) {
776 //Not yet ready to process all the updates
779 log.debug("Host Facing Port in a container came up, install the rules for all hosts from this port !");
780 Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
781 for (HostNodeConnector host : allHosts) {
// Skip the host sitting directly behind the port that just came up.
782 if (node.equals(host.getnodeconnectorNode())
783 && swPort.equals(host.getnodeConnector())) {
785 * This host resides behind the same switch and port for which a port up
786 * message is received. Ideally this should not happen, but if it does,
787 * don't program any rules for this host
791 Set<Node> switches = preparePerHostPerSwitchRules(host, node,
793 if (switches != null) {
794 // This will refresh existing rules, by overwriting
796 installPerHostRules(host, switches);
// IfNewHostNotify callback: a new host was discovered — compute and install
// its per-host forwarding rules across the touched switches.
803 public void notifyHTClient(HostNodeConnector host) {
807 Set<Node> switches = preparePerHostRules(host);
808 if (switches != null) {
809 installPerHostRules(host, switches);
// IfNewHostNotify callback: a host disappeared — tear down all of its rules.
814 public void notifyHTClientHostRemoved(HostNodeConnector host) {
818 uninstallPerHostRules(host);
// IInventoryListener callback for node (switch) changes.  Only the removal
// path is visible in this excerpt: a departed switch gets its rulesDB
// entries cleaned up (no HW cleanup — the switch is gone).
822 public void notifyNode(Node node, UpdateType type,
823 Map<String, Property> propMap) {
829 log.debug("Node {} gone, doing a cleanup", node);
830 uninstallPerNodeRules(node);
// IInventoryListener callback for port changes: dispatches to the up/down
// handler based on the port's State property (EDGE_UP or not).
838 public void notifyNodeConnector(NodeConnector nodeConnector,
839 UpdateType type, Map<String, Property> propMap) {
840 if (nodeConnector == null)
851 State state = (State) propMap.get(State.StatePropName);
852 if ((state != null) && (state.getValue() == State.EDGE_UP)) {
861 handleNodeConnectorStatusUp(nodeConnector);
863 handleNodeConnectorStatusDown(nodeConnector);
// A port came up: if it is host-facing (not an internal switch-to-switch
// link), reprogram rules for all known hosts reachable from that switch.
867 private void handleNodeConnectorStatusUp(NodeConnector nodeConnector) {
868 if (topologyManager == null) {
869 log.debug("topologyManager is not set yet");
// Internal (uplink) ports are ignored here; only host-facing ports matter.
873 if (topologyManager.isInternal(nodeConnector)) {
874 log.debug("{} is not a host facing link", nodeConnector);
878 log.debug("{} is up", nodeConnector);
879 updateRulesforHIFup(nodeConnector.getNode(), nodeConnector);
// A port went down.  Only the log line is visible in this excerpt; any
// cleanup logic that may follow is not shown — confirm against full source.
882 private void handleNodeConnectorStatusDown(NodeConnector nodeConnector) {
883 log.debug("{} is down", nodeConnector);
886 void setClusterContainerService(IClusterContainerServices s) {
887 log.debug("Cluster Service set");
888 this.clusterContainerService = s;
891 void unsetClusterContainerService(IClusterContainerServices s) {
892 if (this.clusterContainerService == s) {
893 log.debug("Cluster Service removed!");
894 this.clusterContainerService = null;
899 * Function called by the dependency manager when all the required
900 * dependencies are satisfied
908 * Function called by the dependency manager when at least one
909 * dependency become unsatisfied or when the component is shutting
910 * down because for example bundle is being stopped.
917 * Function called by dependency manager after "init ()" is called
918 * and after the services provided by the class are registered in
919 * the service registry
926 * Function called by the dependency manager before the services
927 * exported by the component are unregistered, this will be
928 * followed by a "destroy ()" calls
934 public void setSwitchManager(ISwitchManager switchManager) {
935 this.switchManager = switchManager;
938 public void unsetSwitchManager(ISwitchManager switchManager) {
939 if (this.switchManager == switchManager) {
940 this.switchManager = null;