3 * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
5 * This program and the accompanying materials are made available under the
6 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
7 * and is available at http://www.eclipse.org/legal/epl-v10.html
10 package org.opendaylight.controller.samples.simpleforwarding.internal;
12 import java.util.ArrayList;
13 import java.util.EnumSet;
14 import java.util.HashMap;
15 import java.util.HashSet;
16 import java.util.Iterator;
17 import java.util.LinkedList;
18 import java.util.List;
21 import java.util.concurrent.ConcurrentMap;
23 import org.opendaylight.controller.clustering.services.CacheConfigException;
24 import org.opendaylight.controller.clustering.services.CacheExistException;
25 import org.opendaylight.controller.clustering.services.IClusterContainerServices;
26 import org.opendaylight.controller.clustering.services.IClusterServices;
27 import org.opendaylight.controller.forwardingrulesmanager.FlowEntry;
28 import org.opendaylight.controller.forwardingrulesmanager.IForwardingRulesManager;
29 import org.opendaylight.controller.hosttracker.IfIptoHost;
30 import org.opendaylight.controller.hosttracker.IfNewHostNotify;
31 import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
32 import org.opendaylight.controller.sal.action.Action;
33 import org.opendaylight.controller.sal.action.Output;
34 import org.opendaylight.controller.sal.action.PopVlan;
35 import org.opendaylight.controller.sal.action.SetDlDst;
36 import org.opendaylight.controller.sal.action.SetVlanId;
37 import org.opendaylight.controller.sal.core.Edge;
38 import org.opendaylight.controller.sal.core.Node;
39 import org.opendaylight.controller.sal.core.NodeConnector;
40 import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
41 import org.opendaylight.controller.sal.core.Path;
42 import org.opendaylight.controller.sal.core.Property;
43 import org.opendaylight.controller.sal.core.State;
44 import org.opendaylight.controller.sal.core.UpdateType;
45 import org.opendaylight.controller.sal.flowprogrammer.Flow;
46 import org.opendaylight.controller.sal.match.Match;
47 import org.opendaylight.controller.sal.match.MatchType;
48 import org.opendaylight.controller.sal.routing.IListenRoutingUpdates;
49 import org.opendaylight.controller.sal.routing.IRouting;
50 import org.opendaylight.controller.sal.utils.EtherTypes;
51 import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
52 import org.opendaylight.controller.sal.utils.Status;
53 import org.opendaylight.controller.switchmanager.IInventoryListener;
54 import org.opendaylight.controller.switchmanager.ISwitchManager;
55 import org.opendaylight.controller.topologymanager.ITopologyManager;
56 import org.slf4j.Logger;
57 import org.slf4j.LoggerFactory;
public class SimpleForwardingImpl implements IfNewHostNotify,
        IListenRoutingUpdates, IInventoryListener {
    private static Logger log = LoggerFactory
            .getLogger(SimpleForwardingImpl.class);
    // Priority for the /32 IP-forwarding entries; per the comment near the
    // Flow construction below, kept just above the default drop entries.
    private static short DEFAULT_IPSWITCH_PRIORITY = 1;
    // Services wired in/out by the dependency manager via the set*/unset* methods.
    private IfIptoHost hostTracker;
    private IForwardingRulesManager frm;
    private ITopologyManager topologyManager;
    private IRouting routing;
    // Clustered cache ("forwarding.ipswitch.rules", see retrieveCaches()):
    // for each (host, switch) pair, the flow entries keyed by ingress port.
    private ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>> rulesDB;
    // Superseded flow entries queued per switch for later removal
    // (populated by addTobePrunedPolicy, drained by pruneExcessRules).
    private Map<Node, List<FlowEntry>> tobePrunedPos = new HashMap<Node, List<FlowEntry>>();
    private IClusterContainerServices clusterContainerService = null;
    private ISwitchManager switchManager;
/**
 * Return codes from the programming of the perHost rules in HW.
 */
public enum RulesProgrammingReturnCode {
    SUCCESS, FAILED_FEW_SWITCHES, FAILED_ALL_SWITCHES, FAILED_WRONG_PARAMS
}
81 public void setRouting(IRouting routing) {
82 this.routing = routing;
85 public void unsetRouting(IRouting routing) {
86 if (this.routing == routing) {
91 public ITopologyManager getTopologyManager() {
92 return topologyManager;
95 public void setTopologyManager(ITopologyManager topologyManager) {
96 log.debug("Setting topologyManager");
97 this.topologyManager = topologyManager;
100 public void unsetTopologyManager(ITopologyManager topologyManager) {
101 if (this.topologyManager == topologyManager) {
102 this.topologyManager = null;
106 public void setHostTracker(IfIptoHost hostTracker) {
107 log.debug("Setting HostTracker");
108 this.hostTracker = hostTracker;
111 public void setForwardingRulesManager(
112 IForwardingRulesManager forwardingRulesManager) {
113 log.debug("Setting ForwardingRulesManager");
114 this.frm = forwardingRulesManager;
117 public void unsetHostTracker(IfIptoHost hostTracker) {
118 if (this.hostTracker == hostTracker) {
119 this.hostTracker = null;
123 public void unsetForwardingRulesManager(
124 IForwardingRulesManager forwardingRulesManager) {
125 if (this.frm == forwardingRulesManager) {
/**
 * Function called when the bundle gets activated.
 *
 * NOTE(review): the body of this method is not visible in this listing
 * (interior lines appear to have been elided); only the opening of the
 * definition is shown, kept verbatim.
 */
public void startUp() {
140 * Function called when the bundle gets stopped
143 public void shutDown() {
144 log.debug("Destroy all the host Rules given we are shutting down");
145 uninstallPerHostRules();
149 @SuppressWarnings("deprecation")
150 private void allocateCaches() {
151 if (this.clusterContainerService == null) {
152 log.info("un-initialized clusterContainerService, can't create cache");
157 clusterContainerService.createCache("forwarding.ipswitch.rules",
158 EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
159 } catch (CacheExistException cee) {
160 log.error("\nCache already exists - destroy and recreate if needed");
161 } catch (CacheConfigException cce) {
162 log.error("\nCache configuration invalid - check cache mode");
166 @SuppressWarnings({ "unchecked", "deprecation" })
167 private void retrieveCaches() {
168 if (this.clusterContainerService == null) {
169 log.info("un-initialized clusterContainerService, can't retrieve cache");
173 rulesDB = (ConcurrentMap<HostNodePair, HashMap<NodeConnector, FlowEntry>>) clusterContainerService
174 .getCache("forwarding.ipswitch.rules");
175 if (rulesDB == null) {
176 log.error("\nFailed to get rulesDB handle");
180 @SuppressWarnings("deprecation")
181 private void destroyCaches() {
182 if (this.clusterContainerService == null) {
183 log.info("un-initialized clusterContainerService, can't destroy cache");
187 clusterContainerService.destroyCache("forwarding.ipswitch.rules");
/**
 * Computes the /32 destination-IP flow entry for {@code host} on switch
 * {@code currNode} (one candidate entry per ingress port) and stores it
 * in {@code rulesDB} under {@code key}. When a previous entry for the
 * same ingress port has a different match, it is queued for pruning via
 * addTobePrunedPolicy().
 *
 * @param host        target host the traffic is destined to
 * @param currNode    switch being programmed
 * @param rootNode    switch the host is attached to
 * @param link        next hop toward {@code rootNode}; optional, used
 *                    only when {@code currNode} differs from {@code rootNode}
 * @param key         (host, node) pair indexing {@code rulesDB}
 * @param passedPorts candidate ingress ports on {@code currNode}
 *
 * NOTE(review): this listing appears to have interior lines elided
 * (several statements/braces are missing); code lines below are kept
 * verbatim, only comments were added or repaired.
 */
@SuppressWarnings("unused")
private void updatePerHostRuleInSW(HostNodeConnector host, Node currNode,
        Node rootNode, Edge link, HostNodePair key,
        Set<NodeConnector> passedPorts) {

    // link parameter it's optional
    if (host == null || key == null || currNode == null || rootNode == null) {
    Set<NodeConnector> ports = passedPorts;
    // TODO: Replace this with SAL equivalent when available
    //if (container == null) {
    // Default container: a single wildcard ("ALL") connector stands in for
    // every ingress port on the switch.
    ports = new HashSet<NodeConnector>();
    ports.add(NodeConnectorCreator.createNodeConnector(
            NodeConnectorIDType.ALL, NodeConnector.SPECIALNODECONNECTORID,
    // Flow entries already known for this (host, node), keyed by ingress port.
    HashMap<NodeConnector, FlowEntry> pos = this.rulesDB.get(key);
        pos = new HashMap<NodeConnector, FlowEntry>();
        log.debug("Empty port list, nothing to do");
    for (NodeConnector inPort : ports) {
        // skip the port connected to the target host
        if (currNode.equals(rootNode)
                && (host.getnodeConnector().equals(inPort))) {
        FlowEntry removed_po = pos.remove(inPort);
        Match match = new Match();
        List<Action> actions = new ArrayList<Action>();
        // IP destination based forwarding
        //on /32 entries only!
        match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
        match.setField(MatchType.NW_DST, host.getNetworkAddress());

        //Action for the policy if to
        //forward to a port except on the
        //switch where the host sits,
        //which is to rewrite also the MAC
        //and to forward on the Host port
        NodeConnector outPort = null;

        if (currNode.equals(rootNode)) {
            outPort = host.getnodeConnector();
            if (inPort.equals(outPort)) {
            // Last hop: rewrite the destination MAC to the host's address.
            actions.add(new SetDlDst(host.getDataLayerAddressBytes()));
            if (!inPort.getType().equals(
                    NodeConnectorIDType.ALL)) {
                // Container mode: at the destination switch, we need to strip out the tag (VLAN)
                actions.add(new PopVlan());
            // currNode is NOT the rootNode
            outPort = link.getTailNodeConnector();
            if (inPort.equals(outPort)) {
                // skip the outgoing port
            // If outPort is network link, add VLAN tag
            if (topologyManager.isInternal(outPort)) {
                log.debug("outPort {}/{} is internal uplink port",
                log.debug("outPort {}/{} is host facing port",
            if ((!inPort.getType().equals(
                    NodeConnectorIDType.ALL))
                    && (topologyManager.isInternal(outPort))) {
                Node nextNode = link.getHeadNodeConnector()
                // TODO: Replace this with SAL equivalent
                //short tag = container.getTag((Long)nextNode.getNodeID());
                log.debug("adding SET_VLAN " + tag
                        + " for traffic leaving " + currNode + "/"
                        + outPort + "toward switch " + nextNode);
                actions.add(new SetVlanId(tag));
                log.debug("No tag assigned to switch " + nextNode);
        if (outPort != null) {
            actions.add(new Output(outPort));
        if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
            // include input port in the flow match field
            match.setField(MatchType.IN_PORT, inPort);
            if (topologyManager.isInternal(inPort)) {
                log.debug("inPort {}/{} is internal uplink port", currNode,
                log.debug("inPort {}/{} is host facing port", currNode,
            // for incoming network link; if the VLAN tag is defined, include it for incoming flow matching
            if (topologyManager.isInternal(inPort)) {
                // TODO: Replace this with SAL equivalent
                //short tag = container.getTag((Long)currNode.getNodeID());
                log.debug("adding MATCH VLAN " + tag
                        + " for traffic entering " + currNode + "/"
                match.setField(MatchType.DL_VLAN, tag);
                log.debug("No tag assigned to switch " + currNode);
        // Make sure the priority for IP switch entries is
        // set to a level just above default drop entries
        Flow flow = new Flow(match, actions);
        flow.setIdleTimeout((short) 0);
        flow.setHardTimeout((short) 0);
        flow.setPriority(DEFAULT_IPSWITCH_PRIORITY);
        String policyName = host.getNetworkAddress().getHostAddress()
        String flowName = "["
                + (!inPort.getType().equals(NodeConnectorIDType.ALL) ?
                (inPort.getID()).toString()
                + host.getNetworkAddress().getHostAddress() + "/32 on N "
        FlowEntry po = new FlowEntry(policyName, flowName, flow, currNode);

        // Now save the rule in the DB rule,
        // so on updates from topology we can
        this.rulesDB.put(key, pos);
        if (!inPort.getType().equals(NodeConnectorIDType.ALL)) {
            log.debug("Adding Match(inPort=" + inPort + ",DIP="
                    + host.getNetworkAddress().getHostAddress()
                    + ") Action(outPort=" + outPort + ") to node "
            // A changed match won't be overwritten in HW by the new entry,
            // so the old one must be explicitly queued for removal.
            if ((removed_po != null)
                    && (!po.getFlow().getMatch().equals(
                            removed_po.getFlow().getMatch()))) {
                log.debug("Old Flow match: {}, New Flow match: {}",
                        removed_po.getFlow().getMatch(), po.getFlow()
                addTobePrunedPolicy(currNode, removed_po, po);
            log.debug("Adding policy Match(DIP="
                    + host.getNetworkAddress().getHostAddress()
                    + ") Action(outPort=" + outPort + ") to node "
/**
 * Calculate the per-Host rules to be installed in the rulesDB,
 * and that will later on be installed in HW; this routine will
 * implicitly calculate the shortest path tree among the switch
 * to which the host is attached and all the other switches in the
 * network, and will automatically create all the rules that allow
 * a /32 destination IP based forwarding, as in traditional IP networks.
 *
 * @param host Host for which we are going to prepare the rules in the rulesDB
 * @return A set of switches touched by the calculation
 *
 * NOTE(review): interior lines of this method appear to be elided in
 * this listing; code lines are kept verbatim, only comments changed.
 */
private Set<Node> preparePerHostRules(HostNodeConnector host) {
    // Bail out when a required service or the rules cache is not wired yet.
    if (this.routing == null) {
    if (this.switchManager == null) {
    if (this.rulesDB == null) {
    Node rootNode = host.getnodeconnectorNode();
    Set<Node> nodes = this.switchManager.getNodes();
    Set<Node> switchesToProgram = new HashSet<Node>();
    HashMap<NodeConnector, FlowEntry> pos;
    for (Node node : nodes) {
        if (node.equals(rootNode)) {
            // We skip it because for the node with host attached
            // we will process in every case even if there are no
            Path res = this.routing.getRoute(node, rootNode);
        if ((res == null) || ((links = res.getEdges()) == null)) {
            // Still the path that connect node to rootNode
            log.debug("NO Route/Path between SW[" + node + "] --> SW["
                    + rootNode + "] cleaning potentially existing entries");
            key = new HostNodePair(host, node);
            pos = this.rulesDB.get(key);
            for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
                //Uninstall the policy
                this.frm.uninstallFlowEntry(po);
            this.rulesDB.remove(key);
        log.debug("Route between SW[" + node + "] --> SW[" + rootNode
        Node currNode = node;
        key = new HostNodePair(host, currNode);
        // Walk the path hop by hop, programming each switch along it.
        for (curr = 0; curr < links.size(); curr++) {
            link = links.get(curr);
                log.error("Could not retrieve the Link");
            log.debug(link.toString());
            // Index all the switches to be programmed
            // switchesToProgram.add(currNode);
            Set<NodeConnector> ports = null;
            ports = switchManager.getUpNodeConnectors(currNode);
            updatePerHostRuleInSW(host, currNode, rootNode, link, key,
            if ((this.rulesDB.get(key)) != null) {
                // Calling updatePerHostRuleInSW() doesn't guarantee that rules will be
                // added in currNode (e.g, there is only one link from currNode to rootNode).
                // This check makes sure that there are some rules in the rulesDB for the
                // given key prior to adding switch to switchesToProgram
                switchesToProgram.add(currNode);
            currNode = link.getHeadNodeConnector().getNode();
            key = new HostNodePair(host, currNode);

    // This rule will be added no matter if any topology is built
    // or no, it serve as a way to handle the case of a node with
    // multiple hosts attached to it but not yet connected to the
    switchesToProgram.add(rootNode);
    Set<NodeConnector> ports = switchManager
            .getUpNodeConnectors(rootNode);
    updatePerHostRuleInSW(host, rootNode, rootNode, null, new HostNodePair(
            host, rootNode), ports);

    // log.debug("Getting out at the end!");
    return switchesToProgram;
/**
 * Calculate the per-Host rules to be installed in the rulesDB
 * from a specific switch when a host facing port comes up.
 * These rules will later on be installed in HW. This routine
 * will implicitly calculate the shortest path from the switch
 * where the port has come up to the switch where the host is
 * attached, and will automatically create all the rules that allow
 * a /32 destination IP based forwarding, as in traditional IP networks.
 *
 * @param host   Host for which we are going to prepare the rules in the rulesDB
 * @param node   Switch where the port has come up
 * @param swport Port which came up
 * @return A set of switches touched by the calculation
 *
 * NOTE(review): interior lines of this method appear to be elided in
 * this listing; code lines are kept verbatim, only comments changed.
 */
private Set<Node> preparePerHostPerSwitchRules(HostNodeConnector host,
        Node node, NodeConnector swport) {
    if ((host == null) || (node == null)) {
    if (this.routing == null) {
    if (this.switchManager == null) {
    if (this.rulesDB == null) {
    Node rootNode = host.getnodeconnectorNode();
    Set<Node> switchesToProgram = new HashSet<Node>();
    Map<NodeConnector, FlowEntry> pos;
    Set<NodeConnector> ports = new HashSet<NodeConnector>();
    Path res = this.routing.getRoute(node, rootNode);
    if ((res == null) || ((links = res.getEdges()) == null)) {
        // Still the path that connect node to rootNode
        log.debug("NO Route/Path between SW[" + node + "] --> SW["
                + rootNode + "] cleaning potentially existing entries");
        key = new HostNodePair(host, node);
        pos = this.rulesDB.get(key);
        for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
            //Uninstall the policy
            this.frm.uninstallFlowEntry(po);
        this.rulesDB.remove(key);
    log.debug("Route between SW[" + node + "] --> SW[" + rootNode + "]");
    Node currNode = node;
    key = new HostNodePair(host, currNode);
    for (curr = 0; curr < links.size(); curr++) {
        link = links.get(curr);
            log.error("Could not retrieve the Link");
        log.debug("Link [" + currNode + "/" + link.getHeadNodeConnector()
                + link.getHeadNodeConnector().getNode() + "/"
                + link.getTailNodeConnector() + "]");
        // Index all the switches to be programmed
        switchesToProgram.add(currNode);
        updatePerHostRuleInSW(host, currNode, rootNode, link, key, ports);
        break; // come out of the loop for port up case, interested only in programming one switch
    // This rule will be added no matter if any topology is built
    // or no, it serve as a way to handle the case of a node with
    // multiple hosts attached to it but not yet connected to the
    // switchesToProgram.add(rootNode);
    //updatePerHostRuleInSW(host, rootNode,
    //                      new HostNodePair(host, rootNode),ports);

    // log.debug("Getting out at the end!");
    return switchesToProgram;
/**
 * Routine that fetches the per-Host rules from the rulesDB and
 * installs them in HW; entries having the same match rules will be
 * overwritten silently.
 *
 * @param host host for which we want to install in HW the per-Host rules
 * @param switchesToProgram list of switches to be programmed in
 *        HW; usually all of them, but better to be explicit — that list
 *        may change with time based on new switch addition/removal
 * @return a return code that conveys the programming status of the HW
 *
 * NOTE(review): interior lines (else branches, braces, the extraction of
 * {@code po} from the map entry) appear elided in this listing; code
 * lines are kept verbatim, only comments changed.
 */
private RulesProgrammingReturnCode installPerHostRules(
        HostNodeConnector host, Set<Node> switchesToProgram) {
    RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
    if (host == null || switchesToProgram == null) {
        return RulesProgrammingReturnCode.FAILED_WRONG_PARAMS;
    Map<NodeConnector, FlowEntry> pos;
    // Now program every single switch
    log.debug("Inside installPerHostRules");
    for (Node swId : switchesToProgram) {
        HostNodePair key = new HostNodePair(host, swId);
        pos = this.rulesDB.get(key);
        for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
            // Populate the Policy field now
            Status poStatus = this.frm.installFlowEntry(po);
            if (!poStatus.isSuccess()) {
                log.error("Failed to install policy: "
                        + po.getGroupName() + " ("
                        + poStatus.getDescription() + ")");
                // Partial failure: keep programming the remaining switches.
                retCode = RulesProgrammingReturnCode.FAILED_FEW_SWITCHES;
                // Remove the entry from the DB, it was not installed!
                this.rulesDB.remove(key);
                log.debug("Successfully installed policy "
                        + po.toString() + " on switch " + swId);
            log.error("Cannot find a policy for SW:{" + swId
                    + "} Host: {" + host + "}");
            /* // Now dump every single rule */
            /* for (HostNodePair dumpkey : this.rulesDB.keySet()) { */
            /* po = this.rulesDB.get(dumpkey); */
            /* log.debug("Dumping entry H{" + dumpkey.getHost() + "} S{" + dumpkey.getSwitchId() + "} = {" + (po == null ? "null policy" : po)); */
    log.debug("Leaving installPerHostRules");
645 * Cleanup all the host rules for a given host
647 * @param host Host for which the host rules need to be cleaned
648 * up, the host could be null in that case it match all the hosts
650 * @return a return code that convey the programming status of the HW
652 private RulesProgrammingReturnCode uninstallPerHostRules(
653 HostNodeConnector host) {
654 RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
655 Map<NodeConnector, FlowEntry> pos;
657 // Now program every single switch
658 for (HostNodePair key : this.rulesDB.keySet()) {
659 if (host == null || key.getHost().equals(host)) {
660 pos = this.rulesDB.get(key);
661 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
664 // Uninstall the policy
665 this.frm.uninstallFlowEntry(po);
668 this.rulesDB.remove(key);
675 * Cleanup all the host rules for a given node, triggered when the
676 * switch disconnects, so there is no reason for Hw cleanup
677 * because it's disconnected anyhow
678 * TBD - Revisit above stmt in light of CSCus88743
679 * @param targetNode Node for which we want to do cleanup
682 private void uninstallPerNodeRules(Node targetNode) {
683 //RulesProgrammingReturnCode retCode = RulesProgrammingReturnCode.SUCCESS;
684 Map<NodeConnector, FlowEntry> pos;
687 // Now program every single switch
688 for (HostNodePair key : this.rulesDB.keySet()) {
689 Node node = key.getNode();
690 if (targetNode == null || node.equals(targetNode)) {
691 log.debug("Work on " + node + " host " + key.getHost());
692 pos = this.rulesDB.get(key);
693 for (Map.Entry<NodeConnector, FlowEntry> e : pos.entrySet()) {
696 // Uninstall the policy
697 this.frm.uninstallFlowEntry(po);
700 log.debug("Remove " + key);
701 this.rulesDB.remove(key);
707 * Cleanup all the host rules currently present in the rulesDB
709 * @return a return code that convey the programming status of the HW
711 private RulesProgrammingReturnCode uninstallPerHostRules() {
712 return uninstallPerHostRules(null);
716 public void recalculateDone() {
717 if (this.hostTracker == null) {
718 //Not yet ready to process all the updates
721 Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
722 for (HostNodeConnector host : allHosts) {
723 Set<Node> switches = preparePerHostRules(host);
724 if (switches != null) {
725 // This will refresh existing rules, by overwriting
727 installPerHostRules(host, switches);
728 pruneExcessRules(switches);
733 void addTobePrunedPolicy(Node swId, FlowEntry po, FlowEntry new_po) {
734 List<FlowEntry> pl = tobePrunedPos.get(swId);
736 pl = new LinkedList<FlowEntry>();
737 tobePrunedPos.put(swId, pl);
740 log.debug("Adding Pruned Policy for SwId: {}", swId);
741 log.debug("Old Policy: " + po.toString());
742 log.debug("New Policy: " + new_po.toString());
745 private void pruneExcessRules(Set<Node> switches) {
746 for (Node swId : switches) {
747 List<FlowEntry> pl = tobePrunedPos.get(swId);
751 "Policies for Switch: {} in the list to be deleted: {}",
753 Iterator<FlowEntry> plIter = pl.iterator();
754 //for (Policy po: pl) {
755 while (plIter.hasNext()) {
756 FlowEntry po = plIter.next();
757 log.error("Removing Policy, Switch: {} Policy: {}", swId,
759 this.frm.uninstallFlowEntry(po);
763 // tobePrunedPos.remove(swId);
768 * A Host facing port has come up in a container. Add rules on the switch where this
769 * port has come up for all the known hosts to the controller.
770 * @param swId switch id of the port where port came up
771 * @param swPort port which came up
773 private void updateRulesforHIFup(Node node, NodeConnector swPort) {
774 if (this.hostTracker == null) {
775 //Not yet ready to process all the updates
778 log.debug("Host Facing Port in a container came up, install the rules for all hosts from this port !");
779 Set<HostNodeConnector> allHosts = this.hostTracker.getAllHosts();
780 for (HostNodeConnector host : allHosts) {
781 if (node.equals(host.getnodeconnectorNode())
782 && swPort.equals(host.getnodeConnector())) {
784 * This host resides behind the same switch and port for which a port up
785 * message is received. Ideally this should not happen, but if it does,
786 * don't program any rules for this host
790 Set<Node> switches = preparePerHostPerSwitchRules(host, node,
792 if (switches != null) {
793 // This will refresh existing rules, by overwriting
795 installPerHostRules(host, switches);
802 public void notifyHTClient(HostNodeConnector host) {
806 Set<Node> switches = preparePerHostRules(host);
807 if (switches != null) {
808 installPerHostRules(host, switches);
813 public void notifyHTClientHostRemoved(HostNodeConnector host) {
817 uninstallPerHostRules(host);
/**
 * Inventory callback for switch updates; when a node goes away, the
 * rules previously computed for it are cleaned from the rulesDB.
 *
 * NOTE(review): the dispatch on {@code type} is elided in this listing;
 * code lines are kept verbatim, only comments changed.
 */
public void notifyNode(Node node, UpdateType type,
        Map<String, Property> propMap) {
        // Node disappeared from inventory: drop its cached rules.
        log.debug("Node " + node + " gone, doing a cleanup");
        uninstallPerNodeRules(node);
/**
 * Inventory callback for node-connector (port) updates; dispatches on the
 * port's state property: EDGE_UP ports are handled by
 * handleNodeConnectorStatusUp(), otherwise handleNodeConnectorStatusDown().
 *
 * NOTE(review): control-flow lines are elided in this listing; code
 * lines are kept verbatim, only comments changed.
 */
public void notifyNodeConnector(NodeConnector nodeConnector,
        UpdateType type, Map<String, Property> propMap) {
    if (nodeConnector == null)
    State state = (State) propMap.get(State.StatePropName);
    if ((state != null) && (state.getValue() == State.EDGE_UP)) {
        handleNodeConnectorStatusUp(nodeConnector);
        handleNodeConnectorStatusDown(nodeConnector);
866 private void handleNodeConnectorStatusUp(NodeConnector nodeConnector) {
867 if (topologyManager == null) {
868 log.debug("topologyManager is not set yet");
872 if (topologyManager.isInternal(nodeConnector)) {
873 log.debug("{} is not a host facing link", nodeConnector);
877 log.debug("{} is up", nodeConnector);
878 updateRulesforHIFup(nodeConnector.getNode(), nodeConnector);
881 private void handleNodeConnectorStatusDown(NodeConnector nodeConnector) {
882 log.debug("{} is down", nodeConnector);
885 void setClusterContainerService(IClusterContainerServices s) {
886 log.debug("Cluster Service set");
887 this.clusterContainerService = s;
890 void unsetClusterContainerService(IClusterContainerServices s) {
891 if (this.clusterContainerService == s) {
892 log.debug("Cluster Service removed!");
893 this.clusterContainerService = null;
898 * Function called by the dependency manager when all the required
899 * dependencies are satisfied
907 * Function called by the dependency manager when at least one
908 * dependency become unsatisfied or when the component is shutting
909 * down because for example bundle is being stopped.
916 * Function called by dependency manager after "init ()" is called
917 * and after the services provided by the class are registered in
918 * the service registry
925 * Function called by the dependency manager before the services
926 * exported by the component are unregistered, this will be
927 * followed by a "destroy ()" calls
933 public void setSwitchManager(ISwitchManager switchManager) {
934 this.switchManager = switchManager;
937 public void unsetSwitchManager(ISwitchManager switchManager) {
938 if (this.switchManager == switchManager) {
939 this.switchManager = null;