--- /dev/null
+'''
+Created on Jan 24, 2014
+
+@author: vdemcak
+'''
+
+import logging
+
class Comparator():
    """Compares a flow dumped from the switch against its stored/expected form."""

    log = logging.getLogger('Comparator')

    @staticmethod
    def compare_results(actual, expected):
        """
        Assert that every key of *expected* is present and equal in *actual*.

        Time-dependent keys (duration family) are excluded from the equality
        check; the duration itself is range-checked via test_duration().
        Raises AssertionError on any mismatch.
        """
        # keys the switch reported but the stored flow doesn't care about
        list_unused = list(set(actual.keys()) - set(expected.keys()))
        if len(list_unused) > 0:
            Comparator.log.info('unchecked tags: {}'.format(list_unused))

        # these values change over time, so equality comparison is meaningless
        list_duration = ['duration', 'hard_timeout', 'idle_timeout']

        Comparator.test_duration(actual, expected)

        # compare results from actual flow (mn dump result) and expected flow (stored result)
        for k in expected.keys():
            if k not in list_duration:
                assert k in actual, 'cannot find key {} in flow {}'.format(k, actual)
                assert actual[k] == expected[k], 'key:{}, actual:{} != expected:{}'.format(k, actual[k], expected[k])

    @staticmethod
    def test_duration(actual, expected):
        """
        Check that the expected flow's duration (e.g. '3.4s') has not exceeded
        the actual flow's hard_timeout. No-op when *expected* has no duration.
        """
        duration_key = 'duration'

        if duration_key in expected:
            assert duration_key in actual, '{} is not set in {}'.format(duration_key, actual)
            try:
                duration = float(expected[duration_key].rstrip('s'))
                hard_timeout = int(actual['hard_timeout'])
                assert duration <= hard_timeout, 'duration is higher than hard_timeout, {} > {}'.format(duration, hard_timeout)
            except KeyError:
                Comparator.log.warning('cannot find keys to test duration tag', exc_info=True)
        else:
            # VD - what should we do in this case
            pass
\ No newline at end of file
--- /dev/null
+'''
+Created on Jan 24, 2014
+
+@author: vdemcak
+'''
+
+
class ConvertorTools():
    """
    Static helpers that normalize tag values before flows are compared.
    """
    # tags whose integer values must be rendered in hexadecimal form
    CONVERTORS = {
        'cookie': hex,
        'metadata': hex
    }

    @staticmethod
    def base_tag_values_conversion(key, value):
        """
        Convert *value* for *key* when a convertor is registered.

        Returns '' for a None value, the converted value when *key* has a
        registered convertor, and the value unchanged otherwise.
        """
        if value is None:
            return ''
        convert = ConvertorTools.CONVERTORS.get(key)
        return value if convert is None else convert(int(value))
\ No newline at end of file
--- /dev/null
+import requests
+import logging
+import time
+import xml.dom.minidom as md
+import threading
+from xml.etree import ElementTree as ET
+
+from openvswitch.parser_tools import ParseTools
+
# REST request timeouts in seconds (GET / PUT / DELETE).
TO_GET = 30
TO_PUT = 10
TO_DEL = 30
# generic wait between test phases, in seconds
WAIT_TIME = 15
# extra delay before the operational datastore reflects config changes
OPERATIONAL_DELAY = 11
# expected rate at which the controller pushes flows to the switch
FLOWS_PER_SECOND = 5

# Placeholder tokens substituted into xml_template for each generated flow.
FLOW_ID_TEMPLATE = 'FLOW_ID_TEMPLATE'
COOKIE_TEMPLATE = 'COOKIE_TEMPLATE'
HARD_TO_TEMPLATE = 'HARD_TO_TEMPLATE'
FLOW_NAME_TEMPLATE = 'FLOW_NAME_TEMPLATE'
IPV4DST_TEMPLATE = 'IPV4DST_TEMPLATE'
PRIORITY_TEMPLATE = 'PRIORITY_TEMPLATE'

# Skeleton of the flow-configuration document sent via RESTCONF; the
# *_TEMPLATE placeholders above are replaced per flow (see FlowAdderThread).
xml_template = '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>' \
'<flow xmlns=\"urn:opendaylight:flow:inventory\">' \
    '<strict>false</strict>' \
    '<instructions><instruction><order>0</order><apply-actions><action><order>0</order><dec-nw-ttl/>' \
    '</action></apply-actions></instruction></instructions><table_id>2</table_id>' \
    '<id>'+FLOW_ID_TEMPLATE+'</id><cookie_mask>255</cookie_mask><installHw>false</installHw>' \
    '<match><ethernet-match><ethernet-type><type>2048</type></ethernet-type></ethernet-match>' \
    '<ipv4-destination>'+IPV4DST_TEMPLATE+'</ipv4-destination></match><hard-timeout>'+HARD_TO_TEMPLATE+'</hard-timeout>' \
    '<flags>FlowModFlags [_cHECKOVERLAP=false, _rESETCOUNTS=false, _nOPKTCOUNTS=false, _nOBYTCOUNTS=false, _sENDFLOWREM=false]</flags>' \
    '<cookie>'+COOKIE_TEMPLATE+'</cookie><idle-timeout>34000</idle-timeout><flow-name>'+FLOW_NAME_TEMPLATE+'</flow-name><priority>'+PRIORITY_TEMPLATE+'</priority>' \
    '<barrier>false</barrier></flow>'
+
class MapNames():
    # symbolic names for the flow maps used by the stress tests
    TEST = 'TEST'
    DUMMY = 'DUMMY'
+
# mapping of command-line log-level names to logging module constants
loglevels = {
    'debug' : logging.DEBUG,
    'info' : logging.INFO,
    'warning' : logging.WARNING,
    'error' : logging.ERROR
}
+
+
class FlowAdderThread(threading.Thread):
    """
    Thread that adds a range of flows to the controller via RESTCONF.

    test_class should implement methods inc_flow(flow_id, cookie_id) and inc_error().
    """

    def __init__(self, test_class, thread_id, host, port, net, flows_ids_from=0, flows_ids_to=1):
        """
        test_class: should be type of TestClassAdd
        thread_id: id of thread
        host: controller's ip address
        port: controller's port
        net: mininet instance
        flows_ids_from: minimum id of flow to be added (including)
        flows_ids_to: maximum id of flow to be added (excluding)
        """
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.flows_ids_from = flows_ids_from
        self.flows_ids_to = flows_ids_to

        self.test_class = test_class
        self.net = net
        self.host = host
        self.port = port

        # success / failure counters updated while the thread runs
        # (the original initialized these twice; once is enough)
        self.flows = 0
        self.errors = 0

        # dedicated logger so each thread's output is labeled
        self.log = logging.getLogger('FlowAdderThread: ' + str(thread_id))
        self.log.propagate = False
        self.channel = logging.StreamHandler()
        self.log.addHandler(self.channel)
        formatter = logging.Formatter('THREAD A{0}: %(levelname)s: %(message)s'.format(self.thread_id))
        self.channel.setFormatter(formatter)

        self.log.info('created new FlowAdderThread->id:{0}, flows id: {1} -> {2}'.format(self.thread_id, self.flows_ids_from, self.flows_ids_to))

    def make_cookie_marker(self, input, number=0):
        """Build a hex cookie string from *input*'s character codes plus *number*."""
        return '0x' + "{0:x}".format(int(''.join("{0:x}".format(ord(c)) for c in (input)), 16) + number)

    def make_ipv4_address(self, number, octet_count=4, octet_size=255):
        """
        Encode *number* into a '10.x.y.0/24' style address.

        The last octet always stays 0 (the /24 host part); *number* is spread
        over the remaining octets from the right.
        """
        mask = 24
        ip = ['10', '0', '0', '0']

        # only numbers that fit into the free octets are encoded
        # (generalized from the hard-coded 255**3; identical for the defaults)
        if number < (octet_size ** (octet_count - 1)):
            for o in range(1, octet_count):
                ip[octet_count - 1 - o] = str(number % octet_size)
                # floor division keeps this an int on Python 3; the original
                # '/' yields floats there and corrupts the encoded octets
                number = number // octet_size
                if number == 0:
                    break

        return '.'.join(ip) + '/{0}'.format(mask)

    def __add_flows(self, act_flow_id):
        """Render the flow XML for *act_flow_id* and PUT it to the controller."""
        cookie_id = None

        try:
            self.log.debug('adding flow id: {0}'.format(act_flow_id))

            cookie_id = self.make_cookie_marker('stress', act_flow_id)

            xml_string = str(xml_template).replace(FLOW_ID_TEMPLATE, str(act_flow_id))\
                .replace(COOKIE_TEMPLATE, str(int(cookie_id, 16)))\
                .replace(HARD_TO_TEMPLATE, '1200').replace(FLOW_NAME_TEMPLATE, 'FooXf{0}'.format(act_flow_id))\
                .replace(IPV4DST_TEMPLATE, self.make_ipv4_address(act_flow_id)).replace(PRIORITY_TEMPLATE, str(act_flow_id))

            # the table id and flow id from the rendered XML drive the URL
            tree = md.parseString(xml_string)
            ids = ParseTools.get_values(tree.documentElement, 'table_id', 'id')
            data = (self.host, self.port, ids['table_id'], ids['id'])

            url = 'http://%s:%d/restconf/config/opendaylight-inventory:nodes' \
                  '/node/openflow:1/table/%s/flow/%s' % data
            # send request via RESTCONF
            headers = {
                'Content-Type': 'application/xml',
                'Accept': 'application/xml',
            }
            self.log.debug('sending request to url: {0}'.format(url))
            rsp = requests.put(url, auth=('admin', 'admin'), data=xml_string,
                               headers=headers, timeout=TO_PUT)
            self.log.debug('received status code: {0}'.format(rsp.status_code))
            self.log.debug('received content: {0}'.format(rsp.text))
            assert rsp.status_code == 204 or rsp.status_code == 200, 'Status' \
                ' code returned %d' % rsp.status_code

            # we expect that controller doesn't fail to store flow on switch
            self.test_class.inc_flow(flow_id=act_flow_id, cookie_id=cookie_id)
            self.flows += 1
        except Exception as e:
            self.log.error('Error storing flow id:{0}, cookie-id:{1}, reason: {2}'.format(act_flow_id, cookie_id, str(e)))
            self.test_class.inc_error()
            self.errors += 1

    def run(self):
        """Add every flow id in [flows_ids_from, flows_ids_to) sequentially."""
        self.flows, self.errors = 0, 0
        self.log.info('adding flows {0} to {1}'.format(self.flows_ids_from, self.flows_ids_to))
        for i in range(self.flows_ids_from, self.flows_ids_to):
            self.__add_flows(i)

        self.log.info('finished, added {0} flows, {1} errors'.format(self.flows, self.errors))
+
+
+
class FlowRemoverThread(threading.Thread):
    """
    Thread that removes flows from the controller via RESTCONF.

    test_class should implement method delete_flow_from_map(flow_id, cookie_id).
    """

    def __init__(self, test_class, thread_id, host, port, net, flows_to_delete=None):
        """
        test_class: should be type of TestClassDelete
        thread_id: id of thread
        host: controller's ip address
        port: controller's port
        net: mininet instance
        flows_to_delete: collection of (cookie_id, flow_id) pairs to delete
        """
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        # None-sentinel avoids the shared mutable-default-argument pitfall
        # of the original 'flows_to_delete=[]'
        self.flows_to_delete = flows_to_delete if flows_to_delete is not None else []
        self.test_class = test_class

        # per-thread result counters
        self.removed = 0
        self.errors = 0

        self.net = net
        self.host = host
        self.port = port

        # dedicated logger so each thread's output is labeled
        self.log = logging.getLogger('FlowRemoverThread: ' + str(thread_id))
        self.log.propagate = False
        self.channel = logging.StreamHandler()
        self.log.addHandler(self.channel)
        formatter = logging.Formatter('THREAD R{0}: %(levelname)s: %(message)s'.format(self.thread_id))
        self.channel.setFormatter(formatter)

        self.log.info('created new FlowRemoverThread->id:{0}'.format(self.thread_id))

    def __remove_flows(self, act_flow_id, cookie_id):
        """DELETE one flow from the controller's config datastore."""
        headers = {'Content-Type': 'application/xml', 'Accept': 'application/xml'}
        url = 'http://%s:%d/restconf/config/opendaylight-inventory:nodes' \
              '/node/openflow:1/table/2/flow/%s' % (self.host, self.port, act_flow_id)

        try:
            response = requests.delete(url, auth=('admin', 'admin'), headers=headers, timeout=TO_DEL)
            self.log.debug('deletion flow: {0} from controller: response: {1}'.format(act_flow_id, response.status_code))

            assert response.status_code == 200 or response.status_code == 204, 'Delete response should be 200 or 204 is {0}'.format(response.status_code)
            self.test_class.delete_flow_from_map(act_flow_id, cookie_id)
            self.removed += 1

        # Timeout must be caught before Exception: it is an Exception
        # subclass, so the original ordering left this handler unreachable
        except requests.exceptions.Timeout as te:
            self.log.error('Error deleting flow: {0}, timeout reached: {1}'.format(act_flow_id, str(te)))
            self.errors += 1
        except Exception as e:
            self.log.error('Error deleting flow:{0}, reason: {1}'.format(act_flow_id, str(e)))
            self.errors += 1

    def run(self):
        """Delete every (cookie_id, flow_id) pair handed to the thread."""
        self.log.debug('started removing flows')
        for flow_ids in set(self.flows_to_delete):
            self.__remove_flows(flow_ids[1], flow_ids[0])

        self.log.info('finished removing {0} flows, {1} errors'.format(self.removed, self.errors))
+
--- /dev/null
+'''
+Created on Jan 24, 2014
+
+@author: vdemcak
+'''
+
+import re
+import logging
+import mininet.topo
+import mininet.net
+import mininet.util
+from mininet.node import RemoteController
+from mininet.node import OVSKernelSwitch
+
class MininetTools():
    """
    Tool class provides static method for Open_vswitch
    mininet out of box controls
    """
    @staticmethod
    def create_network(controller_ip, controller_port):
        """Create topology and mininet network."""
        # two hosts hanging off a single OpenFlow 1.3 switch
        topo = mininet.topo.Topo()

        topo.addSwitch('s1')
        topo.addHost('h1')
        topo.addHost('h2')

        topo.addLink('h1', 's1')
        topo.addLink('h2', 's1')

        switch = mininet.util.customConstructor(
            {'ovsk': OVSKernelSwitch}, 'ovsk,protocols=OpenFlow13')

        # the switch connects out to the (already running) remote controller
        controller = mininet.util.customConstructor(
            {'remote': RemoteController}, 'remote,ip=%s,port=%s' % (controller_ip, controller_port))

        net = mininet.net.Mininet(topo=topo, switch=switch, controller=controller)

        return net

    @staticmethod
    def __mininet_parse_response(resp_str='', x_dict={}, ikwd={}):
        """Parse a whitespace-separated 'key=value' response string into x_dict.

        x_dict is an out-parameter; keys present in ikwd are skipped and
        nested 'k=v=w' values recurse into sub-dictionaries.
        NOTE(review): the mutable default arguments are a known Python
        pitfall — results would accumulate across calls if callers ever
        relied on the defaults; all visible callers pass their own dicts.
        """
        for elm in re.split('\s', resp_str.strip()):
            elm_prop = re.split('=', elm, 1)
            a_key = (elm_prop[0]).strip()
            if (ikwd.get(a_key, None) is None):
                a_value = ''
                if (len(elm_prop) > 1):
                    # a second '=' means a nested key=value payload
                    if len(elm_prop[1].split('=')) > 1:
                        new_dict = {}
                        MininetTools.__mininet_parse_response(elm_prop[1], new_dict, ikwd)
                        a_value = new_dict
                    else:
                        a_value = elm_prop[1]
                    a_value = a_value.strip() if isinstance(a_value, str) else (str(a_value)).strip()
                x_dict[a_key] = a_value

    @staticmethod
    def get_dict_of_flows(net, ikwd={}):
        """Get list of flows from network's first switch.

        Return list of all flows on switch, sorted by duration (newest first)
        One flow is a dictionary with all flow's attribute:value pairs. Matches
        are stored under 'matches' key as another dictionary.
        Example:

        {
            'actions': 'drop',
            'cookie': '0xa,',
            'duration': '3.434s,',
            'hard_timeout': '12,',
            'idle_timeout': '34,',
            'matches': {
                'ip': None,
                'nw_dst': '10.0.0.0/24'
            },
            'n_bytes': '0,',
            'n_packets': '0,',
            'priority': '2',
            'table': '1,'
        }

        """
        log = logging.getLogger(__name__)

        # dictionary for return
        flows = {}
        # flow command prompt output
        # NOTE(review): get_flows_string returns a *list* of lines while the
        # .replace()/re.split() calls below expect a single string — verify
        # against callers; this looks like it would raise AttributeError.
        output = MininetTools.get_flows_string(net)
        # prepare cmd for parsing to dictionary
        output = output.replace(',',' ') ;
        output = output.replace(' ',' ')

        # action has to be parsed in different way
        # NOTE(review): re.split always returns at least one element, so this
        # condition is always true; if 'actions=' is absent, [1] below would
        # raise IndexError — presumably dump output always contains it.
        if (len(re.split('actions=', output, 1)) > 0):
            action_str = re.split('actions=', output, 1)[1]
            action_dict = {}
            MininetTools.__mininet_parse_response(action_str, action_dict, ikwd)
            flows['actions'] = str(action_dict)
        else:
            flows['actions'] = ''

        # remove actions from string (always last) and parse everything else
        output = re.split('actions=', output, 1)[0]
        MininetTools.__mininet_parse_response(output, flows, ikwd)

        return flows

    @staticmethod
    def get_flows_string(net=None):
        """
        Return flows from switch in string format
        same as by a call 'ovs-ofctl -O OpenFlow13 dump-flows sx'

        NOTE(review): despite the docstring, the return value is a list of
        lines ([] when net is None) — see the note in get_dict_of_flows.
        """
        if net is None:
            return []

        switch = net.switches[0]
        output = switch.cmdPrint(
            'ovs-ofctl -O OpenFlow13 dump-flows %s' % switch.name)

        # drop the command echo / header line
        return output.splitlines()[1:]
\ No newline at end of file
--- /dev/null
+'''
+Created on Jan 24, 2014
+
+@author: vdemcak
+'''
+
+import re
+
+from xml.etree import ElementTree as ET
+from convertor_tools import ConvertorTools
+
class ParseTools():
    """Static parsing helpers for flow XML documents and ovs dump output."""

    @staticmethod
    def get_element_alias_by_key(element, key_dict):
        """Return key_dict's alias for element.tag, or None when unmapped."""
        # dict.get already yields None for missing entries, so the original
        # "x if get(...) > None else None" (Python 2 only) was redundant
        return key_dict.get(element.tag)

    @staticmethod
    def sort_ordered_dict_to_array(x_dict=None):
        """Join x_dict's values into 'a, b, c', ordered by their (string) keys."""
        if x_dict is not None:
            return ', '.join(x_dict[key] for key in sorted(x_dict.keys()))
        return None

    @staticmethod
    def get_element_value(element):
        """Return element.text lower-cased with the first whitespace run removed ('' when no text)."""
        return (re.sub('[\s]+', '', element.text, count=1)).lower() if element.text is not None else ''

    @staticmethod
    def __parse_ordered_tags_from_xml(element, kwd, p_elm_n=None, ikwd=None, ord_value=None):
        """
        Recursively flatten an ordered subtree (e.g. <instructions>) into a
        dict mapping the <order> value to the flattened content.
        """
        a_dict = {}
        if element is not None:
            elm_n = ParseTools.get_element_alias_by_key(element, kwd)
            children = list(element)
            if len(children) > 0:
                sub_dict = {}
                for child in children:
                    if child.tag == 'order':
                        ord_value = ParseTools.get_element_value(child)
                    else:
                        sub_dict.update(ParseTools.__parse_ordered_tags_from_xml(child, kwd, p_elm_n, ikwd))

                a_value = ParseTools.sort_ordered_dict_to_array(sub_dict)
                # default to order '0' when no (or an empty) <order> tag was seen
                if ord_value is not None:
                    order = ord_value if (len(ord_value) > 0) else '0'
                else:
                    order = '0'
                a_dict[order] = a_value

            else:
                if ord_value is not None:
                    order = ord_value if (len(ord_value) > 0) else '0'
                else:
                    order = '0'
                a_dict[order] = elm_n if elm_n is not None else element.tag

        return a_dict

    @staticmethod
    def __parse_tags_from_xml(element, flow_dict, kwd, p_elm_n=None, ikwd=None):
        """
        Recursively flatten *element* into flow_dict, translating tags via kwd
        and skipping any key present in ikwd.
        """
        if element is not None:
            # find and translate element.tag in key_word_dictionary
            elm_n = ParseTools.get_element_alias_by_key(element, kwd)
            children = list(element)
            if len(children) > 0:
                for child in children:
                    new_p_elm_n = elm_n if elm_n is not None else p_elm_n
                    ParseTools.__parse_tags_from_xml(child, flow_dict, kwd, new_p_elm_n, ikwd)
            else:
                # prefer parent element_name before element_name and element_name before element.tag
                a_key = elm_n if elm_n is not None else p_elm_n if p_elm_n is not None else element.tag
                a_value = ParseTools.get_element_value(element)
                # don't continue for ignored tags
                if ikwd is not None:
                    if ikwd.get(a_key, None) is not None:
                        # TODO add check for cookie_mask (mask has to have same or more length as cookie if is more as 0)
                        return
                flow_dict[a_key] = ConvertorTools.base_tag_values_conversion(a_key, a_value)

    @staticmethod
    def get_switchflow_from_xml(xml_string, key_dict=None, action_key_dict=None, match_key_dict=None, ignore_key_dict=None):
        """
        Parse a flow XML document into a flat comparable dict.

        <match> children use match_key_dict, <instructions> are flattened into
        flow_dict['actions'] via action_key_dict, everything else uses key_dict.
        Returns None when xml_string is None.
        """
        if xml_string is not None:
            # remove namespace so tags can be matched without a prefix
            xml_string = re.sub(' xmlns="[^"]+"', '', xml_string, count=1)
            tree = ET.fromstring(xml_string)

            flow_dict = {}

            if tree is not None:
                for child in list(tree):
                    if child.tag == 'match':
                        ParseTools.__parse_tags_from_xml(child, flow_dict, match_key_dict, ikwd=ignore_key_dict)
                    elif child.tag == 'instructions':
                        x_dict = ParseTools.__parse_ordered_tags_from_xml(child, action_key_dict, ikwd=ignore_key_dict)
                        flow_dict['actions'] = ParseTools.sort_ordered_dict_to_array(x_dict)
                    else:
                        ParseTools.__parse_tags_from_xml(child, flow_dict, key_dict, ikwd=ignore_key_dict)

            return flow_dict

    @staticmethod
    def all_nodes(xml_root):
        """
        Generate every non-text node of a minidom tree, breadth-first.
        """
        current_nodes = [xml_root]
        next_nodes = []

        while len(current_nodes) > 0:
            for node in current_nodes:
                if node.nodeType != xml_root.TEXT_NODE:
                    yield node
                    next_nodes.extend(node.childNodes)

            current_nodes, next_nodes = next_nodes, []

    @staticmethod
    def get_values(node, *tags):
        """Return {tag: first text value found under *node*, or None} for each requested tag."""
        result = dict((tag, None) for tag in tags)
        for node in ParseTools.all_nodes(node):
            if node.nodeName in result and len(node.childNodes) > 0:
                result[node.nodeName] = node.childNodes[0].nodeValue
        return result

    @staticmethod
    def dump_string_to_dict(dump_string):
        """Parse an ovs dump line into a {key: value-or-None} dict."""
        dump_list = ParseTools.dump_string_to_list(dump_string)
        return ParseTools.dump_list_to_dict(dump_list)

    @staticmethod
    def dump_string_to_list(dump_string):
        """Split an ovs dump line into 'key=value' fragments with commas stripped."""
        out_list = []
        for item in dump_string.split():
            out_list.extend(item.rstrip(',').split(','))

        return out_list

    @staticmethod
    def dump_list_to_dict(dump_list):
        """Turn 'key=value' fragments into a dict; bare keys map to None."""
        out_dict = {}
        for item in dump_list:
            parts = item.split('=')
            if len(parts) == 1:
                parts.append(None)
            out_dict[parts[0]] = parts[1]

        return out_dict
\ No newline at end of file
--- /dev/null
+import logging
+import requests
+import re
+import time
+
+import xml.dom.minidom as md
+from xml.etree import ElementTree as ET
+
+from openvswitch.flow_tools import TO_GET, WAIT_TIME, OPERATIONAL_DELAY, FLOWS_PER_SECOND
+
class GetFlowsComponent():
    """Fetches the flow table of node openflow:1 from the controller over RESTCONF."""

    log = logging.getLogger('GetFlowsComponent')

    @staticmethod
    def get_flows(host, port, store='config'):
        """Issue a GET for table 2 against the given datastore ('config' or
        'operational') and return the raw requests response."""
        url = 'http://%s:%d/restconf/%s/opendaylight-inventory:nodes' \
            '/node/openflow:1/table/2/' % (host, port, store)
        GetFlowsComponent.log.debug('checking flows in controller - sending request to url: {0}'.format(url))
        request_args = {
            'auth': ('admin', 'admin'),
            'headers': {'Accept': 'application/xml'},
            'timeout': TO_GET,
        }
        response = requests.get(url, **request_args)
        return response
+
class CheckSwitchDump():
    """Maps flow entries from an ovs dump back to stored flow ids via their cookies."""

    log = logging.getLogger('CheckSwitchDump')

    def get_id_by_entry(self, dump_flow, id_map):
        """
        Return the stored flow id for one dump line, looked up in id_map by
        its cookie string, or None when the line has no cookie or the cookie
        is unknown.
        """
        # renamed from 'id' to avoid shadowing the builtin
        flow_id = None

        try:
            # the character class deliberately also matches a trailing comma;
            # the [7:-1] slice strips 'cookie=' and that last character
            # (assumes dump lines delimit the cookie with ',' — TODO confirm)
            cookie_regexp = re.compile("cookie=0x[0-9,a-f,A-F]+")
            cookie_match = re.search(cookie_regexp, dump_flow)
            if cookie_match is not None:
                cookie_id = cookie_match.group(0)[7:-1]
                flow_id = id_map[cookie_id]
            else:
                CheckSwitchDump.log.info('skipping parsing dump entry: {0} '.format(dump_flow))

        except KeyError:
            CheckSwitchDump.log.error('cookie: {0} is not contained in stored flows'.format(cookie_id))
        except Exception as e:
            # the original caught StandardError, which no longer exists on Python 3
            CheckSwitchDump.log.error('problem getting stored flow id from cookie from flow dump:{0} reason:{1}'.format(dump_flow, str(e)))

        return flow_id
+
+
class CheckConfigFlowsComponent():
    """Counts flows in the controller's config datastore that match an id map."""

    log = logging.getLogger('CheckConfigFlowsComponent')

    def check_config_flows(self, host, port, id_map):
        """Return the number of config-datastore flows whose id is in id_map, or -1 on error."""
        try:
            # check config datastore
            response = GetFlowsComponent.get_flows(host, port)
            assert response.status_code == 200, 'response from config must be 200, is {0}'.format(response.status_code)
            tree = ET.ElementTree(ET.fromstring(response.text))

            return GetXMLFlowsComponent.get_xml_flows_by_map(tree.getroot(), id_map)
        except Exception as e:
            # the original caught StandardError (Python 2 only); Exception
            # also covers the assertion and XML parse failures above
            CheckConfigFlowsComponent.log.error('problem getting flows from config: {0}'.format(str(e)))
            return -1
+
+
class CheckOperFlowsComponent():
    """Counts flows in the controller's operational datastore, with retrying."""

    log = logging.getLogger('CheckOperFlowsComponent')

    def check_oper_flows(self, host, port, id_map, wait_time=OPERATIONAL_DELAY):
        """Sleep wait_time seconds, then return the number of operational
        flows whose id is in id_map, or -1 on error."""
        time.sleep(wait_time)

        try:
            response = GetFlowsComponent.get_flows(host, port, 'operational')
            assert response.status_code == 200, 'response from config must be 200, is {0}'.format(response.status_code)
            CheckOperFlowsComponent.log.debug('got resposnse: {0}'.format(response.status_code))
            CheckOperFlowsComponent.log.debug('operational dump:\n{0}'.format(response.text))

            tree = ET.ElementTree(ET.fromstring(response.text))

            # filter ids
            return GetXMLFlowsComponent.get_xml_flows_by_map(tree.getroot(), id_map)
        except Exception as e:
            # the original caught StandardError (Python 2 only); Exception
            # also covers the assertion and XML parse failures above
            CheckOperFlowsComponent.log.error('problem getting flows from operational: {0}'.format(str(e)))
            return -1

    def check_oper_flows_loop(self, host, port, id_map, max_tries=2):
        """Poll the operational datastore until all id_map flows appear or
        max_tries is exhausted; return the last observed flow count."""
        # determine how much time we will wait for flows to get on switch
        target_oper_flows = len(id_map)
        current_try = 0
        current_oper_flows = 0

        # floor division keeps the wait time an int on Python 3
        wait_for_flows = (target_oper_flows // FLOWS_PER_SECOND) + OPERATIONAL_DELAY

        # check operational - in loop for determined number of tries
        while current_oper_flows < target_oper_flows and max_tries > current_try:
            CheckOperFlowsComponent.log.info('checking operational... waiting {0} seconds, {1}/{2}'.format(wait_for_flows, current_try + 1, max_tries))
            current_oper_flows = self.check_oper_flows(host, port, id_map, wait_for_flows)
            CheckOperFlowsComponent.log.info('got {0} flows on {1}. try'.format(current_oper_flows, current_try + 1))
            current_try += 1
        return current_oper_flows
+
class GetXMLFlowsComponent():
    """Counts flows in an inventory XML tree whose ids appear in a stored id map."""

    @staticmethod
    def get_xml_flows_by_map(xml_tree, id_map, namespace='{urn:opendaylight:flow:inventory}'):
        """Return how many <flow> children of xml_tree carry an <id> present in id_map's values."""
        wanted_ids = id_map.values()
        return sum(
            1 for flow in xml_tree.findall(namespace + 'flow')
            if int(flow.find(namespace + 'id').text) in wanted_ids
        )
--- /dev/null
class TestClassAdd():
    """Interface expected by FlowAdderThread; subclasses record added flows."""

    def inc_flow(self, flow_id, cookie_id):
        """Record one successfully added flow. Must be overridden."""
        raise NotImplementedError("inc_flow is not implemented")

    def inc_error(self):
        """Record one failed flow addition. Must be overridden."""
        raise NotImplementedError("inc_error is not implemented")
+
class TestClassRemove():
    """Interface expected by FlowRemoverThread; subclasses drop deleted flows."""

    def delete_flow_from_map(self, flow_id, cookie_id):
        """Remove one deleted flow from the caller's bookkeeping. Must be overridden."""
        # the original message wrongly referred to 'inc_flow'
        raise NotImplementedError("delete_flow_from_map is not implemented")
+
--- /dev/null
+'''
+Created on Jan 24, 2014
+
+@author: vdemcak
+'''
+
+import logging
+import os
+import re
+from xml.etree import ElementTree as ET
+
class Loader():
    """Loads flow XML test files and flattens them into comparable dictionaries."""

    @staticmethod
    def loadXml(file_name):
        """
        Load an XML file (resolved relative to the project root, three levels
        above this module), strip the default namespace, and return
        (parsed root Element, raw xml string).
        """
        path_to_xml = os.path.join('', file_name)
        path_to_xml = os.path.abspath(__file__ + '/../../../' + path_to_xml)
        with open(path_to_xml) as f:
            xml_string = f.read()
        # drop the default namespace so tags can be matched without a prefix
        xml_string = re.sub(' xmlns="[^"]+"', '', xml_string, count=1)

        tree = ET.fromstring(xml_string)
        return tree, xml_string

    @staticmethod
    def buildXmlDocDictionaryForComarableElements(element, flow_dict, p_elm_name=None, kwd=None, akwd=None, mkwd=None):
        """
        Recursively flatten *element* into flow_dict {tag-or-parent-name: text}.

        kwd/akwd/mkwd are keyword dictionaries for plain, action and match
        tags; the first non-None one aliases the current element, and the
        <match>/<actions> subtrees switch to mkwd/akwd respectively.
        """
        # first non-None keyword dictionary wins (original used Py2-only '> None')
        act_key_dict = kwd if kwd is not None else akwd if akwd is not None else mkwd
        if element is not None:
            elm_alias = element.tag if act_key_dict.get(element.tag, None) is not None else None
            children = list(element)
            if len(children) > 0:
                for child in children:
                    if element.tag == 'match':
                        Loader.buildXmlDocDictionaryForComarableElements(child, flow_dict, mkwd=mkwd)
                    elif element.tag == 'actions':
                        Loader.buildXmlDocDictionaryForComarableElements(child, flow_dict, akwd=akwd)
                    else:
                        Loader.buildXmlDocDictionaryForComarableElements(child, flow_dict, elm_alias, kwd, akwd, mkwd)
            else:
                if element.text is not None:
                    # strip the first whitespace run from the raw text
                    text = re.sub('[\s]+', '', element.text, count=1)
                    a_key = p_elm_name if p_elm_name is not None else element.tag
                    flow_dict[a_key] = text
        return
+
# numeric codes describing how a field's value should be interpreted
type_int = 1
type_boolean = 2
type_ethernet = 3
type_ipv4 = 4
type_ipv6 = 5


class Field():
    """
    Describes one flow field to validate.

    key: element tag as it appears in the keywords and in the XML
    bits: expected maximum width of the value, in bits
    prerequisites: mapping of required element tags to their allowed values;
        [None] means the tag merely has to be present, whatever its value
    convert_from: numeric base the value is parsed with when checked against
        prerequisite values stored in the XML
    value_type: one of the type_* codes above

    e.g.:
        OF_IPV4_SRC = Field('ipv4-source', 32, {'ethernet-type': [2048]}, 10)
        IN_PHY_PORT = Field('in-phy-port', 32, {'in-port': [None]}, 10)
    """

    def __init__(self, key, bits, prerequisites=None, convert_from=10, value_type=type_int):
        self.key = str(key)
        self.bits = bits
        # defensive copy so later mutation of the caller's dict can't leak in
        self.prerequisites = None if prerequisites is None else dict(prerequisites)
        self.convert_from = convert_from
        self.value_type = value_type

    def __str__(self):
        return "Field: {0}, size: {1}, prerequisites: {2}"\
            .format(self.key, self.bits, self.prerequisites)
+
+
class XMLValidator():
    """
    Validates flow XML documents: field bit-widths, address formats and
    match-field prerequisites, accumulating the verdict in self.xml_ok.
    """

    log = logging.getLogger('XMLValidator')
    log.propagate = False
    channel = logging.StreamHandler()
    log.addHandler(channel)

    def __init__(self, kwd, akwd, mkwd, loglevel=logging.INFO):
        """kwd/akwd/mkwd: keyword dictionaries for plain, action and match tags."""
        self.test_name = 'No test loaded'
        XMLValidator.log.setLevel(loglevel)

        self.xml_ok = True
        self.fields = list()
        self.flow_dict = dict()

        self.kwd = kwd
        self.akwd = akwd
        self.mkwd = mkwd

    def create_dictionaries(self, file_name):
        """Load *file_name* and flatten it into self.flow_dict for validation."""
        self.test_name = file_name

        formatter = logging.Formatter('TEST {0}: %(levelname)s: %(message)s'.format(self.test_name))
        XMLValidator.channel.setFormatter(formatter)

        self.flow_dict = dict()
        treeXml1, self.xml_string = Loader.loadXml(file_name)
        Loader.buildXmlDocDictionaryForComarableElements(treeXml1, self.flow_dict, kwd=self.kwd, akwd=self.akwd, mkwd=self.mkwd)
        XMLValidator.log.debug('loaded dict from xml: {0}'.format(self.flow_dict))

    def fill_fields(self):
        """Populate self.fields with the standard matchers."""
        Matchers.fill_validator(self)

    def add_field(self, fields):
        """Register one Field to validate."""
        self.fields.append(fields)

    def integer_check(self, value, bits, convert_from=10):
        """Raise when *value* (parsed in base convert_from) doesn't fit into *bits* bits."""
        XMLValidator.log.debug('validating integer: {0}'.format(value))
        # floor division: the original '/' becomes true division on Python 3,
        # which would flag every non-zero value as an overflow
        if (int(value, convert_from) // 2**bits) > 0:
            XMLValidator.log.error('value: {0} is larger than expected: {1}'.format(value, 2**bits))
            raise Exception

    def boolean_check(self, value, bits):
        """Raise when a boolean is expected but *bits* leaves no room for it."""
        XMLValidator.log.debug('validating boolean: {0}'.format(value))
        if bits < 1:
            XMLValidator.log.error('value: {0} is larger than expected: {1}'.format(value, 2**bits))
            raise Exception

    def ethernet_check(self, a):
        """Raise when any octet of the aa:bb:cc:dd:ee:ff address exceeds 0xff."""
        XMLValidator.log.debug('validating ethernet address: {0}'.format(a))
        numbers = a.split(':')
        max_range = (2**8) - 1

        for n in numbers:
            if int(n, 16) > max_range:
                XMLValidator.log.error('octet: {0} in ethernet address: {1} larger than: {2}'.format(n, a, max_range))
                raise Exception

    def mask_check(self, address, mask, base=16, part_len=16, delimiter=':'):
        """Raise when *mask* is not a multiple of part_len or when *address*
        has non-zero bits beyond the masked prefix."""
        if (int(mask) % part_len) != 0:
            raise Exception('{0} is not valid mask, should be multiples of {1}'.format(mask, part_len))

        # floor division keeps the part count integral on Python 3
        part_count = int(mask) // part_len

        for part in address.split(delimiter):
            # an empty part (e.g. from '::') stands for zero
            part_value = int(part, base) if part != '' else 0
            part_count -= 1

            if part_count < 0 and part_value != 0:
                raise Exception('address part {0} should be 0'.format(part))

    def ipv4_check(self, a):
        """Raise when an IPv4 address (optionally with /mask) is malformed."""
        XMLValidator.log.debug('validating ipv4 address: {0}'.format(a))

        mask_pos = a.find('/')
        if mask_pos > 0:
            a_mask = a[mask_pos + 1:]
            a = a[:mask_pos]
            self.mask_check(a, a_mask, 10, 8, '.')

        numbers = a.split('.')
        max_range = (2**8) - 1

        for n in numbers:
            if int(n) > max_range:
                raise Exception('octet: {0} in ipv4 address: {1} larger than: {2}'.format(n, a, max_range))

    def ipv6_check(self, a):
        """Raise when an IPv6 address (optionally with /mask) is malformed."""
        XMLValidator.log.debug('validating ipv6 address: {0}'.format(a))
        mask_pos = a.find('/')
        if mask_pos > 0:
            a_mask = a[mask_pos + 1:]
            a = a[:mask_pos]
            self.mask_check(a, a_mask)

        numbers = a.split(':')
        max_range = (2**16) - 1

        for n in numbers:
            # if n == '' then the number is 0000 which is always smaller than max_range
            if n != '' and int(n, 16) > max_range:
                raise Exception('number: {0} in ipv6 address: {1} larger than: {2}'.format(n, a, max_range))

    def check_size(self, value, bits, value_type, convert_from=10):
        """Dispatch *value* to the checker matching value_type and its format;
        any failure marks self.xml_ok False."""
        XMLValidator.log.debug('checking value: {0}, size should be {1} bits'.format(value, bits))
        ipv6_regexp = re.compile("^[0-9,A-F,a-f]{0,4}(:[0-9,A-F,a-f]{0,4}){1,7}(/[0-9]{1,3})?$")
        ipv4_regexp = re.compile("^([0-9]{1,3}\.){3}[0-9]{1,3}(/[0-9]{1,2})?$")
        ethernet_regexp = re.compile("^[0-9,A-F,a-f]{2}(:[0-9,A-F,a-f]{2}){5}$")

        try:
            if value_type == type_boolean and value in ['true', 'false']:  # boolean values
                self.boolean_check(value, bits)
            elif value_type == type_ethernet and ethernet_regexp.match(value):  # ethernet address
                self.ethernet_check(value)
            elif value_type == type_ipv4 and ipv4_regexp.match(value):  # IPV4 address
                self.ipv4_check(value)
            elif value_type == type_ipv6 and ipv6_regexp.match(value):  # IPV6 address
                self.ipv6_check(value)
            elif value_type == type_int:  # integer values
                self.integer_check(value, bits, convert_from)
            else:
                raise Exception

            XMLValidator.log.info('size of: {0} < 2^{1} validated successfully'.format(value, bits))

        except ValueError:
            XMLValidator.log.error('problem converting value to int or IP addresses: {0}'.format(value))
            self.xml_ok = False

        except TypeError:
            XMLValidator.log.error('problem converting value: {0}, TypeError'.format(value))
            self.xml_ok = False

        # the original caught StandardError (Python 2 only); Exception keeps
        # the same 'everything else' role after the specific handlers above
        except Exception as e:
            XMLValidator.log.error('problem checking size for value: {0}, reason: {1}'.format(value, str(e)))
            self.xml_ok = False

    def has_prerequisite(self, key, values, convert_from, flow_dict):
        """Check that flow_dict satisfies one prerequisite (key must exist and,
        unless values == [None], hold one of *values*); failures mark xml_ok False."""
        XMLValidator.log.debug('checking prerequisite: {0} - {1}'.format(key, values))
        try:
            flow_value_raw = flow_dict[key]

            # if prerequisites values are [None] we don't care about actual value
            if values != [None]:
                flow_value = int(flow_value_raw, convert_from)

                if flow_value not in values:
                    raise Exception()

            XMLValidator.log.info('prerequisite {0}: {1} to value {2} validated successfully'.format(key, values, flow_value_raw))

        except KeyError:
            XMLValidator.log.error('can\'t find element: {0} in xml {1} or in keywords {2}'.format(key, self.xml_string, self.mkwd.keys()))
            self.xml_ok = False

        # the original 'except ValueError or TypeError' evaluated to just
        # ValueError; a tuple catches both as intended
        except (ValueError, TypeError):
            # flow_value_raw is string that cannot be converted to decimal or hex number or None
            if flow_value_raw not in values:
                XMLValidator.log.error('can\'t find element: {0} with value value: {1} '
                                       'in expected values {2}'.format(key, flow_value_raw, values))
                self.xml_ok = False
            else:
                XMLValidator.log.info('prerequisite {0}: {1} to value {2} validated successfully'.format(key, values, flow_value_raw))

        except Exception:
            XMLValidator.log.error('can\'t find element: {0} with value value: {1} '
                                   'in expected values {2}'.format(key, flow_value, values))
            self.xml_ok = False

    def check_all_prerequisites(self, prerequisites_dict, convert_from, flow_dict):
        """Run has_prerequisite for every entry of prerequisites_dict."""
        XMLValidator.log.debug('checking prerequisites: {0}'.format(prerequisites_dict))
        for k, v in prerequisites_dict.items():
            self.has_prerequisite(k, v, convert_from, flow_dict)

    def check_single_field(self, field, flow_dict):
        """
        @type field MatchField
        @type flow_dict dict
        """
        if field.key not in flow_dict:
            XMLValidator.log.debug('{0} is not set in XML, skipping validation'.format(field.key))
            return
        else:
            XMLValidator.log.info('validating: {0}'.format(field))

        if field.bits is not None:
            self.check_size(flow_dict[field.key], field.bits, field.value_type, field.convert_from)

        if field.prerequisites is not None:
            self.check_all_prerequisites(field.prerequisites, field.convert_from, flow_dict)

    def validate_fields(self):
        """Validate every registered field against self.flow_dict (resets xml_ok)."""
        self.xml_ok = True
        XMLValidator.log.info('validating against flow: {0}'.format(self.flow_dict))
        for field in self.fields:
            self.check_single_field(field, self.flow_dict)

    def validate_misc_values(self):
        """Check that every plain keyword present in the flow is a non-negative integer."""
        for kw in self.kwd.keys():
            if kw in self.flow_dict.keys():
                XMLValidator.log.info('validating: {0}: {1}'.format(kw, self.flow_dict[kw]))
                try:
                    value = int(self.flow_dict[kw])
                    if value < 0:
                        XMLValidator.log.error('value: {0}: {1} should be non-negative'.format(kw, self.flow_dict[kw]))
                        self.xml_ok = False
                    else:
                        XMLValidator.log.info('value: {0}: {1} validated successfully'.format(kw, self.flow_dict[kw]))
                except Exception:
                    # StandardError in the original; int() raises ValueError,
                    # which Exception still covers
                    XMLValidator.log.error('can\'t convert value: {0}: {1} to integer'.format(kw, self.flow_dict[kw]))
                    self.xml_ok = False
            else:
                XMLValidator.log.debug('{0} is not set in XML, skipping validation'.format(kw))

    def validate(self):
        """Run all validations and return the overall verdict."""
        self.validate_fields()
        self.validate_misc_values()

        XMLValidator.log.info('XML valid: {0}'.format(self.xml_ok))

        return self.xml_ok
+
class Matchers():
    """
    Catalogue of OpenFlow 1.3 match fields (name, bit width, prerequisites,
    value type, numeric base) used to populate an XMLValidator.
    """

    IN_PORT = Field('in-port', 32)
    IN_PHY_PORT = Field('in-phy-port', 32, {'in-port': [None]})
    METADATA = Field('metadata', 64, convert_from=16)

    # BUG FIX: the source/destination labels were swapped in the original
    # (ETH_DST was built from 'ethernet-source' and vice versa).  Both fields
    # are registered below, so the overall validated set is unchanged.
    ETH_DST = Field('ethernet-destination', 48, value_type=type_ethernet)
    ETH_SRC = Field('ethernet-source', 48, value_type=type_ethernet)
    ETH_TYPE = Field('ethernet-type', 16)

    VLAN_VID = Field('vlan-id', 13)
    VLAN_PCP = Field('vlan-pcp', 3, {'vlan-id': [None]})

    # ethernet-type prerequisites: 2048 = IPv4, 34525 = IPv6, 2054 = ARP,
    # 34887/34888 = MPLS unicast/multicast, 35047 = PBB
    IP_DSCP = Field('ip-dscp', 6, {'ethernet-type': [2048, 34525]})
    IP_ENC = Field('ip-ecn', 2, {'ethernet-type': [2048, 34525]})
    IP_PROTO = Field('ip-protocol', 8, {'ethernet-type': [2048, 34525]})

    IPV4_SRC = Field('ipv4-source', 32, {'ethernet-type': [2048]}, value_type=type_ipv4)
    IPV4_DST = Field('ipv4-destination', 32, {'ethernet-type': [2048]}, value_type=type_ipv4)

    # ip-protocol prerequisites: 6 = TCP, 17 = UDP, 132 = SCTP, 1 = ICMP, 58 = ICMPv6
    TCP_SRC = Field('tcp-source-port', 16, {'ip-protocol': [6]})
    TCP_DST = Field('tcp-destination-port', 16, {'ip-protocol': [6]})
    UDP_SRC = Field('udp-source-port', 16, {'ip-protocol': [17]})
    UDP_DST = Field('udp-destination-port', 16, {'ip-protocol': [17]})
    SCTP_SRC = Field('sctp-source-port', 16, {'ip-protocol': [132]})
    SCTP_DST = Field('sctp-destination-port', 16, {'ip-protocol': [132]})
    ICMPV4_TYPE = Field('icmpv4-type', 8, {'ip-protocol': [1]})
    ICMPV4_CODE = Field('icmpv4-code', 8, {'ip-protocol': [1]})

    ARP_OP = Field('arp-op', 16, {'ethernet-type': [2054]})
    ARP_SPA = Field('arp-source-transport-address', 32, {'ethernet-type': [2054]}, value_type=type_ipv4)
    ARP_TPA = Field('arp-target-transport-address', 32, {'ethernet-type': [2054]}, value_type=type_ipv4)
    ARP_SHA = Field('arp-source-hardware-address', 48, {'ethernet-type': [2054]}, value_type=type_ethernet)
    ARP_THA = Field('arp-target-hardware-address', 48, {'ethernet-type': [2054]}, value_type=type_ethernet)

    IPV6_SRC = Field('ipv6-source', 128, {'ethernet-type': [34525]}, value_type=type_ipv6)
    IPV6_DST = Field('ipv6-destination', 128, {'ethernet-type': [34525]}, value_type=type_ipv6)
    IPV6_FLABEL = Field('ipv6-flabel', 20, {'ethernet-type': [34525]})

    ICMPV6_TYPE = Field('icmpv6-type', 8, {'ip-protocol': [58]})
    ICMPV6_CODE = Field('icmpv6-code', 8, {'ip-protocol': [58]})

    # icmpv6-type prerequisites: 135 = neighbor solicitation, 136 = neighbor advertisement
    IPV6_ND_TARGET = Field('ipv6-nd-target', 128, {'icmpv6-type': [135, 136]}, value_type=type_ipv6)
    IPV6_ND_SLL = Field('ipv6-nd-sll', 48, {'icmpv6-type': [135]}, value_type=type_ethernet)
    IPV6_ND_TLL = Field('ipv6-nd-tll', 48, {'icmpv6-type': [136]}, value_type=type_ethernet)

    MPLS_LABEL = Field('mpls-label', 20, {'ethernet-type': [34887, 34888]})
    MPLS_TC = Field('mpls-tc', 3, {'ethernet-type': [34887, 34888]})
    MPLS_BOS = Field('mpls-bos', 1, {'ethernet-type': [34887, 34888]})

    PBB_ISID = Field('pbb-isid', 24, {'ethernet-type': [35047]})
    TUNNEL_ID = Field('tunnel-id', 64)
    IPV6_EXTHDR = Field('ipv6-exthdr', 9, {'ethernet-type': [34525]})


    @staticmethod
    def fill_validator(validator):
        """
        Register every known match field on the given validator.

        @type validator XMLValidator
        """
        # VLAN_VID is deliberately skipped: incorrect XML parsing - when
        # vlan-id-present is present it overrides vlan-id; the loader needs
        # fixing before it can be validated.
        registered_fields = (
            Matchers.IN_PORT,
            Matchers.IN_PHY_PORT,
            Matchers.METADATA,
            Matchers.ETH_DST,
            Matchers.ETH_SRC,
            Matchers.ETH_TYPE,
            Matchers.VLAN_PCP,
            Matchers.IP_DSCP,
            Matchers.IP_ENC,
            Matchers.IP_PROTO,
            Matchers.IPV4_SRC,
            Matchers.IPV4_DST,
            Matchers.TCP_SRC,
            Matchers.TCP_DST,
            Matchers.UDP_SRC,
            Matchers.UDP_DST,
            Matchers.SCTP_SRC,
            Matchers.SCTP_DST,
            Matchers.ICMPV4_TYPE,
            Matchers.ICMPV4_CODE,
            Matchers.ARP_OP,
            Matchers.ARP_SPA,
            Matchers.ARP_TPA,
            Matchers.ARP_SHA,
            Matchers.ARP_THA,
            Matchers.IPV6_SRC,
            Matchers.IPV6_DST,
            Matchers.IPV6_FLABEL,
            Matchers.ICMPV6_TYPE,
            Matchers.ICMPV6_CODE,
            Matchers.IPV6_ND_TARGET,
            Matchers.IPV6_ND_SLL,
            Matchers.IPV6_ND_TLL,
            Matchers.MPLS_LABEL,
            Matchers.MPLS_TC,
            Matchers.MPLS_BOS,
            Matchers.PBB_ISID,
            Matchers.TUNNEL_ID,
            Matchers.IPV6_EXTHDR,
        )
        for match_field in registered_fields:
            validator.add_field(match_field)
+
+
if __name__ == '__main__':

    def load_keyword_csv(file_name):
        """
        Load a ';'-separated two-column csv (lines starting with '#' are
        comments) located three directories above this script into a dict.
        """
        path = os.path.abspath(__file__ + '/../../../' + file_name)
        with open(path) as f:
            return dict(line.strip().split(';') for line in f if not line.startswith('#'))

    # DRY fix: the same parse logic was copy-pasted three times in the original
    keywords = load_keyword_csv('keywords.csv')
    match_keywords = load_keyword_csv('match-keywords.csv')
    action_keywords = load_keyword_csv('action-keywords.csv')

    # validate a fixed batch of sample flow files f1.xml .. f49.xml
    paths_to_xml = ['xmls/f%d.xml' % i for i in range(1, 50)]

    validator = XMLValidator(keywords, action_keywords, match_keywords, logging.ERROR)
    validator.fill_fields()

    for path in paths_to_xml:
        validator.create_dictionaries(path)
        validator.validate()
--- /dev/null
+import unittest
+import os
+import re
+import sys
+import logging
+import time
+import argparse
+import requests
+
+import xml.dom.minidom as md
+from xml.etree import ElementTree as ET
+
+from openvswitch.mininet_tools import MininetTools
+from openvswitch.flow_tools import FlowAdderThread, FlowRemoverThread, MapNames, loglevels, TO_GET, WAIT_TIME, OPERATIONAL_DELAY, FLOWS_PER_SECOND
+from openvswitch.testclass_templates import TestClassAdd, TestClassRemove
+from openvswitch.testclass_components import CheckConfigFlowsComponent, CheckOperFlowsComponent
+
class MultiTest(unittest.TestCase, TestClassAdd, TestClassRemove, CheckConfigFlowsComponent, CheckOperFlowsComponent):
    """
    End-to-end flow removal test: pre-loads dummy flows, adds test flows,
    removes the test flows and checks the controller's operational store
    agrees with the bookkeeping maps.
    """

    log = logging.getLogger('MultiTest')
    total_errors = 0

    # flow-id bookkeeping, one map per named scope; active_map aliases the
    # map currently in use (switched via __set_active_map)
    id_maps = dict()
    active_map = dict()

    def setUp(self):
        MultiTest.log.info('test setup...')
        self.__start_MN()

        MultiTest.id_maps[MapNames.TEST] = dict()
        MultiTest.id_maps[MapNames.DUMMY] = dict()

        MultiTest.active_map = MultiTest.id_maps[MapNames.TEST]


    def tearDown(self):
        MultiTest.log.info('test cleanup...')
        # remove the dummy flows that were left in place during the test
        self.__set_active_map(MapNames.DUMMY)
        self.remover = FlowRemoverThread(self, 1, self.host, self.port, self.net, list(MultiTest.active_map.items()))
        self.remover.run()

        for k, v in MultiTest.id_maps.items():
            if len(v) > 0:
                MultiTest.log.warning('not all flows were deleted, remaining test flows: {0}, from map: {1}'.format(len(v),k))

    def __set_active_map(self, key):
        """Point active_map at the id map registered under key."""
        try:
            MultiTest.active_map = MultiTest.id_maps[key]
        except KeyError as e:
            MultiTest.log.warning('Error switching between map ids: {0}'.format(str(e)))


    def inc_error(self, value=1):
        # callback used by adder/remover threads on failure
        MultiTest.total_errors += value

    def inc_flow(self, flow_id=None, cookie_id=1):
        # callback used by adder threads on success
        if flow_id is not None and cookie_id is not None:
            #we dont care about actual value, just need to store flow_id as unique identifier
            MultiTest.active_map[flow_id] = flow_id

    def delete_flow_from_map(self, flow_id, cookie_id=None):
        # callback used by remover threads on success
        del MultiTest.active_map[flow_id]

    def __start_MN(self):
        self.net = MininetTools.create_network(self.host, self.mn_port)
        self.net.start()
        MultiTest.log.info('mininet stared')
        MultiTest.log.info('waiting {0} seconds...'.format(WAIT_TIME))
        time.sleep(WAIT_TIME)

    def test(self):
        # add dummy flows to test removal when there are already some flows in operational
        self.__set_active_map(MapNames.DUMMY)
        self.adder = FlowAdderThread(self, 0, self.host, self.port, self.net, MultiTest.flows + 1, MultiTest.flows + 11)
        self.adder.run()

        self.__set_active_map(MapNames.TEST)
        self.adder = FlowAdderThread(self, 1, self.host, self.port, self.net, 1, MultiTest.flows + 1)
        self.adder.run()

        # if we didn't manage to get any flows on controller there is no point doing test
        assert len(MultiTest.active_map) > 0, ('Stored flows should be greater than 0, actual is {0}'.format(len(MultiTest.active_map)))

        # check number of flows before deletion
        MultiTest.log.debug('{0} flows are stored by results from threads, {1} errors'.format(len(MultiTest.active_map), MultiTest.total_errors))
        MultiTest.log.debug('{0} flows are stored in controller config'.format(self.check_config_flows(self.host, self.port, self.active_map)))
        MultiTest.log.info('{0} flows are stored in controller operational'.format(self.check_oper_flows_loop(self.host, self.port, self.active_map)))

        self.remover = FlowRemoverThread(self, 0, self.host, self.port, self.net, list(MultiTest.active_map.items()))
        self.remover.run()

        MultiTest.log.info('\n\n---------- preparation finished, running test ----------')

        # check and test after deletion
        flows_oper_after = self.check_oper_flows_loop(self.host, self.port, self.active_map)
        MultiTest.log.debug('{0} flows are stored in controller config'.format(self.check_config_flows(self.host, self.port, self.active_map)))
        MultiTest.log.info('{0} flows are stored in controller operational'.format(flows_oper_after))

        # COMPAT FIX: '<>' is Python-2-only and deprecated by PEP 8; '!=' is identical
        if len(MultiTest.active_map) != 0:
            MultiTest.log.warning('Not all flows added during test have been deleted, ids of remaining flows are: {0}'.format(sorted(MultiTest.active_map)))

        # if we didn't manage to get any flows on controller there is no point doing test
        assert flows_oper_after == len(MultiTest.active_map), 'Number of flows added during test stored in operational should be {0}, is {1}'.format(len(MultiTest.active_map), flows_oper_after)
+
+
if __name__ == '__main__':

    # quiet down the requests library's own logging
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    # parse cmdline arguments
    parser = argparse.ArgumentParser(description='End to end stress tests of flows '
                                     'addition from multiple connections')
    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
                        'odl controller is running (default = 127.0.0.1) ')
    parser.add_argument('--odlport', type=int, default=8080, help='port on '
                        'which odl\'s RESTCONF is listening (default = 8080) ')
    parser.add_argument('--mnport', type=int, default=6653, help='port on '
                        'which odl\'s controller is listening (default = 6653)')
    parser.add_argument('--flows', default=100, help='how many flows will be added'
                        ' (default = 100)')
    parser.add_argument('--log', default='info', help='log level, permitted values are'
                        ' debug/info/warning/error (default = info)')
    args = parser.parse_args()

    logging.basicConfig(level=loglevels.get(args.log, logging.INFO))

    # set host and port of ODL controller for test cases
    MultiTest.port = args.odlport
    MultiTest.host = args.odlhost
    MultiTest.mn_port = args.mnport
    MultiTest.flows = int(args.flows)

    # strip our arguments so unittest does not try to interpret them
    del sys.argv[1:]

    suite = unittest.TestSuite()
    test = MultiTest('test')
    suite.addTest(test)

    try:
        unittest.TextTestRunner(verbosity=2).run(suite)
    finally:
        # always tear mininet down, even when the suite raises
        test.net.stop()
        # COMPAT FIX: print statement -> function call; identical output on
        # Python 2, and no longer a syntax error on Python 3
        print('end')
import logging
import time
import argparse
-import threading
import requests
-from multiprocessing import Process
import xml.dom.minidom as md
from xml.etree import ElementTree as ET
-from odl_tests_new import MininetTools, ParseTools
+from openvswitch.mininet_tools import MininetTools
+from openvswitch.flow_tools import FlowAdderThread, FlowRemoverThread, loglevels, TO_GET, WAIT_TIME, OPERATIONAL_DELAY, FLOWS_PER_SECOND
+from openvswitch.testclass_templates import TestClassAdd, TestClassRemove
+from openvswitch.testclass_components import CheckSwitchDump, CheckOperFlowsComponent, CheckConfigFlowsComponent
# Number of flows handed to each FlowRemoverThread during parallel cleanup.
REMOVE_FLOWS_PER_THREAD = 250
-xml_template = '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>' \
-'<flow xmlns=\"urn:opendaylight:flow:inventory\">' \
- '<strict>false</strict>' \
- '<instructions><instruction><order>0</order><apply-actions><action><order>0</order><dec-nw-ttl/>' \
- '</action></apply-actions></instruction></instructions><table_id>2</table_id>' \
- '<id>'+FLOW_ID_TEMPLATE+'</id><cookie_mask>255</cookie_mask><installHw>false</installHw>' \
- '<match><ethernet-match><ethernet-type><type>2048</type></ethernet-type></ethernet-match>' \
- '<ipv4-destination>'+IPV4DST_TEMPLATE+'</ipv4-destination></match><hard-timeout>'+HARD_TO_TEMPLATE+'</hard-timeout>' \
- '<flags>FlowModFlags [_cHECKOVERLAP=false, _rESETCOUNTS=false, _nOPKTCOUNTS=false, _nOBYTCOUNTS=false, _sENDFLOWREM=false]</flags>' \
- '<cookie>'+COOKIE_TEMPLATE+'</cookie><idle-timeout>34</idle-timeout><flow-name>'+FLOW_NAME_TEMPLATE+'</flow-name><priority>'+PRIORITY_TEMPLATE+'</priority>' \
- '<barrier>false</barrier></flow>'
-
-
-class Tool():
-
- @staticmethod
- def get_flows_string(net=None):
- if net is None:
- return []
-
- switch = net.switches[0]
- output = switch.cmdPrint(
- 'ovs-ofctl -O OpenFlow13 dump-flows %s' % switch.name)
-
- return output.splitlines()[1:]
-
-
-class MultiTest(unittest.TestCase):
class MultiTest(unittest.TestCase, TestClassAdd, TestClassRemove, CheckSwitchDump, CheckOperFlowsComponent, CheckConfigFlowsComponent):
    """
    Stress test: many FlowAdderThreads add flows concurrently, then the test
    compares the thread bookkeeping against the controller's config and
    operational stores and the switch's flow dump; tearDown deletes all added
    flows with a pool of FlowRemoverThreads.
    """

    log = logging.getLogger('MultiTest')
    total_errors = 0

    def setUp(self):
        MultiTest.log.info('test setup...')
        self.threads_count = 50
        self.thread_pool = list()
        # cookie_id -> flow_id for every flow the adder threads stored successfully
        self.active_map = dict()

        self.__start_MN()
        self.__setup_threads()
        self.__run_threads()

    def tearDown(self):
        MultiTest.log.info('test cleanup...')
        MultiTest.total_deleted = 0
        self.__delete_flows()

    def inc_error(self, value=1):
        # callback used by adder threads on failure
        MultiTest.total_errors += value

    def inc_flow(self, flow_id=None, cookie_id=None):
        # callback used by adder threads on success
        if flow_id is not None and cookie_id is not None:
            self.active_map[cookie_id] = flow_id

    def delete_flow_from_map(self, flow_id, cookie_id):
        # callback used by remover threads on success
        del self.active_map[cookie_id]

    def __start_MN(self):
        self.net = MininetTools.create_network(self.host, self.mn_port)
        self.net.start()
        MultiTest.log.info('mininet stared')
        MultiTest.log.info('waiting {0} seconds...'.format(WAIT_TIME))
        time.sleep(WAIT_TIME)

    def __setup_threads(self):
        # each thread gets a disjoint, predetermined flow-id range so no
        # shared counter is needed
        self.threads_count = int(args.threads)
        for i in range(0, self.threads_count):
            t = FlowAdderThread(self, i, self.host, self.port, self.net,
                                flows_ids_from=i*MultiTest.flows + 1, flows_ids_to=(i+1)*MultiTest.flows + 1)
            self.thread_pool.append(t)

    def __run_threads(self):
        # start all threads, then wait for every one to finish
        for t in self.thread_pool:
            t.start()
        for t in self.thread_pool:
            t.join()

    def test(self):
        switch_flows = 0

        total_flows = len(self.active_map.keys())

        # if we didn't manage to get any flows on controller there is no point doing test
        assert total_flows > 0, ('Stored flows should be greater than 0, actual is {0}'.format(total_flows))

        MultiTest.log.info('\n\n---------- preparation finished, running test ----------')

        # check config
        flows_on_controller = self.check_config_flows(self.host, self.port, self.active_map)

        #check operational
        flows_on_controller_operational = self.check_oper_flows_loop(self.host, self.port, self.active_map)

        # check switch: count only dump entries that map back to one of our flow ids
        switch_flows_list = MininetTools.get_flows_string(self.net)
        MultiTest.log.info('flow dump has {0} entries (including informational)'.format(len(switch_flows_list)))
        for item in switch_flows_list:
            if self.get_id_by_entry(item, self.active_map) is not None:
                MultiTest.log.debug('flow_id:{0} = {1}'.format(self.get_id_by_entry(item, self.active_map), item))
                switch_flows += 1

        # print info
        MultiTest.log.info('{0} flows are stored by results from threads, {1} errors'.format(total_flows, MultiTest.total_errors))
        MultiTest.log.info('{0} flows are stored in controller config'.format(flows_on_controller))
        MultiTest.log.info('{0} flows are stored in controller operational'.format(flows_on_controller_operational))
        MultiTest.log.info('{0} flows are stored on switch'.format(switch_flows))

        assert total_flows == switch_flows, 'Added amount of flows to switch should be equal to successfully added flows to controller {0} <> {1}'.format(switch_flows,total_flows)

    def __delete_flows(self):
        MultiTest.log.info('deleting flows added during test')

        # using threads to delete to speed up cleanup; each thread gets a
        # REMOVE_FLOWS_PER_THREAD sized slice of the items
        items_to_delete = list(self.active_map.items())
        self.thread_pool = []
        thread_id = 0
        slice_from = REMOVE_FLOWS_PER_THREAD * thread_id
        slice_to = REMOVE_FLOWS_PER_THREAD * (thread_id + 1)

        total_flows = len(self.active_map.keys())
        total_deleted = 0

        while slice_from < len(items_to_delete):
            self.thread_pool.append(FlowRemoverThread(self, thread_id, self.host, self.port, self.net, items_to_delete[slice_from:slice_to]))
            thread_id += 1
            slice_from = REMOVE_FLOWS_PER_THREAD * thread_id
            slice_to = REMOVE_FLOWS_PER_THREAD * (thread_id + 1)

        for t in self.thread_pool:
            t.start()

        for t in self.thread_pool:
            t.join()

        for t in self.thread_pool:
            total_deleted += t.removed
        MultiTest.log.info('deleted {0} flows'.format(total_deleted))
        # COMPAT FIX: '<>' is Python-2-only; '!=' is identical and portable
        if total_flows != total_deleted:
            raise StandardError('Not all flows have been deleted, flows added'
                                ' during test: {0} <> deleted flows: {1},\nflows ids left on controller: {2}'.format(
                                total_flows, total_deleted, sorted(self.active_map.values())))
if __name__ == '__main__':

    # quiet down the requests library's own logging
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    # parse cmdline arguments
    parser = argparse.ArgumentParser(description='End to end stress tests of flows '
                                     'addition from multiple connections')
    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
                        'odl controller is running (default = 127.0.0.1) ')
    parser.add_argument('--odlport', type=int, default=8080, help='port on '
                        'which odl\'s RESTCONF is listening (default = 8080) ')
    parser.add_argument('--mnport', type=int, default=6653, help='port on '
                        'which odl\'s controller is listening (default = 6653)')
    parser.add_argument('--threads', default=50, help='how many threads '
                        'should be used (default = 50)')
    parser.add_argument('--flows', default=20, help='how many flows will add'
                        ' one thread (default = 20)')
    parser.add_argument('--log', default='info', help='log level, permitted values are'
                        ' debug/info/warning/error (default = info)')
    args = parser.parse_args()

    logging.basicConfig(level=loglevels.get(args.log, logging.INFO))

    # set host and port of ODL controller for test cases
    MultiTest.port = args.odlport
    MultiTest.host = args.odlhost
    # BUG FIX(review): __start_MN reads self.mn_port but nothing here set it,
    # although --mnport is parsed above - presumably an AttributeError at
    # runtime; assigned like the sibling test script does.  TODO confirm.
    MultiTest.mn_port = args.mnport
    MultiTest.flows = int(args.flows)

    # strip our arguments so unittest does not try to interpret them
    del sys.argv[1:]

    suite = unittest.TestSuite()
    test = MultiTest('test')
    suite.addTest(test)

    try:
        unittest.TextTestRunner(verbosity=2).run(suite)
    finally:
        # always tear mininet down, even when the suite raises
        test.net.stop()
        # COMPAT FIX: print statement -> function call; identical output on
        # Python 2, and no longer a syntax error on Python 3
        print('end')