try:
jsonobj = json.loads(args['jsonblob'])
except KeyError:
- print "countnodes: json blob to parse not found"
+ print("countnodes: json blob to parse not found")
raise
if 'subnode' in args:
try:
jsonobj = json.loads(str(args['jsonblob']))
except KeyError:
- print "get_id_by_name: json blob not specified:"
+ print("get_id_by_name: json blob not specified:")
raise
try:
name = args['name']
except KeyError:
- print "get_id_by_name: name [usr, domain, role] not specified in args"
+ print("get_id_by_name: name [usr, domain, role] not specified in args")
raise
if 'head' in args:
try:
datatype = args['typeval']
except KeyError:
- print "get_id_by_name: need a type arg to process correct name for id"
+ print("get_id_by_name: need a type arg to process correct name for id")
raise
try:
try:
jsonobj = json.loads(args['jsonblob'])
except KeyError:
- print "get_attribute_by_id: json blob not specified:"
+ print("get_attribute_by_id: json blob not specified:")
raise
try:
nodeid = args['id']
except KeyError:
- print "get_attribute_by_id: id to look for not specified in parameters"
+ print("get_attribute_by_id: id to look for not specified in parameters")
raise
if 'attr' in args:
try:
datatype = args['typeval']
except KeyError:
- print "get_attribute_by_id: need type arg to process name for id"
+ print("get_attribute_by_id: need type arg to process name for id")
raise
try:
size = args['size']
except KeyError:
- print "get_attribute_by_id: specify number of records we need"
+ print("get_attribute_by_id: specify number of records we need")
raise
typename = datatype + 'id'
raise RuntimeError("Parse failed: " + resp.text)
self.token = token
# TODO: Use logging so that callers could see token refreshes.
- # print "DEBUG: token:", token
# We keep self.session to use for the following restconf requests.
def oneshot_method(self, method, uri, **kwargs):
raise RuntimeError("Parse failed: " + resp.text)
self.token = token
# TODO: Use logging so that callers could see token refreshes.
- # print "DEBUG: token:", token
# We keep self.session to use for the following restconf requests.
def oneshot_method(self, method, uri, **kwargs):
for ip in ips:
i = 1
dict[ip] = None
- print "numOfShards => " + str(numOfShards)
+ print("numOfShards => ", str(numOfShards))
while i <= numOfShards:
shardMemberName = "member-" + str(i) + "-" + shardName
j = 1
- print 'j => ' + str(j)
- print 'numOfTries => ' + str(numOfTries)
+ print('j => ' + str(j))
+ print('numOfTries => ' + str(numOfTries))
while int(j) <= int(numOfTries):
print("Try number " + str(j))
try:
print("getting role of " + ip + " for shardName = " + shardMemberName)
url = SettingsLibrary.getJolokiaURL(ip, str(port), str(i), shardName)
- print url
+ print(url)
resp = UtilLibrary.get(url)
print(resp)
if resp.status_code != 200:
for ip in dict.keys():
if dict[ip] == 'Follower':
result.append(ip)
- print "i=", i, "result=", result
+ print("i=%s result=%s" % (i, result))
if (len(result) == (len(ips) - 1)):
break
sleep(1)
def testGetLeader():
leader = getLeader("shard-inventory-config", 3, 1, 1, 8181,
"10.194.126.116", "10.194.126.117", "10.194.126.118")
- print leader
+ print(leader)
return leader
if __name__ == '__main__':
import doctest
- print doctest.testmod()
+ print(doctest.testmod())
</note>
"""
- print "Buying " + str(numberOfCarBuyers) + " Cars"
+ print("Buying " + str(numberOfCarBuyers) + " Cars")
for x in range(start, start + numberOfCarBuyers):
strId = str(x + 1)
:param num: initial number of switches in the topology
"""
if self._running:
- print 'Mininet topology is already active'
+ print('Mininet topology is already active')
return
cntl, numsw = line.split()
self._topo = mininet.topo.Topo()
def help_start(self):
"""Provide help message for start command"""
- print 'Starts mininet'
- print 'Usage: start <controller_ip> <num>'
- print '\tcontroller_ip - controllers ip or host name'
- print '\tnum - number of switches at start'
+ print('Starts mininet')
+ print('Usage: start <controller_ip> <num>')
+ print('\tcontroller_ip - controllers ip or host name')
+ print('\tnum - number of switches at start')
def do_start_with_cluster(self, line):
"""Starts mininet network with initial number of switches
e.g. 1.1.1.1,2.2.2.2,3.3.3.3 (no spaces)
"""
if self._running:
- print 'Mininet topology is already active'
+ print('Mininet topology is already active')
return
cntls = line.split(',')
for i, cntl_ip in enumerate(cntls):
cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633)
controllers.append(cnt)
- print "contrller {0} created".format(cnt)
+ print("contrller {0} created".format(cnt))
self._net.buildFromTopo(topo=self._topo)
self._net.start()
def help_start_with_cluster(self):
"""Provide help message for start_with_cluster command"""
- print 'Starts mininet with one switch'
- print 'Usage: start <controller_ips>'
- print '\tcontroller_ips - comma separated list of controllers ip or host names'
+ print('Starts mininet with one switch')
+ print('Usage: start <controller_ips>')
+ print('\tcontroller_ips - comma separated list of controllers ip or host names')
def do_start_switches_with_cluster(self, line):
"""Starts mininet network with initial number of switches
e.g. 1.1.1.1,2.2.2.2,3.3.3.3 (no spaces)
"""
if self._running:
- print 'Mininet topology is already active'
+ print('Mininet topology is already active')
return
num, contls = line.split()
cntls = contls.split(',')
for i, cntl_ip in enumerate(cntls):
cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633)
controllers.append(cnt)
- print "contrller {0} created".format(cnt)
+ print("contrller {0} created".format(cnt))
self._net.buildFromTopo(topo=self._topo)
self._net.start()
def help_start_switches_with_cluster(self):
"""Provide help message for start_with_cluster command"""
- print 'Starts mininet with one switch'
- print 'Usage: start <swnr> <controller_ips>'
- print '\tswnt - number of switches in topology'
- print '\tcontroller_ips - comma separated list of controllers ip or host names'
+ print('Starts mininet with a given number of switches')
+ print('Usage: start <swnr> <controller_ips>')
+ print('\tswnr - number of switches in topology')
+ print('\tcontroller_ips - comma separated list of controllers ip or host names')
def do_add_switch(self, line):
"""Adds one switch to the network
def help_add_switch(self):
"""Provide help message for add_switch command"""
- print 'Adds one sinle switch to the running topology'
- print 'Usage: add_switch'
+ print('Adds one single switch to the running topology')
+ print('Usage: add_switch')
def do_add_switches(self, line):
"""Adds switches to the network
def help_add_switches(self):
"""Provide help message for add_switch command"""
- print 'Adds one sinle switch to the running topology'
- print 'Usage: add_switches <num>'
- print '\tnum - number of switches tp be added'
+ print('Adds a number of switches to the running topology')
+ print('Usage: add_switches <num>')
+ print('\tnum - number of switches to be added')
def do_exit(self, line):
"""Stops mininet"""
def help_exit(self):
"""Provide help message for exit command"""
- print 'Exit mininet cli'
- print 'Usage: exit'
+ print('Exit mininet cli')
+ print('Usage: exit')
def do_sh(self, line):
"""Run an external shell command
def help_sh(self, line):
"""Provide help message for sh command"""
- print 'Executes given commandAdds one sinle switch to the running topology'
- print 'Usage: sh <line>'
- print '\tline - command to be executed(e.g. ps -e'
+ print('Executes given command')
+ print('Usage: sh <line>')
+ print('\tline - command to be executed (e.g. ps -e)')
def emptyline(self):
pass
try:
setattr(objA, name, value)
except AttributeError:
- print name, "giving attribute error in", objA
+ print("%s giving attribute error in %s" % (name, objA))
def copy_rloc(objA, objB):
try:
setattr(objA, name, value)
except AttributeError:
- print name, "giving attribute error in", objA
+ print(" %s giving attribute error in" % (name, objA))
def clean_hops(obj):
return Wrap_input(Get_LispAddress_JSON(eid_string, vni))
-def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"):
+def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"):
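+ # Python 3 requires the 0o prefix for octal literals; a bare 001 is a
+ # SyntaxError (0o01 == 1, so the default value is unchanged).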
""" Description: Returns locator record object from pyangbind generated classes
Returns: locator record object
Params:
return lrecord_obj
-def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"):
+def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"):
""" Description: Returns locator record dictionary
Returns: python dictionary
Params:
- loc_id = loc.keys()[0]
+ loc_id = list(loc.keys())[0]
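+ # Python 3 dict views are not indexable, hence the list() wrapper above.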
loc_obj = loc[loc_id]
if loc_id in loc_ids:
- print "Locator objects should have different keys"
+ print("Locator objects should have different keys")
break
# TODO: Locator-id, currently in the format of loc_id0, loc_id1
mrecord_obj.LocatorRecord.add(loc_id)
if len(sys.argv) < 5:
print("Please povide correct inputs. Exiting!!!")
- print "{0} <switch_count> <host_per_switch> <base_mac: Eg:00:4b:00:00:00:00 > \
- <base_ip: Eg:75.75.0.0>".format(sys.argv[0].split('/')[-1])
- print "Dpid of switches is derived from base mac and \
- host ip address is derived from base ip"
+ print("{0} <switch_count> <host_per_switch> <base_mac: Eg:00:4b:00:00:00:00 > \
+ <base_ip: Eg:75.75.0.0>".format(sys.argv[0].split('/')[-1]))
+ print("Dpid of switches is derived from base mac and \
+ host ip address is derived from base ip")
sys.exit(1)
switch_count = int(sys.argv[1])
\nHence generating this python file dynamically\"\"\" \
\nfrom mininet.topo import Topo\nclass DemoTopo(Topo): \
\n'.format(switch_count, switch_count * host_per_switch, sys.argv[0]))
- print "This topology has %d switches %d hosts" \
- % (switch_count, switch_count * host_per_switch)
+ print("This topology has %d switches %d hosts"
+ % (switch_count, switch_count * host_per_switch))
configfile.write(" def __init__(self):\n ")
configfile.write(" # Initialize topology\n")
configfile.write(" Topo.__init__(self)\n")
Returns:
:returns (switches, flows_reported, flows_found): tuple with counts of switches, reported and found flows
"""
- # print type(flow_details), flow_details
active_flows = 0
found_flows = 0
switches = _get_operational_inventory_of_switches(controller)
active_flows += t['opendaylight-flow-table-statistics:flow-table-statistics']['active-flows']
if 'flow' in t:
found_flows += len(t['flow'])
- print "Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows
+ print("Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows)
return len(switches), active_flows, found_flows
if __name__ == '__main__':
topology = Topology()
- print topology.get_nodes_from_topology(2)
- print topology.get_nodes_from_topology('2')
+ print(topology.get_nodes_from_topology(2))
+ print(topology.get_nodes_from_topology('2'))
if __name__ == '__main__':
topologynew = Topologynew()
- # print topologynew.get_nodes_from_tree_topo(2)
- # print topologynew.get_nodes_from_tree_topo('2')
- print topologynew.get_nodes_from_tree_topo('(2,3)')
- # print topologynew.get_ids_of_leaf_nodes(2,2 )#, depth)
+ print(topologynew.get_nodes_from_tree_topo('(2,3)'))
uses the username and password of the controller server for ssh and needs the
karaf distribution location, e.g. /root/Documents/dist
"""
- print "executing ssh command"
+ print("executing ssh command")
lib = SSHLibrary()
lib.open_connection(ip)
lib.login(username=username, password=password)
- print "login done"
+ print("login done")
cmd_response = lib.execute_command(command)
- print "command executed : " + command
+ print("command executed : " + command)
lib.close_connection()
return cmd_response
url = "http://" + ip + ":" + str(port) + \
"/restconf/config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules"
- print "Waiting for controller " + ip + " up."
+ print("Waiting for controller " + ip + " up.")
# Try 30*10s=5 minutes for the controller to be up.
- for i in xrange(30):
+ for i in range(30):
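+ # Python 3's range() is lazy like Python 2's xrange(), so memory behavior
+ # is unchanged by this rename.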
try:
- print "attempt " + str(i) + " to url " + url
+ print("attempt %s to url %s" % (str(i), url))
resp = get(url, "admin", "admin")
- print "attempt " + str(i) + " response is " + str(resp)
- print resp.text
+ print("attempt %s response is %s" % (str(i), str(resp)))
+ print(resp.text)
if ('clustering-it-provider' in resp.text):
- print "Wait for controller " + ip + " succeeded"
+ print("Wait for controller " + ip + " succeeded")
return True
except Exception as e:
- print e
+ print(e)
time.sleep(10)
- print "Wait for controller " + ip + " failed"
+ print("Wait for controller " + ip + " failed")
return False
i = 1
while i <= tries:
stdout = lib.execute_command("ps -axf | grep karaf | grep -v grep | wc -l")
- # print "stdout: "+stdout
processCnt = stdout[0].strip('\n')
print("processCnt: " + processCnt)
if processCnt == '0':
lib.close_connection()
if i > tries:
- print "Killing controller"
+ print("Killing controller")
kill_controller(ip, username, password, karafHome)
cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP'
execute_ssh_command(isolated_controller, username, password, cmd_str)
ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L')
- print ip_tables
+ print(ip_tables)
iso_result = 'pass'
for controller in controllers:
controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
"""
flush_result = 'pass'
for controller in controllers:
- print 'Flushing ' + controller
+ print('Flushing ' + controller)
cmd_str = 'sudo iptables -v -F'
cmd_result = execute_ssh_command(controller, username, password, cmd_str)
- print cmd_result
+ print(cmd_result)
success_string = "Flushing chain `INPUT'" + "\n"
success_string += "Flushing chain `FORWARD'" + "\n"
success_string += "Flushing chain `OUTPUT'"
if not cmd_result == success_string:
flush_result = "Failed to flush IPTables. Check Log."
- print "."
- print "."
- print "."
+ print(".")
+ print(".")
+ print(".")
return flush_result
regroups = re.finditer(pat, text)
outdict = {}
for g in regroups:
- print g.group()
+ print(g.group())
if g.group('key') == '_uuid':
cntl_uuid = g.group('value')
outdict[cntl_uuid] = {}
nodeDict = XMLtoDictParserTools.parseTreeToDict(node)
XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict)
index += 1
- # print nodeDict
- # print origDict
if nodeDict == origDict:
return True, ''
if nodeDict['flow']['priority'] == origDict['flow']['priority']:
def is_flow_operational2(self, requested_flow, oper_resp, check_id=False):
def _rem_unimplemented_tags(tagpath, recurs, tdict):
- # print "_rem_unimplemented_tags", tagpath, tdict
if len(tagpath) > 1 and tagpath[0] in tdict:
_rem_unimplemented_tags(tagpath[1:], recurs, tdict[tagpath[0]])
del tdict[tagpath[0]]
- if tdict.keys() == ['order']:
+ if list(tdict.keys()) == ['order']:
del tdict['order']
- # print "leaving", tdict
def _add_tags(tagpath, newtag, value, tdict):
'''if whole tagpath exists and the tag is not present, it is added with given value'''
- # print "_add_tags", tagpath, newtag, value, tdict
if len(tagpath) > 0 and tagpath[0] in tdict:
_add_tags(tagpath[1:], newtag, value, tdict[tagpath[0]])
elif len(tagpath) == 0 and newtag not in tdict:
def _to_be_modified_tags(tagpath, tag, related_tag, tdict):
'''if whole tagpath exists and the tag is not present, it is added with given value'''
- # print "_to_be_modified_tags", tagpath, tag, related_tag, tdict
if len(tagpath) > 0 and tagpath[0] in tdict:
_to_be_modified_tags(tagpath[1:], tag, related_tag, tdict[tagpath[0]])
elif len(tagpath) == 0 and tag in tdict and related_tag in tdict:
ignoreList=IGNORED_TAGS_LIST)
XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict)
index += 1
- # print nodeDict
- # print origDict
- # print reportDict
if nodeDict == origDict:
return True, ''
if nodeDict['flow']['priority'] == origDict['flow']['priority']:
for (p, t, rt) in TAGS_TO_MODIFY_FOR_OC:
_to_be_modified_tags(p, t, rt, td)
- # print "comparing1", nodeDict
- # print "comparing2", td
if nodeDict == td:
return True, ''
if nodeDict == origDict:
if args.printDifferences:
for patchline in differences_after_patching:
- print json.dumps(patchline)
+ print(json.dumps(patchline))
- print len(differences_after_patching)
+ print(len(differences_after_patching))
return len(differences_after_patching)
g1 = m.group(1)
subx.append(g1)
ret = "[#%d]" % n
- # print "f1:", g1, ret
return ret
x = re.sub(r"[\['](\??\(.*?\))[\]']", f1, x)
# put expressions back
def f2(m):
g1 = m.group(1)
- # print "f2:", g1
return subx[int(g1)]
x = re.sub(r"#([0-9]+)", f2, x)
def trace(expr, obj, path):
if debug:
- print "trace", expr, "/", path
+ print("trace", expr, "/", path)
if expr:
x = expr.split(';')
loc = x[0]
x = ';'.join(x[1:])
if debug:
- print "\t", loc, type(obj)
+ print("\t", loc, type(obj))
if loc == "*":
def f03(key, loc, expr, obj, path):
if debug > 1:
- print "\tf03", key, loc, expr, path
+ print("\tf03", key, loc, expr, path)
trace(s(key, expr), obj, path)
walk(loc, x, obj, path, f03)
def f04(key, loc, expr, obj, path):
if debug > 1:
- print "\tf04", key, loc, expr, path
+ print("\tf04", key, loc, expr, path)
if isinstance(obj, dict):
if key in obj:
trace(s('..', expr), obj[key], s(path, key))
# [(index_expression)]
if loc.startswith("(") and loc.endswith(")"):
if debug > 1:
- print "index", loc
+ print("index", loc)
e = evalx(loc, obj)
trace(s(e, x), obj, path)
return
# ?(filter_expression)
if loc.startswith("?(") and loc.endswith(")"):
if debug > 1:
- print "filter", loc
+ print("filter", loc)
def f05(key, loc, expr, obj, path):
if debug > 1:
- print "f05", key, loc, expr, path
+ print("f05", key, loc, expr, path)
if isinstance(obj, dict):
eval_result = evalx(loc, obj[key])
else:
# [index,index....]
for piece in re.split(r"'?,'?", loc):
if debug > 1:
- print "piece", piece
+ print("piece", piece)
trace(s(piece, x), obj, path)
else:
store(path, obj)
"""eval expression"""
if debug:
- print "evalx", loc
+ print("evalx", loc)
# a nod to JavaScript. doesn't work for @.name.name.length
# Write len(@.name.name) instead!!!
loc = re.sub(r'(?<!\\)@', "__obj", loc).replace(r'\@', '@')
if not use_eval:
if debug:
- print "eval disabled"
+ print("eval disabled")
raise Exception("eval disabled")
if debug:
- print "eval", loc
+ print("eval", loc)
try:
# eval w/ caller globals, w/ local "__obj"!
v = eval(loc, caller_globals, {'__obj': obj})
- except Exception, e:
+ except Exception as e:
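+ # Python 3 accepts only the 'as' form here; 'except Exception, e' is a
+ # SyntaxError.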
if debug:
- print e
- return False
+ print(e)
+ return False
if debug:
- print "->", v
+ print("->", v)
return v
# body of jsonpath()
try:
# Now, parse the hextets into a 128-bit integer.
- ip_int = 0L
+ ip_int = 0
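+ # Python 3 has a single arbitrary-precision int type, so the Python 2
+ # 'L' suffix is gone; plain 0 behaves identically.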
- for i in xrange(parts_hi):
+ for i in range(parts_hi):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
if __name__ == "__main__":
flows = call_dpctl().split("recirc_id")
for flow in flows:
- print flow
+ print(flow)
try:
socket.inet_aton(ip)
except socket.error:
- print "Error: %s is not a valid IPv4 address of controller!" % ip
+ print("Error: %s is not a valid IPv4 address of controller!" % (ip))
- os.exit(2)
+ sys.exit(2)
call(['sudo', 'ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip])
try:
socket.inet_aton(ip)
except socket.error:
- print "Error: %s is not a valid IPv4 address of manager!" % ip
+ print("Error: %s is not a valid IPv4 address of manager!" % (ip))
- os.exit(2)
+ sys.exit(2)
cmd = ['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % ip]
# prepending zeros to match 16-byte length, e.g. 123 -> 0000000000000123
dpid = filler[:len(filler) - len(dpid)] + dpid
elif len(dpid) > 16:
- print 'DPID: %s is too long' % dpid
+ print('DPID: %s is too long' % dpid)
sys.exit(3)
call(['sudo', 'ovs-vsctl', 'set', 'bridge', name,
'other-config:datapath-id=%s' % dpid])
connect_container_to_switch(
sw['name'], host, containerID)
host['port-name'] = 'vethl-' + host['name']
- print "Created container: %s with IP: %s. Connect using docker attach %s," \
- "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name'])
+ print("Created container: %s with IP: %s. Connect using docker attach %s,"
+ "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name']))
if __name__ == "__main__":
if len(sys.argv) < 2 or len(sys.argv) > 3:
- print "Please, specify IP of ODL and switch index in arguments."
- print "usage: ./infrastructure_launch.py ODL_IP SWITCH_INDEX"
+ print("Please, specify IP of ODL and switch index in arguments.")
+ print("usage: ./infrastructure_launch.py ODL_IP SWITCH_INDEX")
sys.exit(2)
controller = sys.argv[1]
try:
socket.inet_aton(controller)
except socket.error:
- print "Error: %s is not a valid IPv4 address!" % controller
+ print("Error: %s is not a valid IPv4 address!" % (controller))
sys.exit(2)
sw_index = int(sys.argv[2])
- print sw_index
- print switches[sw_index]
+ print(sw_index)
+ print(switches[sw_index])
- if sw_index not in range(0, len(switches) + 1):
+ if sw_index not in range(0, len(switches)):
- print len(switches) + 1
- print "Error: %s is not a valid switch index!" % sw_index
+ print(len(switches) + 1)
+ print("Error: %s is not a valid switch index!" % (sw_index))
sys.exit(2)
sw_type = switches[sw_index]['type']
sw_name = switches[sw_index]['name']
if sw_type == 'gbp':
- print "*****************************"
- print "Configuring %s as a GBP node." % sw_name
- print "*****************************"
+ print("*****************************")
+ print("Configuring %s as a GBP node." % (sw_name))
+ print("*****************************")
- print
+ print()
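+ # A bare 'print' in Python 3 is just a reference to the function and emits
+ # nothing; the blank-line prints in this script must become print().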
launch([switches[sw_index]], hosts, controller)
- print "*****************************"
- print "OVS status:"
- print "-----------"
+ print("*****************************")
+ print("OVS status:")
+ print("-----------")
- print
+ print()
call(['sudo', 'ovs-vsctl', 'show'])
- print
+ print()
- print "Docker containers:"
- print "------------------"
+ print("Docker containers:")
+ print("------------------")
call(['docker', 'ps'])
- print "*****************************"
+ print("*****************************")
elif sw_type == 'sff':
- print "*****************************"
- print "Configuring %s as an SFF." % sw_name
- print "*****************************"
+ print("*****************************")
+ print("Configuring %s as an SFF." % (sw_name))
+ print("*****************************")
call(['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % controller])
- print
+ print()
elif sw_type == 'sf':
- print "*****************************"
- print "Configuring %s as an SF." % sw_name
- print "*****************************"
+ print("*****************************")
+ print("Configuring %s as an SF." % (sw_name))
+ print("*****************************")
call(['%s/sf-config.sh' % os.path.dirname(os.path.realpath(__file__)), '%s' % sw_name])
net.build()
s1.start([c0])
s1.cmd('sudo ovs-vsctl set bridge s1 protocols=OpenFlow13')
- print h1.cmd('./h1-bond0.sh')
- print h2.cmd('./h2-bond0.sh')
+ print(h1.cmd('./h1-bond0.sh'))
+ print(h2.cmd('./h2-bond0.sh'))
CLI(net)
net.stop()
totalTime = 0
for txn in txns:
if txns[txn].totalTime() > timeToComplete:
- print txns[txn]
+ print(txns[txn])
totalTime += txns[txn].totalTime()
- print "Total time for these transactions = " + unicode(totalTime)
+ print("Total time for these transactions = ", unicode(totalTime))
def csv():
txns = processFiles()
- print Transaction.csv_header()
+ print(Transaction.csv_header())
for txn in txns:
- print txns[txn].csv()
+ print(txns[txn].csv())
distribution_name) # noqa
if distribution_ver is None:
- print distribution_name + " is not a valid distribution version." \
- " (Must contain version in the form: " \
- "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>." \
- "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>" \
- "-<name>-RC<#>\", e.g. 0.2.0-SNAPSHOT)"
+ print("%s is not a valid distribution version."
+ " (Must contain version in the form: "
+ "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>."
+ "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>"
+ "-<name>-RC<#>\", e.g. 0.2.0-SNAPSHOT)" % distribution_name)
sys.exit(1)
distribution_ver = distribution_ver.group()
# Copy the distribution to the host and unzip it
odl_file_path = self.dir_name + "/odl.zip"
self.remote.copy_file(self.distribution, odl_file_path)
- self.remote.exec_cmd("unzip -o " + odl_file_path + " -d " +
- self.dir_name + "/")
+ self.remote.exec_cmd("unzip -o " + odl_file_path + " -d "
+ + self.dir_name + "/")
# Rename the distribution directory to odl
- self.remote.exec_cmd("mv " + self.dir_name + "/" +
- distribution_name + " " + self.dir_name + "/odl")
+ self.remote.exec_cmd("mv " + self.dir_name + "/"
+ + distribution_name + " " + self.dir_name + "/odl")
# Copy all the generated files to the server
- self.remote.mkdir(self.dir_name +
- "/odl/configuration/initial")
- self.remote.copy_file(akka_conf, self.dir_name +
- "/odl/configuration/initial/")
- self.remote.copy_file(module_shards_conf, self.dir_name +
- "/odl/configuration/initial/")
- self.remote.copy_file(modules_conf, self.dir_name +
- "/odl/configuration/initial/")
- self.remote.copy_file(features_cfg, self.dir_name +
- "/odl/etc/")
- self.remote.copy_file(jolokia_xml, self.dir_name +
- "/odl/deploy/")
- self.remote.copy_file(management_cfg, self.dir_name +
- "/odl/etc/")
+ self.remote.mkdir(self.dir_name
+ + "/odl/configuration/initial")
+ self.remote.copy_file(akka_conf, self.dir_name
+ + "/odl/configuration/initial/")
+ self.remote.copy_file(module_shards_conf, self.dir_name
+ + "/odl/configuration/initial/")
+ self.remote.copy_file(modules_conf, self.dir_name
+ + "/odl/configuration/initial/")
+ self.remote.copy_file(features_cfg, self.dir_name
+ + "/odl/etc/")
+ self.remote.copy_file(jolokia_xml, self.dir_name
+ + "/odl/deploy/")
+ self.remote.copy_file(management_cfg, self.dir_name
+ + "/odl/etc/")
if datastore_cfg is not None:
self.remote.copy_file(datastore_cfg, self.dir_name + "/odl/etc/")
# Add symlink
- self.remote.exec_cmd("ln -sfn " + self.dir_name + " " +
- args.rootdir + "/deploy/current")
+ self.remote.exec_cmd("ln -sfn " + self.dir_name + " "
+ + args.rootdir + "/deploy/current")
# Run karaf
self.remote.start_controller(self.dir_name)
def main():
# Validate some input
if os.path.exists(args.distribution) is False:
- print args.distribution + " is not a valid file"
+ print("%s is not a valid file" % args.distribution)
sys.exit(1)
if os.path.exists(os.getcwd() + "/templates/" + args.template) is False:
- print args.template + " is not a valid template"
+ print("%s is not a valid template" % args.template)
# Prepare some 'global' variables
hosts = args.hosts.split(",")
replicas = {}
for x in range(0, len(hosts)):
- ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@" +
- hosts[x] + ":2550")
- rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" +
- hosts[x] + ":2551")
+ ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@"
+ + hosts[x] + ":2550")
+ rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@"
+ + hosts[x] + ":2551")
all_replicas.append("member-" + str(x + 1))
for x in range(0, 10):
self.lib.close_connection()
def exec_cmd(self, command):
- print "Executing command " + command + " on host " + self.host
+ print("Executing command %s on host %s" % (command, self.host))
rc = self.lib.execute_command(command, return_rc=True)
if rc[1] != 0:
raise Exception('remote command failed [{0}] with exit code {1}.'
def copy_file(self, src, dest):
if src is None:
- print "src is None not copy anything to " + dest
+ print("src is None not copy anything to ", dest)
return
if os.path.exists(src) is False:
- print "Src file " + src + " was not found"
+ print("Src file " + src + " was not found")
return
- print "Copying " + src + " to " + dest + " on " + self.host
+ print("Copying %s to %s on %s" % (src, dest, self.host))
self.lib.put_file(src, dest)
def kill_controller(self):
parser.add_argument("--skipattr", default=False, action="store_true", help=str_help)
arguments = parser.parse_args()
if arguments.multiplicity < 1:
- print "Multiplicity", arguments.multiplicity, "is not positive."
+ print("Multiplicity", arguments.multiplicity, "is not positive.")
raise SystemExit(1)
# TODO: Are sanity checks (such as asnumber>=0) required?
return arguments
:return: true if no remaining data to send
"""
# We assume there is a msg_out to send and socket is writable.
- # print "going to send", repr(self.msg_out)
self.timer.snapshot()
bytes_sent = self.socket.send(self.msg_out)
# Forget the part of message that was sent.
for t in thread_args:
- thread.start_new_thread(job, (t, rpcqueue, storage))
+ _thread.start_new_thread(job, (t, rpcqueue, storage))
except Exception:
- print "Error: unable to start thread."
+ print("Error: unable to start thread.")
raise SystemExit(2)
if arguments.usepeerip:
url = BASE_URL + "operations/dsbenchmark:cleanup-store"
r = requests.post(url, stream=False, auth=('admin', 'admin'))
- print r.status_code
+ print(r.status_code)
def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
if r.status_code == 200:
- result = dict(result.items() + json.loads(r.content)['output'].items())
+ result = dict(list(result.items()) + list(json.loads(r.content)['output'].items()))
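+ # dict.items() returns a view in Python 3, so both item sequences are
+ # materialized with list() before concatenation.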
else:
- print 'Error %s, %s' % (r.status_code, r.content)
+ print('Error %s, %s' % (r.status_code, r.content))
return result
test run
:return: None
"""
- print '%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' % \
- (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError'])
+ print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
+ (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
total_build_time = 0.0
total_exec_time = 0.0
- print "Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,
- print "Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx
+ print("Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,)
+ print("Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx)
for idx in range(warmup_runs):
res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
print_results('WARMUP', idx, res)
:param value: The (measured) value
:return: none
"""
- plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/' +
- str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
+ plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/'
+ + str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
values[plot_key] = value
f = open('test.csv', 'wt')
try:
start_time = time.time()
- print "Start time: %f " % start_time
+ print("Start time: %f " % (start_time))
writer = csv.writer(f)
# Determine the impact of transaction type, data format and data structure on performance.
# Iterate over all transaction types, data formats, operation types, and different
# list-of-lists layouts; always use a single operation in each transaction
- print '\n#######################################'
- print 'Tx type, data format & data structure'
- print '#######################################'
+ print('\n#######################################')
+ print('Tx type, data format & data structure')
+ print('#######################################')
for tx_type in TX_TYPES:
- print '***************************************'
- print 'Transaction Type: %s' % tx_type
- print '***************************************'
+ print('***************************************')
+ print('Transaction Type: %s' % tx_type)
+ print('***************************************')
writer.writerow((('%s:' % tx_type), '', ''))
for fmt in DATA_FORMATS:
- print '---------------------------------------'
- print 'Data format: %s' % fmt
- print '---------------------------------------'
+ print('---------------------------------------')
+ print('Data format: %s' % fmt)
+ print('---------------------------------------')
writer.writerow(('', ('%s:' % fmt), ''))
for datastore in DATASTORES:
- print
+ print()
- print 'Data store: %s' % datastore
+ print('Data store: %s' % datastore)
- print
+ print()
for oper in OPERATIONS:
- print 'Operation: %s' % oper
+ print('Operation: %s' % oper)
writer.writerow(('', '', '%s:' % oper))
for elem in INNER_ELEMENTS:
# Determine the impact of number of writes per transaction on performance.
# Iterate over all transaction types, data formats, operation types, and
# operations-per-transaction; always use a list of lists where the inner list has one parameter
- print '\n#######################################'
- print 'Puts per tx'
- print '#######################################'
+ print('\n#######################################')
+ print('Puts per tx')
+ print('#######################################')
for tx_type in TX_TYPES:
- print '***************************************'
- print 'Transaction Type: %s' % tx_type
- print '***************************************'
+ print('***************************************')
+ print('Transaction Type: %s' % tx_type)
+ print('***************************************')
writer.writerow((('%s:' % tx_type), '', ''))
for fmt in DATA_FORMATS:
- print '---------------------------------------'
- print 'Data format: %s' % fmt
- print '---------------------------------------'
+ print('---------------------------------------')
+ print('Data format: %s' % fmt)
+ print('---------------------------------------')
writer.writerow(('', ('%s:' % fmt), ''))
for datastore in DATASTORES:
- print
+ print()
- print 'Data store: %s' % datastore
+ print('Data store: %s' % datastore)
- print
+ print()
for oper in OPERATIONS:
- print 'Operation: %s' % oper
+ print('Operation: %s' % oper)
writer.writerow(('', '', '%s:' % oper))
for wtx in OPS_PER_TX:
write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)
end_time = time.time()
- print "End time: %f " % end_time
- print "Total execution time: %f" % (end_time - start_time)
+ print("End time: %f " % (end_time))
+ print("Total execution time: %f" % ((end_time - start_time)))
finally:
f.close()
if r.status_code == 200:
- result = dict(result.items() + json.loads(r.content)['output'].items())
+ result = dict(list(result.items()) + list(json.loads(r.content)['output'].items()))
else:
- print 'Error %s, %s' % (r.status_code, r.content)
+ print('Error %s, %s' % (r.status_code, r.content))
return result
test run
:return: None
"""
- print '%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' % \
+ print('%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' %
(run_type, idx,
res[u'producer-ok'], res[u'producer-error'], res[u'listener-ok'], res[u'producer-rate'],
- res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time'])
+ res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time']))
def run_test(warmup_runs, test_runs, producer_type, producers, listeners, payload_size, iterations):
for lis in args.listeners:
exec_time, prate, lrate = run_test(args.warm, args.run, args.ptype, prod, lis,
args.payload, args.iterations)
- print 'Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate)
+ print('Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate))
lrate_row.append(lrate)
prate_row.append(prate)
lrate_matrix.append(lrate_row)
prate_matrix.append(prate_row)
- print lrate_matrix
- print prate_matrix
+ print(lrate_matrix)
+ print(prate_matrix)
# writer.writerow((('%s:' % args.ptype), '', '', ''))
# writer.writerow(('', exec_time, prate, lrate))
if r.status_code == 200:
- result = dict(result.items() + json.loads(r.content)['output'].items())
+ result = dict(list(result.items()) + list(json.loads(r.content)['output'].items()))
else:
- print 'Error %s, %s' % (r.status_code, r.content)
+ print('Error %s, %s' % (r.status_code, r.content))
return result
test run
:return: None
"""
- print '%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' % \
+ print('%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' %
(run_type, idx,
- res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time'])
+ res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time']))
def run_test(warmup_runs, test_runs, operation, clients, servers, payload_size, iterations):
run_test(args.warm, args.run, args.operation, client, svr, args.payload, args.iterations)
rate_row.append(rate)
rate_matrix.append(rate_row)
- print rate_matrix
+ print(rate_matrix)
writer.writerow(('RPC Rates:', ''))
writer.writerows(rate_matrix)
def handle_sigint(received_signal, frame): # This is a closure as it refers to the counter.
"""Upon SIGINT, print counter contents and exit gracefully."""
signal.signal(signal.SIGINT, signal.SIG_DFL)
- print sorted_repr(counter)
+ print(sorted_repr(counter))
sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)
if len(responses) > 0:
result = responses.popleft()
if result[0] is None:
- print "ERROR|" + result[1] + "|"
+ print("ERROR|" + result[1] + "|")
break
runtime = "%5.3f|%5.3f|%5.3f" % result[1]
- print "%03d|%s|%s|" % (result[0], runtime, result[2])
+ print("%03d|%s|%s|" % ((result[0], runtime, result[2])))
request_count -= 1
continue
time.sleep(args.refresh)
self.start_rloc = netaddr.IPAddress(start_rloc)
self.nmappings = nmappings
if v == "Li" or v == "li":
- print "Using the Lithium RPC URL"
+ print("Using the Lithium RPC URL")
rpc_url = self.RPC_URL_LI
else:
- print "Using the Beryllium and later RPC URL"
+ print("Using the Beryllium and later RPC URL")
rpc_url = self.RPC_URL_BE
self.post_url_template = 'http://' + self.host + ':' \
elif in_args.mode == "get":
mapping_rpc_blaster.get_n_mappings()
else:
- print "Unsupported mode:", in_args.mode
+ print("Unsupported mode:", in_args.mode)
sts = cleanup_config_fl(in_args.host, in_args.port)
exp = 204
else:
- print 'Unknown controller type'
+ print('Unknown controller type')
sys.exit(-1)
if sts != exp:
- print 'Failed to delete nodes in the config space, code %d' % sts
+ print('Failed to delete nodes in the config space, code %d' % sts)
else:
- print 'Nodes in config space deleted.'
+ print('Nodes in config space deleted.')
res = pat_rate.search(line)
if res is not None:
rate.append(res.groups('rate1')[0])
-print rate
+print(rate)
for line in log.splitlines():
res = pat_time.search(line)
if res is not None:
time.append(res.groups('time1')[0])
-print time
+print(time)
text_file = open("rates.csv", "w")
text_file.write('Add,Delete\n')
:return: None
"""
total_delay = 0
- print 'Waiting for stats to catch up:'
+ print('Waiting for stats to catch up:')
with Timer() as t:
while True:
crawler.crawl_inventory()
- print ' %d, %d' % (crawler.reported_flows, crawler.found_flows)
+ print(' %d, %d' % (crawler.reported_flows, crawler.found_flows))
if crawler.found_flows == exp_found or total_delay > timeout:
break
total_delay += delay
time.sleep(delay)
if total_delay < timeout:
- print 'Stats collected in %d seconds.' % t.secs
+ print('Stats collected in %d seconds.' % t.secs)
else:
- print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
+ print('Stats collection did not finish in %d seconds. Aborting...' % total_delay)
if __name__ == "__main__":
reported = ic.reported_flows
found = ic.found_flows
- print 'Baseline:'
- print ' Reported flows: %d' % reported
- print ' Found flows: %d' % found
+ print('Baseline:')
+ print(' Reported flows: %d' % reported)
+ print(' Found flows: %d' % found)
# Run through <CYCLES> add cycles, where <THREADS> threads are started in
# each cycle and <FLOWS> flows are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %d' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows added: %d' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
# Wait for stats to catch up
wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
# in each cycle and <FLOWS> flows previously added in an add cycle are
# deleted in each thread
if in_args.bulk_delete:
- print '\nDeleting all flows in bulk:'
+ print('\nDeleting all flows in bulk:')
sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
if sts != 200:
- print ' Failed to delete flows, code %d' % sts
+ print(' Failed to delete flows, code %d' % sts)
else:
- print ' All flows deleted.'
+ print(' All flows deleted.')
else:
- print '\nDeleting flows one by one\n ',
+ print('\nDeleting flows one by one\n ', end='')
fct.delete_blaster()
- print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
# Wait for stats to catch up back to baseline
wait_for_stats(ic, found, in_args.timeout, in_args.delay)
hosts = self.host.split(",")
host = hosts[flow_count % len(hosts)]
flow_url = self.assemble_post_url(host, node)
- # print flow_url
if not self.auth:
r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
fmod = dict(self.flow_mode_template)
fmod['flow'] = flow_list
flow_data = json.dumps(fmod)
- # print flow_data
return flow_data
def add_flows(self, start_flow_id, tid):
n_nodes = self.get_num_nodes(s)
with self.print_lock:
- print ' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ print(' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes))
nflows = 0
nb_actions = []
ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
with self.print_lock:
- print '\n Thread %d results (ADD): ' % tid
- print ' Elapsed time: %.2fs,' % t.secs
- print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
- print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
- print ' Stats ({Requests}, {Flows}): ',
- print rqst_stats,
- print flow_stats
+ print('\n Thread %d results (ADD): ' % tid)
+ print(' Elapsed time: %.2fs,' % t.secs)
+ print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+ print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
+ print(' Stats ({Requests}, {Flows}): ', end='')
+ print(rqst_stats, end=' ')
+ print(flow_stats)
self.threads_done += 1
s.close()
hosts = self.host.split(",")
host = hosts[flow_count % len(hosts)]
flow_url = self.del_url_template % (host, node, flow_id)
- # print flow_url
if not self.auth:
r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
n_nodes = self.get_num_nodes(s)
with self.print_lock:
- print 'Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ print('Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes))
with Timer() as t:
for flow in range(self.nflows):
ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
with self.print_lock:
- print '\n Thread %d results (DELETE): ' % tid
- print ' Elapsed time: %.2fs,' % t.secs
- print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
- print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
- print ' Stats ({Requests})',
- print rqst_stats
+ print('\n Thread %d results (DELETE): ' % tid)
+ print(' Elapsed time: %.2fs,' % t.secs)
+ print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+ print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
+ print(' Stats ({Requests})', end=' ')
+ print(rqst_stats)
self.threads_done += 1
s.close()
for c in range(self.ncycles):
self.stats = self.FcbStats()
with self.print_lock:
- print '\nCycle %d:' % c
+ print('\nCycle %d:' % c)
threads = []
for i in range(self.nthreads):
thread.join()
with self.print_lock:
- print '\n*** Test summary:'
- print ' Elapsed time: %.2fs' % t.secs
- print ' Peak requests/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
- print ' Peak flows/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
- print ' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ print('\n*** Test summary:')
+ print(' Elapsed time: %.2fs' % t.secs)
+ print(' Peak requests/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate()))
+ print(' Peak flows/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate()))
+ print(' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
self.stats.get_ok_rqsts() / t.secs,
self.stats.get_total_rqsts() / t.secs,
- (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
- print ' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
- self.stats.get_ok_flows() / t.secs,
- self.stats.get_total_flows() / t.secs,
- (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+ (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate()))
+ print(' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ self.stats.get_ok_flows() / t.secs,
+ self.stats.get_total_flows() / t.secs,
+ (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate()))
self.total_ok_flows += self.stats.get_ok_flows()
self.total_ok_rqsts += self.stats.get_ok_rqsts()
keys = ft['flow'][0].keys()
if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
- print 'File "%s" ok to use as flow template' % filename
+ print('File "%s" ok to use as flow template' % filename)
return ft
except ValueError:
- print 'JSON parsing of file %s failed' % filename
+ print('JSON parsing of file %s failed' % filename)
pass
return None
# <flows> are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %s' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows added: %s' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
if in_args.delay > 0:
- print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+ print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
time.sleep(in_args.delay)
# Run through <cycles>, where <threads> are started in each cycle and
# <flows> previously added in an add cycle are deleted in each thread
if in_args.delete:
fct.delete_blaster()
- print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
json_input = {'input': {'bulk-flow-ds-item': flow_list}}
flow_data = json.dumps(json_input)
- # print flow_data
return flow_data
# <flows> are added from each thread
fcbb.add_blaster()
- print '\n*** Total flows added: %s' % fcbb.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts()
+ print('\n*** Total flows added: %s' % fcbb.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts())
if in_args.delay > 0:
- print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+ print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
time.sleep(in_args.delay)
# Run through <cycles>, where <threads> are started in each cycle and
# <flows> previously added in an add cycle are deleted in each thread
if in_args.delete:
fcbb.delete_blaster()
- print '\n*** Total flows deleted: %s' % fcbb.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts()
+ print('\n*** Total flows deleted: %s' % fcbb.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts())
clear_url = 'http://' + self.host + ":" + self.port + '/wm/staticflowpusher/clear/all/json'
r = requests.get(clear_url)
if r.status_code == 200:
- print "All flows cleared before the test"
+ print("All flows cleared before the test")
else:
- print "Failed to clear flows from the controller, your results may vary"
+ print("Failed to clear flows from the controller, your results may vary")
if __name__ == "__main__":
# Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %s' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows added: %s' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
if in_args.delay > 0:
- print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+ print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
time.sleep(in_args.delay)
# Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
# deleted in each thread
if in_args.delete:
fct.delete_blaster()
- print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
reported = ic.reported_flows
found = ic.found_flows
- print 'Baseline:'
- print ' Reported nodes: %d' % reported
- print ' Found nodes: %d' % found
+ print('Baseline:')
+ print(' Reported nodes: %d' % reported)
+ print(' Found nodes: %d' % found)
stats = []
stats.append((time.time(), ic.nodes, ic.reported_flows, ic.found_flows))
# each cycle and <FLOWS> flows are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %d' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows added: %d' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
# monitor stats and save results in the list
for stat_item in monitor_stats(ic, in_args.config_monitor, in_args.monitor_period):
- print stat_item
+ print(stat_item)
stats.append(stat_item)
# Run through <CYCLES> delete cycles, where <THREADS> threads are started
# in each cycle and <FLOWS> flows previously added in an add cycle are
# deleted in each thread
if in_args.bulk_delete:
- print '\nDeleting all flows in bulk:'
+ print('\nDeleting all flows in bulk:')
sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
if sts != 200:
- print ' Failed to delete flows, code %d' % sts
+ print(' Failed to delete flows, code %d' % sts)
else:
- print ' All flows deleted.'
+ print(' All flows deleted.')
else:
- print '\nDeleting flows one by one\n ',
+ print('\nDeleting flows one by one\n ', end='')
fct.delete_blaster()
- print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
# monitor stats and append to the list
for stat_item in monitor_stats(ic, in_args.deconfig_monitor, in_args.monitor_period):
- print stat_item
+ print(stat_item)
stats.append(stat_item)
# if requested, write collected data into the file
"""
self.found_flows += len(flows)
if self.plevel > 1:
- print ' Flows found: %d\n' % len(flows)
+ print(' Flows found: %d\n' % len(flows))
if self.plevel > 2:
for f in flows:
s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
s = s.rstrip('}')
s = s.replace('\n', '\n ')
s = s.lstrip('\n')
- print " Flow %s:" % f['id']
- print s
+ print(" Flow %s:" % (f['id']))
+ print(s)
def crawl_table(self, table):
"""
if active_flows > 0:
self.reported_flows += active_flows
if self.plevel > 1:
- print ' Table %s:' % table['id']
+ print(' Table %s:' % table['id'])
s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
s = s.replace('{\n', '')
s = s.replace('}', '')
- print s
+ print(s)
except KeyError:
if self.plevel > 1:
- print " Stats for Table '%s' not available." % table['id']
+ print(" Stats for Table '%s' not available." % (table['id']))
self.table_stats_unavailable += 1
pass
self.nodes += 1
if self.plevel > 1:
- print "\nNode '%s':" % (node['id'])
+ print("\nNode '%s':" % ((node['id'])))
elif self.plevel > 0:
- print "%s" % (node['id'])
+ print("%s" % ((node['id'])))
try:
tables = node['flow-node-inventory:table']
if self.plevel > 1:
- print ' Tables: %d' % len(tables)
+ print(' Tables: %d' % len(tables))
for t in tables:
self.crawl_table(t)
except KeyError:
if self.plevel > 1:
- print ' Data for tables not available.'
+ print(' Data for tables not available.')
def crawl_inventory(self):
"""
try:
self.crawl_node(sinv[n])
except:
- print 'Can not crawl %s' % sinv[n]['id']
+ print('Can not crawl %s' % sinv[n]['id'])
except KeyError:
- print 'Could not retrieve inventory, response not in JSON format'
+ print('Could not retrieve inventory, response not in JSON format')
else:
- print 'Could not retrieve inventory, HTTP error %d' % r.status_code
+ print('Could not retrieve inventory, HTTP error %d' % r.status_code)
s.close()
ic = InventoryCrawler(in_args.host, in_args.port, in_args.plevel, in_args.datastore, in_args.auth,
in_args.debug)
- print "Crawling '%s'" % ic.url
+ print("Crawling '%s'" % (ic.url))
ic.crawl_inventory()
- print '\nTotals:'
- print ' Nodes: %d' % ic.nodes
- print ' Reported flows: %d' % ic.reported_flows
- print ' Found flows: %d' % ic.found_flows
+ print('\nTotals:')
+ print(' Nodes: %d' % ic.nodes)
+ print(' Reported flows: %d' % ic.reported_flows)
+ print(' Found flows: %d' % ic.found_flows)
if in_args.debug:
n_missing = len(ic.table_stats_fails)
if n_missing > 0:
- print '\nMissing table stats (%d nodes):' % n_missing
- print "%s\n" % ", ".join([x for x in ic.table_stats_fails])
+ print('\nMissing table stats (%d nodes):' % n_missing)
+ print("%s\n" % (", ".join([x for x in ic.table_stats_fails])))
results = {}
with print_lock:
- print 'Thread %d: Getting %s' % (tnum, url)
+ print('Thread %d: Getting %s' % (tnum, url))
s = requests.Session()
with Timer() as t:
total_mb_rate.increment(mrate)
with print_lock:
- print '\nThread %d: ' % tnum
- print ' Elapsed time: %.2f,' % t.secs
- print ' Requests: %d, Requests/sec: %.2f' % (total, rate)
- print ' Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate)
- print ' Results: ',
- print results
+ print('\nThread %d: ' % tnum)
+ print(' Elapsed time: %.2f,' % t.secs)
+ print(' Requests: %d, Requests/sec: %.2f' % (total, rate))
+ print(' Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate))
+ print(' Results: ', end='')
+ print(results)
with cond:
cond.notifyAll()
cond.wait()
finished = finished + 1
- print '\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
- total_req_rate.value)
- print 'Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
- total_mb_rate.value)
+ print('\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
+ total_req_rate.value))
+ print('Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
+ total_mb_rate.value))
# get_inventory(url, getheaders, int(in_args.requests))
stats[r.status_code] = stats.get(r.status_code, 0) + 1
with print_lock:
- print ' ', threading.current_thread().name, 'results:', stats
+ print(' %s results: %s' % (threading.current_thread().name, stats))
results_queue.put(stats)
# Aggregate the results
stats = functools.reduce(operator.add, map(collections.Counter, results.queue))
- print '\n*** Test summary:'
- print ' Elapsed time: %.2fs' % t.secs
- print ' HTTP[OK] results: %d\n' % stats[200]
+ print('\n*** Test summary:')
+ print(' Elapsed time: %.2fs' % t.secs)
+ print(' HTTP[OK] results: %d\n' % stats[200])
parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""')
in_args = parser.parse_args(*argv)
- print in_args
+ print(in_args)
# get device ids
base_dev_ids = get_device_ids(controller=in_args.host)
base_num_flows = len(base_flow_ids)
- print "BASELINE:"
- print " devices:", len(base_dev_ids)
- print " flows :", base_num_flows
+ print("BASELINE:")
+ print(" devices:", len(base_dev_ids))
+ print(" flows :", base_num_flows)
# lets fill the queue for workers
nflows = 0
else:
result[k] += v
- print "Added", in_args.flows, "flows in", tmr.secs, "seconds", result
+ print("Added", in_args.flows, "flows in", tmr.secs, "seconds", result)
add_details = {"duration": tmr.secs, "flows": len(flow_details)}
# lets print some stats
- print "\n\nStats monitoring ..."
+ print("\n\nStats monitoring ...")
rounds = 200
with Timer() as t:
for i in range(rounds):
reported_flows = len(get_flow_ids(controller=in_args.host))
expected_flows = base_num_flows + in_args.flows
- print "Reported Flows: %d/%d" % (reported_flows, expected_flows)
+ print("Reported Flows: %d/%d" % ((reported_flows, expected_flows)))
if reported_flows >= expected_flows:
break
time.sleep(1)
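+ # FIXME: 'i < rounds' below looks always true (i tops out at rounds - 1), so the aborted branch seems unreachable.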
if i < rounds:
- print "... monitoring finished in +%d seconds\n\n" % t.secs
+ print("... monitoring finished in +%d seconds\n\n" % (t.secs))
else:
- print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+ print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
if in_args.no_delete:
return
# sleep in between
time.sleep(in_args.timeout)
- print "Flows to be removed: %d" % len(flow_details)
+ print("Flows to be removed: %d" % (len(flow_details)))
# lets fill the queue for workers
sendqueue = Queue.Queue()
for fld in flow_details:
else:
result[k] += v
- print "Removed", len(flow_details), "flows in", tmr.secs, "seconds", result
+ print("Removed", len(flow_details), "flows in", tmr.secs, "seconds", result)
del_details = {"duration": tmr.secs, "flows": len(flow_details)}
-# # lets print some stats
-# print "\n\nSome stats monitoring ...."
-# for i in range(100):
-# print get_flow_simple_stats(controller=in_args.host)
-# time.sleep(5)
-# print "... monitoring finished\n\n"
- # lets print some stats
- print "\n\nStats monitoring ..."
+ print("\n\nStats monitoring ...")
rounds = 200
with Timer() as t:
for i in range(rounds):
reported_flows = len(get_flow_ids(controller=in_args.host))
expected_flows = base_num_flows
- print "Reported Flows: %d/%d" % (reported_flows, expected_flows)
+ print("Reported Flows: %d/%d" % ((reported_flows, expected_flows)))
if reported_flows <= expected_flows:
break
time.sleep(1)
if i < rounds:
- print "... monitoring finished in +%d seconds\n\n" % t.secs
+ print("... monitoring finished in +%d seconds\n\n" % (t.secs))
else:
- print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+ print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
if in_args.outfile != "":
addrate = add_details['flows'] / add_details['duration']
delrate = del_details['flows'] / del_details['duration']
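+ # Rates are flows per second; they feed the "AddRate,DeleteRate" CSV written below.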
- print "addrate", addrate
- print "delrate", delrate
+ print("addrate", addrate)
+ print("delrate", delrate)
with open(in_args.outfile, "wt") as fd:
fd.write("AddRate,DeleteRate\n")
if rsp.status_code != 200:
return
flows = json.loads(rsp.content)['flows']
- # print "Flows", flows
- # print "Details", flow_details
for dev_id, ip in flow_details:
- # print "looking for details", dev_id, ip
for f in flows:
# lets identify if it is our flow
if f["treatment"]["instructions"][0]["type"] != "DROP":
- # print "NOT DROP"
continue
if f["deviceId"] == dev_id:
if "ip" in f["selector"]["criteria"][0]:
item_idx = 1
else:
continue
- # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)):
- # print dev_id, ip, f
yield dev_id, f["id"]
break
if rsp.status_code != 200:
return
flows = json.loads(rsp.content)['flows']
- # print "Flows", flows
- # print "Details", flow_details
for f in flows:
# lets identify if it is our flow
if f["treatment"]["instructions"][0]["type"] != "NOACTION":
- # print "NOT DROP"
continue
if "ip" in f["selector"]["criteria"][0]:
item_idx = 0
elif "ip" in f["selector"]["criteria"][1]:
item_idx = 1
else:
continue
- # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
ipstr = f["selector"]["criteria"][item_idx]["ip"]
if '10.' in ipstr and '/32' in ipstr:
- # print dev_id, ip, f
yield (f["deviceId"], f["id"])
help='Port on which onos\'s RESTCONF is listening (default is 8181)')
in_args = parser.parse_args(*argv)
- print in_args
+ print(in_args)
# get device ids
base_dev_ids = get_device_ids(controller=in_args.host)
# prepare func
preparefnc = _prepare_post # noqa # FIXME: This script seems to be unfinished!
- print "BASELINE:"
- print " devices:", len(base_dev_ids)
- print " flows :", len(base_flow_ids)
+ print("BASELINE:")
+ print(" devices:", len(base_dev_ids))
+ print(" flows :", len(base_flow_ids))
# lets print some stats
- print "\n\nSome stats monitoring ...."
- print get_flow_simple_stats(controller=in_args.host)
+ print("\n\nSome stats monitoring ....")
+ print(get_flow_simple_stats(controller=in_args.host))
if __name__ == "__main__":
if rsp.status_code != 200:
return
flows = json.loads(rsp.content)['flows']
- # print "Flows", flows
- # print "Details", flow_details
for dev_id, ip in flow_details:
- # print "looking for details", dev_id, ip
for f in flows:
# lets identify if it is our flow
if f["treatment"]["instructions"][0]["type"] != "DROP":
- # print "NOT DROP"
continue
if f["deviceId"] == dev_id:
if "ip" in f["selector"]["criteria"][0]:
item_idx = 1
else:
continue
- # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)):
- # print dev_id, ip, f
yield dev_id, f["id"]
break
if rsp.status_code != 200:
return
flows = json.loads(rsp.content)['flows']
- # print "Flows", flows
- # print "Details", flow_details
for f in flows:
# lets identify if it is our flow
if f["treatment"]["instructions"][0]["type"] != "NOACTION":
- # print "NOT DROP"
continue
if "ip" in f["selector"]["criteria"][0]:
item_idx = 0
elif "ip" in f["selector"]["criteria"][1]:
item_idx = 1
else:
continue
- # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
ipstr = f["selector"]["criteria"][item_idx]["ip"]
if '10.' in ipstr and '/32' in ipstr:
- # print dev_id, ip, f
yield (f["deviceId"], f["id"])
parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""')
in_args = parser.parse_args(*argv)
- print in_args
+ print(in_args)
# get device ids
base_dev_ids = get_device_ids(controller=in_args.host)
base_num_flows = len(base_flow_ids)
- print "BASELINE:"
- print " devices:", len(base_dev_ids)
- print " flows :", base_num_flows
+ print("BASELINE:")
+ print(" devices:", len(base_dev_ids))
+ print(" flows :", base_num_flows)
# lets fill the queue for workers
nflows = 0
else:
result[k] += v
- print "Added", in_args.flows, "flows in", tmr.secs, "seconds", result
+ print("Added", in_args.flows, "flows in", tmr.secs, "seconds", result)
add_details = {"duration": tmr.secs, "flows": len(flow_details)}
# lets print some stats
- print "\n\nStats monitoring ..."
+ print("\n\nStats monitoring ...")
rounds = 200
with Timer() as t:
for i in range(rounds):
flow_stats = get_flow_simple_stats(controller=in_args.host)
- print flow_stats
+ print(flow_stats)
try:
pending_adds = int(flow_stats[u'PENDING_ADD']) # noqa # FIXME: Print this somewhere.
except KeyError:
time.sleep(1)
if i < rounds:
- print "... monitoring finished in +%d seconds\n\n" % t.secs
+ print("... monitoring finished in +%d seconds\n\n" % (t.secs))
else:
- print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+ print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
if in_args.no_delete:
return
# for a in get_flow_device_pairs(controller=in_args.host, flow_details=flow_details):
for a in get_flow_to_remove(controller=in_args.host):
flows_remove_details.append(a)
- print "Flows to be removed: ", len(flows_remove_details)
+ print("Flows to be removed: ", len(flows_remove_details))
# lets fill the queue for workers
nflows = 0
else:
result[k] += v
- print "Removed", len(flows_remove_details), "flows in", tmr.secs, "seconds", result
+ print("Removed", len(flows_remove_details), "flows in", tmr.secs, "seconds", result)
del_details = {"duration": tmr.secs, "flows": len(flows_remove_details)}
-# # lets print some stats
-# print "\n\nSome stats monitoring ...."
-# for i in range(100):
-# print get_flow_simple_stats(controller=in_args.host)
-# time.sleep(5)
-# print "... monitoring finished\n\n"
- # lets print some stats
- print "\n\nStats monitoring ..."
+ print("\n\nStats monitoring ...")
rounds = 200
with Timer() as t:
for i in range(rounds):
flow_stats = get_flow_simple_stats(controller=in_args.host)
- print flow_stats
+ print(flow_stats)
try:
pending_rems = int(flow_stats[u'PENDING_REMOVE']) # noqa # FIXME: Print this somewhere.
except KeyError:
time.sleep(1)
if i < rounds:
- print "... monitoring finished in +%d seconds\n\n" % t.secs
+ print("... monitoring finished in +%d seconds\n\n" % (t.secs))
else:
- print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+ print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
if in_args.outfile != "":
addrate = add_details['flows'] / add_details['duration']
delrate = del_details['flows'] / del_details['duration']
- print "addrate", addrate
- print "delrate", delrate
+ print("addrate", addrate)
+ print("delrate", delrate)
with open(in_args.outfile, "wt") as fd:
fd.write("AddRate,DeleteRate\n")
data = sys.stdin.readlines()
payload = json.loads(data.pop(0))
s = json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
- print '%s\n\n' % s
+ print('%s\n\n' % s)
s = requests.Session()
with self.print_lock:
- print ' Thread %d: Performing %d requests' % (tid, self.requests)
+ print(' Thread %d: Performing %d requests' % (tid, self.requests))
with Timer() as t:
for r in range(self.requests):
total_rate = sum(res.values()) / t.secs
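+ # total_rate counts every response regardless of status; ok_rate (printed below) presumably tracks only successful ones.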
with self.print_lock:
- print 'Thread %d done:' % tid
- print ' Time: %.2f,' % t.secs
- print ' Success rate: %.2f, Total rate: %.2f' % (ok_rate, total_rate)
- print ' Per-thread stats: ',
- print res
+ print('Thread %d done:' % tid)
+ print(' Time: %.2f,' % t.secs)
+ print(' Success rate: %.2f, Total rate: %.2f' % (ok_rate, total_rate))
+ print(' Per-thread stats: ', end='')
+ print(res)
self.threads_done += 1
self.total_rate += total_rate
self.cond.wait()
# Print summary results. Each worker prints its owns results too.
- print '\nSummary Results:'
- print ' Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs)
- print ' Requests/sec (measured): %.2f' % ((self.threads * self.requests) / t.secs)
- print ' Time: %.2f' % t.secs
+ print('\nSummary Results:')
+ print(' Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs))
+ print(' Requests/sec (measured): %.2f' % ((self.threads * self.requests) / t.secs))
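+ # FIXME: total_sum and measured print the same expression; measured likely should report self.total_rate instead.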
+ print(' Time: %.2f' % t.secs)
self.threads_done = 0
if self.plevel > 0:
- print ' Per URL Counts: ',
+ print(' Per URL Counts: ', end='')
for i in range(len(urls)):
- print '%d' % self.url_counters[i].value,
- print '\n'
+ print('%d' % self.url_counters[i].value, end=' ')
+ print('\n')
class TestUrlGenerator(object):
:param data: Bulk resource data (JSON) from which to generate the URLs
:return: List of generated Resources
"""
- print "Abstract class '%s' should never be used standalone" % self.__class__.__name__
+ print("Abstract class '%s' should never be used standalone" % (self.__class__.__name__))
return []
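+ # Concrete generators (TopoUrlGenerator and InvUrlGenerator, used below) are expected to supply real URL builders.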
def generate(self):
r = requests.get(t_url, headers=headers, stream=False, auth=('admin', 'admin'))
if r.status_code != 200:
- print "Failed to get HTTP response from '%s', code %d" % (t_url, r.status_code)
+ print("Failed to get HTTP response from '%s', code %d" % ((t_url, r.status_code)))
else:
try:
r_url = self.url_generator(json.loads(r.content))
except:
- print "Failed to get json from '%s'. Please make sure you are connected to mininet." % r_url
+ print("Failed to get json from '%s'. Please make sure you are connected to mininet." % (r_url))
return r_url
url_list.append(t_url)
return url_list
except KeyError:
- print 'Error parsing topology json'
+ print('Error parsing topology json')
return []
url_list.append(i_url)
return url_list
except KeyError:
- print 'Error parsing inventory json'
+ print('Error parsing inventory json')
return []
tg = TopoUrlGenerator(in_args.host, in_args.port, in_args.auth)
topo_urls += tg.generate()
if len(topo_urls) == 0:
- print 'Failed to generate topology URLs'
+ print('Failed to generate topology URLs')
sys.exit(-1)
# If required, get inventory resource URLs
ig = InvUrlGenerator(in_args.host, in_args.port, in_args.auth)
inv_urls += ig.generate()
if len(inv_urls) == 0:
- print 'Failed to generate inventory URLs'
+ print('Failed to generate inventory URLs')
sys.exit(-1)
if in_args.resource == 'topo+inv' or in_args.resource == 'all':
# To have balanced test results, the number of URLs for topology and inventory must be the same
if len(topo_urls) != len(inv_urls):
- print "The number of topology and inventory URLs don't match"
+ print("The number of topology and inventory URLs don't match")
sys.exit(-1)
st = ShardPerformanceTester(in_args.host, in_args.port, in_args.auth, in_args.threads, in_args.requests,
in_args.plevel)
if in_args.resource == 'all' or in_args.resource == 'topo':
- print '==================================='
- print 'Testing topology shard performance:'
- print '==================================='
+ print('===================================')
+ print('Testing topology shard performance:')
+ print('===================================')
st.run_test(topo_urls)
if in_args.resource == 'all' or in_args.resource == 'inv':
- print '===================================='
- print 'Testing inventory shard performance:'
- print '===================================='
+ print('====================================')
+ print('Testing inventory shard performance:')
+ print('====================================')
st.run_test(inv_urls)
if in_args.resource == 'topo+inv' or in_args.resource == 'all':
- print '==============================================='
- print 'Testing combined shards (topo+inv) performance:'
- print '==============================================='
+ print('===============================================')
+ print('Testing combined shards (topo+inv) performance:')
+ print('===============================================')
st.run_test(topo_urls + inv_urls)
data = patch_data_template.substitute(mapping)
response = session.put(url=url, auth=auth, headers=headers, data=data)
if response.status_code not in [200, 201, 204]:
- print "status: {}".format(response.status_code)
- print "text: {}".format(response.text)
+ print("status: {}".format(response.status_code))
+ print("text: {}".format(response.text))
sys.exit(1)
'node-id': 'ovsdb://%s:%s'
% (vswitch_ip,
vswitch_ovsdb_port),
- 'post-url': urlprefix +
- OvsdbConfigBlaster.return_ovsdb_url(
+ 'post-url': urlprefix
+ + OvsdbConfigBlaster.return_ovsdb_url(
vswitch_ip,
vswitch_ovsdb_port),
- 'get-config-url': urlprefix +
- OvsdbConfigBlaster.return_ovsdb_url(
+ 'get-config-url': urlprefix
+ + OvsdbConfigBlaster.return_ovsdb_url(
vswitch_ip,
vswitch_ovsdb_port),
- 'get-oper-url': urlprefix +
- OvsdbConfigBlaster.return_ovsdb_url(
+ 'get-oper-url': urlprefix
+ + OvsdbConfigBlaster.return_ovsdb_url(
vswitch_ip,
vswitch_ovsdb_port)}})
}
self.send_rest(self.session,
self.vswitch_dict[vswitch_name]
- .get('post-url') +
- '%2Fbridge%2F' +
- bridge_name,
+ .get('post-url')
+ + '%2Fbridge%2F'
+ + bridge_name,
add_bridge_body)
self.session.close()
- bridge_name = unicode('br-' + str(br_num) + '-test')
+ bridge_name = 'br-' + str(br_num) + '-test'
self.send_rest_del(self.session,
self.vswitch_dict[vswitch_names]
- .get('post-url') +
- '%2Fbridge%2F' +
- bridge_name)
+ .get('post-url')
+ + '%2Fbridge%2F'
+ + bridge_name)
self.session.close()
def delete_port(self, num_ports):
else:
ovsdb_config_blaster.add_port()
else:
- print "please use: python ovsdbconfigblaster.py --help " \
- "\nUnsupported mode: ", args.mode
+ print("please use: python ovsdbconfigblaster.py --help "
+ "\nUnsupported mode: ", args.mode)
list_data[1] = pcc_ip
list_data[4] = pcc_ip
whole_data = ''.join(list_data)
- # print 'DEBUG:', whole_data + '\n'
worker = (lsp * pccs + pcc) % workers
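+ # Modulo round-robin: consecutive (pcc, lsp) messages are spread evenly across the worker queues.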
post_kwargs = {"data": whole_data, "headers": headers}
yield worker, post_kwargs
def classify(resp_tuple):
"""Return 'pass' or a reason what is wrong with response."""
- # print 'DEBUG: received', response
prepend = ''
status = resp_tuple[0]
- # print 'DEBUG: verifying status', status
if (status != 200) and (status != 204): # is it int?
- # print 'DEBUG:', response.content
prepend = 'status: ' + str(status) + ' '
content = resp_tuple[1]
- # print 'DEBUG: verifying content', content
if prepend or (content != expected and content != ''):
return prepend + 'content: ' + str(content)
return 'pass'
# Main.
list_q_msg = [collections.deque() for _ in range(args.workers)]
for worker, post_kwargs in iterable_msg(args.pccs, args.lsps, args.workers, args.hop):
- # print 'DEBUG: worker', repr(worker), 'message', repr(message)
list_q_msg[worker].append(post_kwargs)
queue_responses = collections.deque() # thread safe
threads = []
threads.append(thread)
tasks = sum(map(len, list_q_msg)) # fancy way of counting, should equal to pccs*lsps.
counter = CounterDown(tasks)
-print 'work is going to start with', tasks, 'tasks'
+print('work is going to start with %s tasks' % tasks)
time_start = time.time()
for thread in threads:
thread.start()
continue
left = len(queue_responses)
if left:
- print 'error: more responses left inqueue', left
+ print('error: more responses left in queue', left)
else:
- print 'Time is up!'
+ print('Time is up!')
left = len(queue_responses) # can be still increasing
- # if left:
- # print 'WARNING: left', left
for _ in range(left):
resp_tuple = queue_responses.popleft() # thread safe
result = classify(resp_tuple)
break # may leave late items in queue_responses
time_stop = time.time()
timedelta_duration = time_stop - time_start
-print 'took', timedelta_duration
-print repr(counter.counter)
+print('took', timedelta_duration)
+print(repr(counter.counter))
# for message in debug_list:
# print message
pyplot.subplots_adjust(hspace=.7)
else:
pyplot.subplots_adjust(hspace=.7)
- print "WARNING: That's a lot of graphs. Add a second column?"
+ print("WARNING: That's a lot of graphs. Add a second column?")
pyplot.show()
# Print stats