honeynode_executable = simulators.honeynode_executable
samples_directory = simulators.samples_directory
-HONEYNODE_OK_START_MSG = re.escape("Netconf SSH endpoint started successfully at 0.0.0.0")
-KARAF_OK_START_MSG = re.escape("Blueprint container for bundle "
- "org.opendaylight.netconf.restconf") + ".* was successfully created"
+# NOTE(review): this string is compiled as a regex by wait_until_log_contains;
+# the unescaped dots in "0.0.0.0" still match literal dots, so dropping
+# re.escape() is harmless here, but re-escape it if an exact match ever matters.
+HONEYNODE_OK_START_MSG = "Netconf SSH endpoint started successfully at 0.0.0.0"
+# Literal bundle name is escaped; the trailing ".*" is a deliberate regex wildcard.
+KARAF_OK_START_MSG = re.escape(
+    "Blueprint container for bundle org.opendaylight.netconf.restconf")+".* was successfully created"
-TYPE_APPLICATION_JSON = {'content-type': 'application/json'}
+
+# RESTCONF endpoint and credentials shared by the helper functions below.
+RESTCONF_BASE_URL = "http://localhost:8181/restconf"
+ODL_LOGIN = "admin"
+ODL_PWD = "admin"
+NODES_LOGIN = "admin"
+NODES_PWD = "admin"
+
+TYPE_APPLICATION_JSON = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+TYPE_APPLICATION_XML = {'Content-Type': 'application/xml', 'Accept': 'application/xml'}
+
+CODE_SHOULD_BE_200 = 'Http status code should be 200'
+CODE_SHOULD_BE_201 = 'Http status code should be 201'
log_directory = os.path.dirname(os.path.realpath(__file__))
process_list = []
+# Lighty and Karaf write their logs to different files; resolve the right one once.
+if os.environ.get('USE_LIGHTY') == 'True':
+    tpce_log = 'odl.log'
+else:
+    tpce_log = karaf_log
def start_sims(sims_list):
    for sim in sims_list:
def start_tpce():
+    # Start the TransportPCE controller: Lighty flavour when USE_LIGHTY=True,
+    # Karaf otherwise (only the Karaf path is health-checked via its log).
-    print("starting opendaylight...")
+    print("starting OpenDaylight...")
    if "USE_LIGHTY" in os.environ and os.environ['USE_LIGHTY'] == 'True':
        process = start_lighty()
        # TODO: add some sort of health check similar to Karaf below
    else:
        process = start_karaf()
+        # Karaf is considered up once its log reports the RESTCONF blueprint
+        # container was created (see KARAF_OK_START_MSG).
        if wait_until_log_contains(karaf_log, KARAF_OK_START_MSG, time_to_wait=60):
-            print("opendaylight started")
+            print("OpenDaylight started !")
        else:
-            print("opendaylight failed to start")
+            print("OpenDaylight failed to start !")
            shutdown_process(process)
    for pid in process_list:
        shutdown_process(pid)
            auth=(username, password))
-def generate_connect_data(node_id: str, node_port: str):
-    data = {"node": [{
+def mount_device(node_id, sim):
+    """Mount simulated device *sim* under *node_id* in the netconf topology.
+
+    Issues a RESTCONF PUT, then waits (up to 60 s) for tpce to log that the
+    node's notification stream was triggered.  Returns the PUT response.
+    """
+    url = ("{}/config/network-topology:network-topology/topology/topology-netconf/node/"
+           + node_id).format(RESTCONF_BASE_URL)
+    # NOTE(review): renamed from 'headers' — this dict is the JSON payload of
+    # the PUT request, not HTTP headers.
+    body = {"node": [{
        "node-id": node_id,
-        "netconf-node-topology:username": "admin",
-        "netconf-node-topology:password": "admin",
+        "netconf-node-topology:username": NODES_LOGIN,
+        "netconf-node-topology:password": NODES_PWD,
        "netconf-node-topology:host": "127.0.0.1",
-        "netconf-node-topology:port": node_port,
+        "netconf-node-topology:port": sims[sim]['port'],
        "netconf-node-topology:tcp-only": "false",
        "netconf-node-topology:pass-through": {}}]}
-    return data
+    response = put_request(url, body, ODL_LOGIN, ODL_PWD)
+    if wait_until_log_contains(tpce_log, re.escape("Triggering notification stream NETCONF for node "+node_id), 60):
+        print("Node "+node_id+" correctly added to tpce topology", end='... ', flush=True)
+    else:
+        print("Node "+node_id+" still not added to tpce topology", end='... ', flush=True)
+        # A 200 on the PUT despite the missing log line usually means the node
+        # was already mounted when the controller started.
+        if response.status_code == requests.codes.ok:
+            print("It was probably loaded at start-up", end='... ', flush=True)
+        # TODO an else-clause to abort test would probably be nice here
+    return response
+
+
+def unmount_device(node_id):
+    """Delete *node_id* from the netconf topology and wait for confirmation.
+
+    Issues a RESTCONF DELETE, then waits (up to 60 s) for tpce to log the
+    device disconnection.  Returns the DELETE response.
+    """
+    node_url = "{}/config/network-topology:network-topology/topology/topology-netconf/node/{}".format(
+        RESTCONF_BASE_URL, node_id)
+    result = delete_request(node_url, ODL_LOGIN, ODL_PWD)
+    pattern = re.escape("onDeviceDisConnected: " + node_id)
+    outcome = "correctly" if wait_until_log_contains(tpce_log, pattern, 60) else "still not"
+    print("Node " + node_id + " " + outcome + " deleted from tpce topology", end='... ', flush=True)
+    return result
def generate_link_data(xpdr_node: str, xpdr_num: str, network_num: str, rdm_node: str, srg_num: str,
stdout=outfile, stderr=outfile)
-def wait_until_log_contains(log_file, searched_string, time_to_wait=20):
+def wait_until_log_contains(log_file, regexp, time_to_wait=20):
+    """Tail *log_file* until a line matches *regexp* or *time_to_wait* seconds elapse.
+
+    Returns True when a matching line was seen, False on timeout.  Relies on
+    the 'timeout' context manager defined below to raise TimeoutError.
+    """
    found = False
    tail = None
    try:
        with timeout(seconds=time_to_wait):
-            print("Waiting for " + searched_string)
+            print("Searching for pattern '"+regexp+"' in "+os.path.basename(log_file), end='... ', flush=True)
+            # tail -F keeps following even if the log file is rotated/recreated.
            tail = subprocess.Popen(['tail', '-F', log_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            regexp = re.compile(searched_string)
+            # Compiled once, outside the read loop.
+            compiled_regexp = re.compile(regexp)
            while True:
                line = tail.stdout.readline().decode('utf-8')
-                if regexp.search(line):
-                    print("Searched string found.")
+                if compiled_regexp.search(line):
+                    print("String found!", end=' ')
                    found = True
                    break
    except TimeoutError:
-        print("Cannot find string "+searched_string+" after waiting for "+str(time_to_wait))
+        print("String not found after "+str(time_to_wait), end=" seconds! ", flush=True)
    finally:
+        # Always reap the tail subprocess, whether the pattern was found or not.
        if tail is not None:
-            print("Stopping tail command")
+            print("Stopping tail command", end='... ', flush=True)
            tail.stderr.close()
            tail.stdout.close()
            tail.kill()
            tail.wait()
    return found
+# TODO try to find an alternative to subprocess+tail -f (such as https://pypi.org/project/tailhead/)
class timeout: