--- /dev/null
+*** Settings ***
+Documentation Basic tests for BGP application peer.
+...
+... Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+...
+... This program and the accompanying materials are made available under the
+... terms of the Eclipse Public License v1.0 which accompanies this distribution,
+... and is available at http://www.eclipse.org/legal/epl-v10.html
+...
+... Test suite performs basic BGP functional test cases for
+... BGP application peer.
+...
+... Brief description how to configure BGP application peer and
+... how to use restconf application peer interface:
+... https://wiki.opendaylight.org/view/BGP_LS_PCEP:User_Guide#BGP_Application_Peer
+... https://wiki.opendaylight.org/view/BGP_LS_PCEP:Programmer_Guide#BGP
+Suite Setup Setup_Everything
+Suite Teardown Teardown_Everything
+Test Setup SetupUtils.Setup_Test_With_Logging_And_Without_Fast_Failing
+Test Teardown FailFast.Start_Failing_Fast_If_This_Failed
+Library OperatingSystem
+Library SSHLibrary timeout=10s
+Library RequestsLibrary
+Library ${CURDIR}/../../../libraries/HsfJson/hsf_json.py
+Variables ${CURDIR}/../../../variables/Variables.py
+Variables ${CURDIR}/../../../variables/bgpuser/variables.py ${TOOLS_SYSTEM_IP}
+Resource ${CURDIR}/../../../libraries/BGPSpeaker.robot
+Resource ${CURDIR}/../../../libraries/ConfigViaRestconf.robot
+Resource ${CURDIR}/../../../libraries/FailFast.robot
+Resource ${CURDIR}/../../../libraries/KarafKeywords.robot
+Resource ${CURDIR}/../../../libraries/KillPythonTool.robot
+Resource ${CURDIR}/../../../libraries/SetupUtils.robot
+Resource ${CURDIR}/../../../libraries/SSHKeywords.robot
+Resource ${CURDIR}/../../../libraries/Utils.robot
+Resource ${CURDIR}/../../../libraries/WaitForFailure.robot
+
*** Variables ***
# Workspace directories where normalized JSON topology responses are stored for diffing.
${ACTUAL_RESPONSES_FOLDER}    ${TEMPDIR}/actual
${EXPECTED_RESPONSES_FOLDER}    ${TEMPDIR}/expected
${BGP_VARIABLES_FOLDER}    ${CURDIR}/../../../variables/bgpuser/
${TOOLS_SYSTEM_PROMPT}    ${DEFAULT_LINUX_PROMPT}
# BGP hold time (seconds) used in the peer configuration template.
${HOLDTIME}    180
# Log levels for the python tools and the controller.
${BGP_PEER_LOG_LEVEL}    debug
${BGP_APP_PEER_LOG_LEVEL}    debug
${CONTROLLER_LOG_LEVEL}    INFO
${CONTROLLER_BGP_LOG_LEVEL}    DEFAULT
# Command lines for the BGP speaker (play.py) and application peer (bgp_app_peer.py) tools.
${BGP_PEER_COMMAND}    python play.py --amount 0 --myip=${TOOLS_SYSTEM_IP} --myport=${BGP_TOOL_PORT} --peerip=${ODL_SYSTEM_IP} --peerport=${ODL_BGP_PORT} --${BGP_PEER_LOG_LEVEL}
${BGP_PEER_OPTIONS}    ${EMPTY}
${BGP_APP_PEER_ID}    10.0.0.10
${BGP_APP_PEER_POST_COMMAND}    python bgp_app_peer.py --host ${ODL_SYSTEM_IP} --port ${RESTCONFPORT} --command post --count 3 --prefix 8.0.1.0 --prefixlen 28 --${BGP_APP_PEER_LOG_LEVEL}
${BGP_APP_PEER_PUT_COMMAND}    python bgp_app_peer.py --host ${ODL_SYSTEM_IP} --port ${RESTCONFPORT} --command put --count 3 --prefix 8.0.1.0 --prefixlen 28 --${BGP_APP_PEER_LOG_LEVEL}
${BGP_APP_PEER_DELETE_COMMAND}    python bgp_app_peer.py --host ${ODL_SYSTEM_IP} --port ${RESTCONFPORT} --command delete --count 3 --prefix 8.0.1.0 --prefixlen 28 --${BGP_APP_PEER_LOG_LEVEL}
${BGP_APP_PEER_DELETE_ALL_COMMAND}    python bgp_app_peer.py --host ${ODL_SYSTEM_IP} --port ${RESTCONFPORT} --command delete-all --${BGP_APP_PEER_LOG_LEVEL}
${BGP_APP_PEER_GET_COMMAND}    python bgp_app_peer.py --host ${ODL_SYSTEM_IP} --port ${RESTCONFPORT} --command get --${BGP_APP_PEER_LOG_LEVEL}
# Discard tool console output; its log file is collected separately.
${BGP_APP_PEER_OPTIONS}    &>/dev/null
${BGP_APP_PEER_TIMEOUT}    30s
+
*** Test Cases ***
Reconfigure_ODL_To_Accept_BGP_Peer_Connection
    [Documentation]    Configure BGP peer module with initiate-connection set to false.
    [Tags]    critical
    ${template_as_string}=    BuiltIn.Set_Variable    {'NAME': 'example-bgp-peer', 'IP': '${TOOLS_SYSTEM_IP}', 'HOLDTIME': '${HOLDTIME}', 'PEER_PORT': '${BGP_TOOL_PORT}', 'INITIATE': 'false'}
    ConfigViaRestconf.Put_Xml_Template_Folder_Config_Via_Restconf    ${BGP_VARIABLES_FOLDER}${/}bgp_peer    ${template_as_string}

Reconfigure_ODL_To_Accept_BGP_Application_Peer
    [Documentation]    Configure BGP application peer module.
    [Tags]    critical
    ${template_as_string}=    BuiltIn.Set_Variable    {'NAME': 'example-bgp-peer-app', 'IP': '${BGP_APP_PEER_ID}'}
    ConfigViaRestconf.Put_Xml_Template_Folder_Config_Via_Restconf    ${BGP_VARIABLES_FOLDER}${/}bgp_application_peer    ${template_as_string}

Check_For_Empty_Example-IPv4-Topology
    [Documentation]    Sanity check that example-ipv4-topology is up but empty.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${empty_json}    000_Empty.json    timeout=120s
+
TC1_Connect_BGP_Peer
    [Documentation]    Start BGP peer tool and check no prefixes have been exchanged yet.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    Start_Console_Tool    ${BGP_PEER_COMMAND}    ${BGP_PEER_OPTIONS}
    Read_And_Fail_If_Prompt_Is_Seen
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    0
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    0

TC1_BGP_Application_Peer_Post_3_Initial_Routes
    [Documentation]    Start BGP application peer tool and give it ${BGP_APP_PEER_TIMEOUT} to finish.
    [Tags]    critical
    Switch_To_BGP_Application_Peer_Console
    Start_Console_Tool    ${BGP_APP_PEER_POST_COMMAND}    ${BGP_APP_PEER_OPTIONS}
    Wait_Until_Console_Tool_Finish    ${BGP_APP_PEER_TIMEOUT}
    Store_File_To_Workspace    bgp_app_peer.log    bgp_app_peer_initial_post_tc1.log

TC1_Check_Example-IPv4-Topology_Is_Filled_With_3_Routes
    [Documentation]    See new routes in example-ipv4-topology as a proof that synchronization was correct.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${filled_json}    010_Filled.json

TC1_BGP_Peer_Check_Incomming_Updates_For_3_Introduced_Prefixes
    [Documentation]    Check incoming updates for new routes.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    BuiltIn.Wait_Until_Keyword_Succeeds    60s    1s    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.0/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.16/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.32/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    0

TC1_BGP_Application_Peer_Delete_3_Initial_Routes
    [Documentation]    Start BGP application peer tool and give it ${BGP_APP_PEER_TIMEOUT} to finish.
    [Tags]    critical
    Switch_To_BGP_Application_Peer_Console
    Start_Console_Tool    ${BGP_APP_PEER_DELETE_COMMAND}    ${BGP_APP_PEER_OPTIONS}
    Wait_Until_Console_Tool_Finish    ${BGP_APP_PEER_TIMEOUT}
    Store_File_To_Workspace    bgp_app_peer.log    bgp_app_peer_delete_tc1.log

TC1_Check_Example-IPv4-Topology_Is_Empty
    [Documentation]    See new routes are deleted.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${empty_json}    011_Empty.json

TC1_Peer_Check_Incomming_Updates_For_3_Withdrawn_Prefixes
    [Documentation]    Check incoming updates for withdrawn routes.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    BuiltIn.Wait_Until_Keyword_Succeeds    60s    1s    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.0/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.16/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.32/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3

TC1_Stop_BGP_Peer
    [Documentation]    Stop BGP peer tool.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    Stop_Console_Tool
    Store_File_To_Workspace    bgp_peer.log    bgp_peer_tc1.log
+
TC2_Connect_BGP_Peer
    [Documentation]    Start BGP peer tool and check no prefixes have been exchanged yet.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    Start_Console_Tool    ${BGP_PEER_COMMAND}    ${BGP_PEER_OPTIONS}
    Read_And_Fail_If_Prompt_Is_Seen
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    0
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    0

TC2_BGP_Application_Peer_Put_3_Routes
    [Documentation]    Start BGP application peer tool and give it ${BGP_APP_PEER_TIMEOUT} to finish.
    [Tags]    critical
    Switch_To_BGP_Application_Peer_Console
    Start_Console_Tool    ${BGP_APP_PEER_PUT_COMMAND}    ${BGP_APP_PEER_OPTIONS}
    Wait_Until_Console_Tool_Finish    ${BGP_APP_PEER_TIMEOUT}
    Store_File_To_Workspace    bgp_app_peer.log    bgp_app_peer_put_tc2.log

TC2_Check_Example-IPv4-Topology_Is_Filled_With_3_Routes
    [Documentation]    See new routes in example-ipv4-topology as a proof that synchronization was correct.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${filled_json}    020_Filled.json

TC2_BGP_Peer_Check_Incomming_Updates_For_3_Introduced_Prefixes
    [Documentation]    Check incoming updates for new routes.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    BuiltIn.Wait_Until_Keyword_Succeeds    60s    1s    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.0/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.16/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.32/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    0

TC2_BGP_Application_Peer_Delete_All_Routes
    [Documentation]    Start BGP application peer tool and give it ${BGP_APP_PEER_TIMEOUT} to finish.
    [Tags]    critical
    Switch_To_BGP_Application_Peer_Console
    Start_Console_Tool    ${BGP_APP_PEER_DELETE_ALL_COMMAND}    ${BGP_APP_PEER_OPTIONS}
    Wait_Until_Console_Tool_Finish    ${BGP_APP_PEER_TIMEOUT}
    Store_File_To_Workspace    bgp_app_peer.log    bgp_app_peer_delete_all_tc2.log

TC2_Check_Example-IPv4-Topology_Is_Empty
    [Documentation]    See new routes are deleted.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${empty_json}    021_Empty.json

TC2_BGP_Peer_Check_Incomming_Updates_For_3_Withdrawn_Prefixes
    [Documentation]    Check incoming updates for withdrawn routes.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    BuiltIn.Wait_Until_Keyword_Succeeds    60s    1s    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.0/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.16/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.32/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3

TC2_Stop_BGP_Peer
    [Documentation]    Stop BGP peer tool.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    Stop_Console_Tool
    Store_File_To_Workspace    bgp_peer.log    bgp_peer_tc2.log
+
TC3_BGP_Application_Peer_Put_3_Routes
    [Documentation]    Start BGP application peer tool and give it ${BGP_APP_PEER_TIMEOUT} to finish.
    [Tags]    critical
    Switch_To_BGP_Application_Peer_Console
    Start_Console_Tool    ${BGP_APP_PEER_PUT_COMMAND}    ${BGP_APP_PEER_OPTIONS}
    Wait_Until_Console_Tool_Finish    ${BGP_APP_PEER_TIMEOUT}
    Store_File_To_Workspace    bgp_app_peer.log    bgp_app_peer_put_tc3.log

TC3_Check_Example-IPv4-Topology_Is_Filled_With_3_Routes
    [Documentation]    See new routes in example-ipv4-topology as a proof that synchronization was correct.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${filled_json}    030_Filled.json

TC3_Reconnect_BGP_Peer_And_Check_Incomming_Updates_For_3_Introduced_Prefixes
    [Documentation]    Start BGP peer tool and check that routes added before the session are received.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    Start_Console_Tool    ${BGP_PEER_COMMAND}    ${BGP_PEER_OPTIONS}
    Read_And_Fail_If_Prompt_Is_Seen
    BuiltIn.Wait_Until_Keyword_Succeeds    60s    1s    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.0/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.16/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received: 8.0.1.32/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    0

TC3_BGP_Application_Peer_Delete_All_Routes
    [Documentation]    Start BGP application peer tool and give it ${BGP_APP_PEER_TIMEOUT} to finish.
    [Tags]    critical
    Switch_To_BGP_Application_Peer_Console
    Start_Console_Tool    ${BGP_APP_PEER_DELETE_ALL_COMMAND}    ${BGP_APP_PEER_OPTIONS}
    Wait_Until_Console_Tool_Finish    ${BGP_APP_PEER_TIMEOUT}
    Store_File_To_Workspace    bgp_app_peer.log    bgp_app_peer_delete_all_tc3.log

TC3_Check_Example-IPv4-Topology_Is_Empty
    [Documentation]    See new routes are deleted.
    [Tags]    critical
    Wait_For_Topology_To_Change_To    ${empty_json}    031_Empty.json

TC3_BGP_Peer_Check_Incomming_Updates_For_3_Withdrawn_Prefixes
    [Documentation]    Check incoming updates for withdrawn routes.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    BuiltIn.Wait_Until_Keyword_Succeeds    60s    1s    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received:    3
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.0/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.16/28    1
    Check_File_For_Word_Count    bgp_peer.log    withdrawn_prefix_received: 8.0.1.32/28    1
    Check_File_For_Word_Count    bgp_peer.log    nlri_prefix_received:    3

TC3_Stop_BGP_Peer
    [Documentation]    Stop BGP peer tool.
    [Tags]    critical
    Switch_To_BGP_Peer_Console
    Stop_Console_Tool
    Store_File_To_Workspace    bgp_peer.log    bgp_peer_tc3.log
+
Delete_Bgp_Peer_Configuration
    [Documentation]    Revert the BGP configuration to the original state: without any configured peers.
    [Tags]    critical
    ${template_as_string}=    BuiltIn.Set_Variable    {'NAME': 'example-bgp-peer'}
    ConfigViaRestconf.Delete_Xml_Template_Folder_Config_Via_Restconf    ${BGP_VARIABLES_FOLDER}${/}bgp_peer    ${template_as_string}

Delete_Bgp_Application_Peer_Configuration
    [Documentation]    Revert the BGP configuration to the original state: without any configured application peers.
    [Tags]    critical
    ${template_as_string}=    BuiltIn.Set_Variable    {'NAME': 'example-bgp-peer-app'}
    ConfigViaRestconf.Delete_Xml_Template_Folder_Config_Via_Restconf    ${BGP_VARIABLES_FOLDER}${/}bgp_application_peer    ${template_as_string}
+
*** Keywords ***
Setup_Everything
    [Documentation]    SSH-login to mininet machine, create HTTP session,
    ...    prepare directories for responses, put Python tools to mininet machine, setup imported resources.
    SetupUtils.Setup_Utils_For_Setup_And_Teardown
    SSHLibrary.Set_Default_Configuration    prompt=${TOOLS_SYSTEM_PROMPT}
    Open_BGP_Peer_Console
    SSHLibrary.Put_File    ${CURDIR}/../../../../tools/fastbgp/play.py
    # NOTE(review): keyword name below contains a typo ("Aplicationp"); kept because the definition uses the same spelling.
    Open_BGP_Aplicationp_Peer_Console
    SSHLibrary.Put_File    ${CURDIR}/../../../../tools/fastbgp/bgp_app_peer.py
    SSHLibrary.Put_File    ${CURDIR}/../../../../tools/fastbgp/ipv4-routes-template.xml
    OperatingSystem.Remove_Directory    ${EXPECTED_RESPONSES_FOLDER}    recursive=True
    OperatingSystem.Remove_Directory    ${ACTUAL_RESPONSES_FOLDER}    recursive=True
    # The previous suite may have been using the same directories.
    OperatingSystem.Create_Directory    ${EXPECTED_RESPONSES_FOLDER}
    OperatingSystem.Create_Directory    ${ACTUAL_RESPONSES_FOLDER}
    RequestsLibrary.Create_Session    operational    http://${ODL_SYSTEM_IP}:${RESTCONFPORT}${OPERATIONAL_TOPO_API}    auth=${AUTH}
    ConfigViaRestconf.Setup_Config_Via_Restconf
    KarafKeywords.Execute_Controller_Karaf_Command_On_Background    log:set ${CONTROLLER_LOG_LEVEL}
    KarafKeywords.Execute_Controller_Karaf_Command_On_Background    log:set ${CONTROLLER_BGP_LOG_LEVEL} org.opendaylight.bgpcep
    KarafKeywords.Execute_Controller_Karaf_Command_On_Background    log:set ${CONTROLLER_BGP_LOG_LEVEL} org.opendaylight.protocol
+
Teardown_Everything
    [Documentation]    Create and Log the diff between expected and actual responses, make sure Python tools were killed.
    ...    Tear down imported Resources.
    ${diff}=    OperatingSystem.Run    diff -dur ${EXPECTED_RESPONSES_FOLDER} ${ACTUAL_RESPONSES_FOLDER}
    BuiltIn.Log    ${diff}
    KillPythonTool.Search_And_Kill_Remote_Python    'play\.py'
    KillPythonTool.Search_And_Kill_Remote_Python    'bgp_app_peer\.py'
    ConfigViaRestconf.Teardown_Config_Via_Restconf
    RequestsLibrary.Delete_All_Sessions
    SSHLibrary.Close_All_Connections
+
Open_BGP_Peer_Console
    [Documentation]    Create an SSH session for the BGP peer tool.
    SSHLibrary.Open_Connection    ${TOOLS_SYSTEM_IP}    alias=bgp_peer_console
    Utils.Flexible_Mininet_Login

Open_BGP_Aplicationp_Peer_Console
    [Documentation]    Create an SSH session for the BGP application peer tool.
    SSHLibrary.Open_Connection    ${TOOLS_SYSTEM_IP}    alias=bgp_app_peer_console
    Utils.Flexible_Mininet_Login

Switch_To_BGP_Peer_Console
    [Documentation]    Make the BGP peer SSH session the active one.
    SSHLibrary.Switch Connection    bgp_peer_console

Switch_To_BGP_Application_Peer_Console
    [Documentation]    Make the BGP application peer SSH session the active one.
    SSHLibrary.Switch Connection    bgp_app_peer_console
+
Start_Console_Tool
    [Arguments]    ${command}    ${tool_opt}
    [Documentation]    Start the tool ${command} ${tool_opt} on the active SSH session without waiting for it to finish.
    BuiltIn.Log    ${command}
    ${output}=    SSHLibrary.Write    ${command} ${tool_opt}
    BuiltIn.Log    ${output}

Wait_Until_Console_Tool_Finish
    [Arguments]    ${timeout}
    [Documentation]    Wait ${timeout} for the tool exit (the shell prompt reappears).
    BuiltIn.Wait Until Keyword Succeeds    ${timeout}    1s    SSHLibrary.Read Until Prompt

Stop_Console_Tool
    [Documentation]    Stop the tool if still running (send Ctrl-C and log the remaining output).
    Utils.Write_Bare_Ctrl_C
    ${output}=    SSHLibrary.Read    delay=1s
    BuiltIn.Log    ${output}
+
Wait_For_Topology_To_Change_To
    # Fix: the filename argument used broken "$(unknown)" syntax; Robot variables are ${...}.
    [Arguments]    ${json_topology}    ${filename}    ${timeout}=10s    ${refresh}=1s
    [Documentation]    Normalize the expected json topology and save it to ${EXPECTED_RESPONSES_FOLDER} under ${filename}.
    ...    Wait until Compare_Topology matches. ${ACTUAL_RESPONSES_FOLDER} will hold its last result.
    ${topology_normalized}=    Normalize_And_Save_Expected_Json    ${json_topology}    ${filename}    ${EXPECTED_RESPONSES_FOLDER}
    BuiltIn.Wait_Until_Keyword_Succeeds    ${timeout}    ${refresh}    Compare_Topology    ${topology_normalized}    ${filename}
+
Verify_That_Topology_Does_Not_Change_From
    # Fix: the filename argument used broken "$(unknown)" syntax; Robot variables are ${...}.
    [Arguments]    ${json_topology}    ${filename}    ${timeout}=10s    ${refresh}=1s
    [Documentation]    Normalize the expected json topology and save it to ${EXPECTED_RESPONSES_FOLDER} under ${filename}.
    ...    Verify that Compare_Topology keeps passing. ${ACTUAL_RESPONSES_FOLDER} will hold its last result.
    ${topology_normalized}=    Normalize_And_Save_Expected_Json    ${json_topology}    ${filename}    ${EXPECTED_RESPONSES_FOLDER}
    WaitForFailure.Verify_Keyword_Does_Not_Fail_Within_Timeout    ${timeout}    ${refresh}    Compare_Topology    ${topology_normalized}    ${filename}
+
Compare_Topology
    # Fix: the filename argument used broken "$(unknown)" syntax; Robot variables are ${...}.
    [Arguments]    ${expected_normalized}    ${filename}
    [Documentation]    Get current example-ipv4-topology as json, normalize it, save to ${ACTUAL_RESPONSES_FOLDER} under ${filename}.
    ...    Check that status code is 200, check that normalized jsons match exactly.
    ${response}=    RequestsLibrary.Get Request    operational    topology/example-ipv4-topology
    BuiltIn.Log    ${response.status_code}
    BuiltIn.Log    ${response.text}
    ${actual_normalized}=    Normalize_And_Save_Expected_Json    ${response.text}    ${filename}    ${ACTUAL_RESPONSES_FOLDER}
    BuiltIn.Should_Be_Equal_As_Strings    ${response.status_code}    200
    BuiltIn.Should_Be_Equal    ${actual_normalized}    ${expected_normalized}
+
Normalize_And_Save_Expected_Json
    # Fix: the filename argument used broken "$(unknown)" syntax; Robot variables are ${...}.
    [Arguments]    ${json_text}    ${filename}    ${directory}
    [Documentation]    Normalize given json using hsf_json library. Log and save the result to ${filename} under ${directory}.
    ${json_normalized}=    hsf_json.Hsf_Json    ${json_text}
    BuiltIn.Log    ${json_normalized}
    OperatingSystem.Create_File    ${directory}${/}${filename}    ${json_normalized}
    # TODO: Should we prepend .json to the filename? When we detect it is not already prepended?
    [Return]    ${json_normalized}
+
Read_And_Fail_If_Prompt_Is_Seen
    [Documentation]    Try to read SSH to see prompt, but expect to see no prompt within SSHLibrary's timeout.
    # Passing here means the started tool is still running (the shell prompt has not returned).
    ${passed}=    BuiltIn.Run_Keyword_And_Return_Status    BuiltIn.Run_Keyword_And_Expect_Error    No match found for '${TOOLS_SYSTEM_PROMPT}' in *.    Read_Text_Before_Prompt
    BuiltIn.Return_From_Keyword_If    ${passed}
    BGPSpeaker.Dump_BGP_Speaker_Logs
    Builtin.Fail    The prompt was seen but it was not expected yet

Read_Text_Before_Prompt
    [Documentation]    Log text gathered by SSHLibrary.Read_Until_Prompt.
    ...    This needs to be a separate keyword just because how Read_And_Fail_If_Prompt_Is_Seen is implemented.
    ${text}=    SSHLibrary.Read_Until_Prompt
    BuiltIn.Log    ${text}
+
Store_File_To_Workspace
    [Arguments]    ${source_file_name}    ${target_file_name}
    [Documentation]    Store the remote ${source_file_name} to the workspace as ${target_file_name}.
    ${output_log}=    SSHLibrary.Execute_Command    cat ${source_file_name}
    BuiltIn.Log    ${output_log}
    Create File    ${target_file_name}    ${output_log}

Check_File_For_Word_Count
    [Arguments]    ${file_name}    ${word}    ${expected_count}
    [Documentation]    Count ${word} in ${file_name}. Expect ${expected_count} occurrence(s).
    ${output_log}=    SSHLibrary.Execute_Command    grep -o '${word}' ${file_name} | wc -l
    BuiltIn.Log    ${output_log}
    BuiltIn.Should_Be_Equal_As_Strings    ${output_log}    ${expected_count}
--- /dev/null
+"""This program performs required BGP application peer operations."""
+
+# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+
+__author__ = "Radovan Sajben"
+__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
+__license__ = "Eclipse Public License v1.0"
+__email__ = "rsajben@cisco.com"
+
+import requests
+import ipaddr
+import argparse
+import logging
+import time
+import xml.dom.minidom as md
+
+
+def _build_url(odl_ip, port, uri):
+ """Compose URL from generic IP, port and URI fragment.
+
+ Args:
+ :param odl_ip: controller's ip address or hostname
+
+ :param port: controller's restconf port
+
+ :param uri: URI without /restconf/ to complete URL
+
+ Returns:
+ :returns url: full restconf url corresponding to params
+ """
+
+ url = "http://" + str(odl_ip) + ":" + port + "/restconf/" + uri
+ return url
+
+
def _build_data(xml_template, prefix_base, prefix_len, count, element="ipv4-routes"):
    """Generate list of routes based on xml templates.

    Args:
        :xml_template: xml template for routes

        :prefix_base: first prefix IP address (must support integer addition;
            the CLI passes an ipaddr.IPv4Address -- see the argparse setup)

        :prefix_len: prefix length in bits

        :count: number of routes to be generated

        :element: element to be returned ("ipv4-routes" or "ipv4-route")

    Returns:
        :returns xml_data: requested element as xml data (empty string when
            element matches neither known tag)
    """
    # Module-level perf counter accumulating time spent building xml.
    global total_build_data_time_counter
    build_data_timestamp = time.time()

    routes = md.parse(xml_template)

    # The template is expected to contain one <ipv4-routes> holding one <ipv4-route>.
    routes_node = routes.getElementsByTagName("ipv4-routes")[0]
    route_node = routes.getElementsByTagName("ipv4-route")[0]
    if element == routes_node.tagName:
        # Whole-list request: detach the template route, then clone it once per prefix.
        routes_node.removeChild(route_node)
        if count:
            # Address increment between consecutive prefixes of this length.
            prefix_gap = 2 ** (32 - prefix_len)

        for prefix_index in range(count):
            new_route_node = route_node.cloneNode(True)
            new_route_prefix = new_route_node.getElementsByTagName("prefix")[0]

            prefix = prefix_base + prefix_index * prefix_gap
            new_route_prefix.childNodes[0].nodeValue = str(prefix) + "/" + str(prefix_len)

            routes_node.appendChild(new_route_node)

        xml_data = routes_node.toxml()
    elif element == route_node.tagName:
        # Single-route request: rewrite the template route in place.
        route_node.setAttribute("xmlns", routes_node.namespaceURI)
        route_prefix = route_node.getElementsByTagName("prefix")[0]
        route_prefix.childNodes[0].nodeValue = str(prefix_base) + "/" + str(prefix_len)
        xml_data = route_node.toxml()
    else:
        xml_data = ""
    routes.unlink()
    logger.debug("xml data generated:\n%s", xml_data)
    total_build_data_time_counter += time.time() - build_data_timestamp
    return xml_data
+
+
def send_request(operation, odl_ip, port, uri, auth, xml_data=None, expect_status_code=200):
    """Send a http request.

    Args:
        :operation: GET, POST, PUT, DELETE

        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication credentials

        :param xml_data: list of routes as xml data

        :param expect_status_code: status code logged at debug level; any
            other status is logged as an error

    Returns:
        :returns http response object, or None when the request timed out
    """
    global total_response_time_counter
    global total_number_of_responses_counter

    ses = requests.Session()

    url = _build_url(odl_ip, port, uri)
    header = {"Content-Type": "application/xml"}
    req = requests.Request(operation, url, headers=header, data=xml_data, auth=auth)
    prep = req.prepare()
    # Fix: rsp must exist even when ses.send raises Timeout, otherwise the
    # final "return rsp" raised UnboundLocalError.
    rsp = None
    try:
        send_request_timestamp = time.time()
        rsp = ses.send(prep, timeout=60)
        total_response_time_counter += time.time() - send_request_timestamp
        total_number_of_responses_counter += 1
    except requests.exceptions.Timeout:
        logger.error("No response from %s", odl_ip)
    else:
        # Log details at debug level for the expected status, at error level
        # otherwise (the original logged the debug lines twice on success).
        log = logger.debug if rsp.status_code == expect_status_code else logger.error
        log("%s %s", rsp.request, rsp.request.url)
        log("Request headers: %s:", rsp.request.headers)
        log("Request body: %s", rsp.request.body)
        log("Response: %s", rsp.text)
        log("%s %s", rsp, rsp.reason)
    return rsp
+
+
def get_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
                 count=None, xml_template=None):
    """Send a http GET request for getting all prefixes.

    Args:
        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication tupple as (user, password)

        :param prefix_base: IP address of the first prefix (not used)

        :prefix_len: length of the prefix in bites (not used)

        :param count: number of prefixes to be processed (not used)

        :param xml_template: xml template for building the xml data (not used)

    Returns:
        :returns None (the prefix list and count are only logged)
    """
    logger.info("Get all prefixes from %s:%s/restconf/%s", odl_ip, port, uri)
    rsp = send_request("GET", odl_ip, port, uri, auth)
    if rsp is None:
        return
    # Strip json structural characters so the payload can be scanned as
    # comma-separated "key:value" items.
    text = rsp.text
    for char in "{}[]":
        text = text.replace(char, "")
    # Idiom fix: collect matching items and join once instead of the original
    # quadratic += loop followed by a manual last-character slice.
    prefix_items = [item for item in text.split(",") if "prefix" in item]
    prefixes = ",".join(prefix_items)
    prefix_count = len(prefix_items)
    logger.debug("prefix_list=%s", prefixes)
    logger.info("prefix_count=%s", prefix_count)
+
+
def post_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
                  count=0, xml_template=None):
    """Create a whole prefix list with a single http POST request.

    Args:
        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication tupple as (user, password)

        :param prefix_base: IP address of the first prefix

        :prefix_len: length of the prefix in bites (specifies the increment as well)

        :param count: number of prefixes to be processed

        :param xml_template: xml template for building the xml data

    Returns:
        :returns None
    """
    logger.info("Post %s prefix(es) in a single request (starting from %s/%s) into %s:%s/restconf/%s",
                count, prefix_base, prefix_len, odl_ip, port, uri)
    # Build the whole <ipv4-routes> payload at once and post it; restconf
    # answers 204 on successful creation.
    payload = _build_data(xml_template, prefix_base, prefix_len, count)
    send_request("POST", odl_ip, port, uri, auth,
                 expect_status_code=204, xml_data=payload)
+
+
def put_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
                 xml_template=None):
    """Replace the prefix list with a single http PUT request.

    Args:
        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication tupple as (user, password)

        :param prefix_base: IP address of the first prefix

        :prefix_len: length of the prefix in bites (specifies the increment as well)

        :param count: number of prefixes to be processed

        :param xml_template: xml template for building the xml data

    Returns:
        :returns None
    """
    # PUT targets the ipv4-routes container directly.
    target_uri = uri + _uri_suffix_ipv4_routes
    logger.info("Put %s prefix(es) in a single request (starting from %s/%s) into %s:%s/restconf/%s",
                count, prefix_base, prefix_len, odl_ip, port, target_uri)
    payload = _build_data(xml_template, prefix_base, prefix_len, count)
    send_request("PUT", odl_ip, port, target_uri, auth, xml_data=payload)
+
+
def add_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
                 xml_template=None):
    """Add prefixes one by one via consecutive http POST requests.

    Args:
        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication tupple as (user, password)

        :param prefix_base: IP address of the first prefix

        :prefix_len: length of the prefix in bites (specifies the increment as well)

        :param count: number of prefixes to be processed

        :param xml_template: xml template for building the xml data

    Returns:
        :returns None
    """
    logger.info("Add %s prefixes (starting from %s/%s) into %s:%s/restconf/%s",
                count, prefix_base, prefix_len, odl_ip, port, uri)
    target_uri = uri + _uri_suffix_ipv4_routes
    # Address increment between consecutive prefixes of this length.
    step = 2 ** (32 - prefix_len)
    for index in range(count):
        current = prefix_base + index * step
        logger.info("Adding prefix %s/%s to %s:%s/restconf/%s",
                    current, prefix_len, odl_ip, port, uri)
        payload = _build_data(xml_template, current, prefix_len, 1, "ipv4-route")
        send_request("POST", odl_ip, port, target_uri, auth,
                     xml_data=payload, expect_status_code=204)
+
+
def delete_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
                    xml_template=None):
    """Delete prefixes one by one via consecutive http DELETE requests.

    Args:
        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication tupple as (user, password)

        :param prefix_base: IP address of the first prefix

        :prefix_len: length of the prefix in bites (specifies the increment as well)

        :param count: number of prefixes to be processed

        :param xml_template: xml template for building the xml data (not used)

    Returns:
        :returns None
    """
    logger.info("Delete %s prefix(es) (starting from %s/%s) from %s:%s/restconf/%s",
                count, prefix_base, prefix_len, odl_ip, port, uri)
    target_uri = uri + _uri_suffix_ipv4_routes + _uri_suffix_ipv4_route
    # Address increment between consecutive prefixes of this length.
    step = 2 ** (32 - prefix_len)
    for index in range(count):
        current = prefix_base + index * step
        logger.info("Deleting prefix %s/%s from %s:%s/restconf/%s",
                    current, prefix_len, odl_ip, port, uri)
        # %2F is the url-encoded "/" separating the prefix from its length.
        send_request("DELETE", odl_ip, port,
                     target_uri + str(current) + "%2F" + str(prefix_len), auth)
+
+
def delete_all_prefixes(odl_ip, port, uri, auth, prefix_base=None,
                        prefix_len=None, count=None, xml_template=None):
    """Remove the whole prefix list with a single http DELETE request.

    Args:
        :param odl_ip: controller's ip address or hostname

        :param port: controller's restconf port

        :param uri: URI without /restconf/ to complete URL

        :param auth: authentication tupple as (user, password)

        :param prefix_base: IP address of the first prefix (not used)

        :prefix_len: length of the prefix in bites (not used)

        :param count: number of prefixes to be processed (not used)

        :param xml_template: xml template for building the xml data (not used)

    Returns:
        :returns None
    """
    # Unused parameters are kept so every command handler shares one signature.
    logger.info("Delete all prefixes from %s:%s/restconf/%s", odl_ip, port, uri)
    target_uri = uri + _uri_suffix_ipv4_routes
    send_request("DELETE", odl_ip, port, target_uri, auth)
+
+
# Values accepted by the --command CLI argument.
_commands = ["post", "put", "add", "delete", "delete-all", "get"]
# URI fragments appended to the application RIB uri when targeting routes.
_uri_suffix_ipv4_routes = "bgp-inet:ipv4-routes/"
_uri_suffix_ipv4_route = "bgp-inet:ipv4-route/"  # followed by IP address like 1.1.1.1%2F32
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="BGP application peer script")
+ parser.add_argument("--host", type=ipaddr.IPv4Address, default="127.0.0.1",
+ help="ODL controller IP address")
+ parser.add_argument("--port", default="8181",
+ help="ODL RESTCONF port")
+ parser.add_argument("--command", choices=_commands, metavar="command",
+ help="Command to be performed."
+ "post, put, add, delete, delete-all, get")
+ parser.add_argument("--prefix", type=ipaddr.IPv4Address, default="8.0.1.0",
+ help="First prefix IP address")
+ parser.add_argument("--prefixlen", type=int, help="Prefix length in bites",
+ default=28)
+ parser.add_argument("--count", type=int, help="Number of prefixes",
+ default=1)
+ parser.add_argument("--user", help="Restconf user name", default="admin")
+ parser.add_argument("--password", help="Restconf password", default="admin")
+ parser.add_argument("--uri", help="The uri part of requests",
+ default="config/bgp-rib:application-rib/example-app-rib/"
+ "tables/bgp-types:ipv4-address-family/"
+ "bgp-types:unicast-subsequent-address-family/")
+ parser.add_argument("--xml", help="File name of the xml data template",
+ default="ipv4-routes-template.xml")
+ parser.add_argument("--error", dest="loglevel", action="store_const",
+ const=logging.ERROR, default=logging.INFO,
+ help="Set log level to error (default is info)")
+ parser.add_argument("--warning", dest="loglevel", action="store_const",
+ const=logging.WARNING, default=logging.INFO,
+ help="Set log level to warning (default is info)")
+ parser.add_argument("--info", dest="loglevel", action="store_const",
+ const=logging.INFO, default=logging.INFO,
+ help="Set log level to info (default is info)")
+ parser.add_argument("--debug", dest="loglevel", action="store_const",
+ const=logging.DEBUG, default=logging.INFO,
+ help="Set log level to debug (default is info)")
+ parser.add_argument("--logfile", default="bgp_app_peer.log", help="Log file name")
+
+ args = parser.parse_args()
+
+ logger = logging.getLogger("logger")
+ log_formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
+ console_handler = logging.StreamHandler()
+ file_handler = logging.FileHandler(args.logfile, mode="w")
+ console_handler.setFormatter(log_formatter)
+ file_handler.setFormatter(log_formatter)
+ logger.addHandler(console_handler)
+ logger.addHandler(file_handler)
+ logger.setLevel(args.loglevel)
+
+ auth = (args.user, args.password)
+
+ odl_ip = args.host
+ port = args.port
+ command = args.command
+ prefix_base = args.prefix
+ prefix_len = args.prefixlen
+ count = args.count
+ auth = (args.user, args.password)
+ uri = args.uri
+ xml_template = args.xml
+
+ test_start_time = time.time()
+ total_build_data_time_counter = 0
+ total_response_time_counter = 0
+ total_number_of_responses_counter = 0
+
+ if command == "post":
+ post_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
+ xml_template)
+ if command == "put":
+ put_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
+ xml_template)
+ if command == "add":
+ add_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
+ xml_template)
+ elif command == "delete":
+ delete_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count)
+ elif command == "delete-all":
+ delete_all_prefixes(odl_ip, port, uri, auth)
+ elif command == "get":
+ get_prefixes(odl_ip, port, uri, auth)
+
+ total_test_execution_time = time.time() - test_start_time
+
+ logger.info("Total test execution time: %.3fs", total_test_execution_time)
+ logger.info("Total build data time: %.3fs", total_build_data_time_counter)
+ logger.info("Total response time: %.3fs", total_response_time_counter)
+ logger.info("Total number of response(s): %s", total_number_of_responses_counter)
+ file_handler.close()
parser.add_argument("--debug", dest="loglevel", action="store_const",
const=logging.DEBUG, default=logging.INFO,
help=str_help)
+ str_help = "Log file name"
+ parser.add_argument("--logfile", default="bgp_peer.log", help=str_help)
str_help = "Trailing part of the csv result files for plotting purposes"
parser.add_argument("--results", default="bgp.csv", type=str, help=str_help)
str_help = "Minimum number of updates to reach to include result into csv."
parser.add_argument("--threshold", default="1000", type=int, help=str_help)
+ str_help = "RFC 4760 Multiprotocol Extensions for BGP-4 supported"
+ parser.add_argument("--rfc4760", default="yes", type=str, help=str_help)
arguments = parser.parse_args()
# TODO: Are sanity checks (such as asnumber>=0) required?
return arguments
:return: socket.
"""
if arguments.listen:
- stdout_logger.info("Connecting in the listening mode.")
- stdout_logger.debug("Local IP address: " + str(arguments.myip))
- stdout_logger.debug("Local port: " + str(arguments.myport))
+ logger.info("Connecting in the listening mode.")
+ logger.debug("Local IP address: " + str(arguments.myip))
+ logger.debug("Local port: " + str(arguments.myport))
listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listening_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind need single tuple as argument
# TODO: Verify client IP is cotroller IP.
listening_socket.close()
else:
- stdout_logger.info("Connecting in the talking mode.")
- stdout_logger.debug("Local IP address: " + str(arguments.myip))
- stdout_logger.debug("Local port: " + str(arguments.myport))
- stdout_logger.debug("Remote IP address: " + str(arguments.peerip))
- stdout_logger.debug("Remote port: " + str(arguments.peerport))
+ logger.info("Connecting in the talking mode.")
+ logger.debug("Local IP address: " + str(arguments.myip))
+ logger.debug("Local port: " + str(arguments.myport))
+ logger.debug("Remote IP address: " + str(arguments.peerip))
+ logger.debug("Remote port: " + str(arguments.peerport))
talking_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
talking_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind to force specified address and port
# socket does not spead ipaddr, hence str()
talking_socket.connect((str(arguments.peerip), arguments.peerport))
bgp_socket = talking_socket
- stdout_logger.info("Connected to ODL.")
+ logger.info("Connected to ODL.")
return bgp_socket
return short_int
+def get_prefix_list_from_hex(prefixes_hex):
+ """Get decoded list of prefixes (rfc4271#section-4.3)
+
+ Each encoded prefix is one octet of bit length followed by just enough
+ whole octets to hold that many bits.
+
+ Arguments:
+ :prefixes_hex: list of prefixes to be decoded in hex
+ Returns:
+ :return: list of prefixes in the form of ip address (X.X.X.X/X)
+ """
+ prefix_list = []
+ offset = 0
+ while offset < len(prefixes_hex):
+ # First octet: prefix length in bits.
+ prefix_bit_len_hex = prefixes_hex[offset]
+ prefix_bit_len = int(binascii.b2a_hex(prefix_bit_len_hex), 16)
+ # Octets occupied by the prefix body: ceil(bits / 8).
+ # NOTE(review): relies on Python 2 integer division; under Python 3
+ # "/" would yield a float and the slicing below would fail.
+ prefix_len = ((prefix_bit_len - 1) / 8) + 1
+ prefix_hex = prefixes_hex[offset + 1: offset + 1 + prefix_len]
+ # NOTE(review): format "BBBB" expects exactly 4 octets, but prefixes
+ # of /24 or shorter are encoded in fewer -- confirm callers only
+ # produce /25../32 prefixes, otherwise this raises struct.error.
+ prefix = ".".join(str(i) for i in struct.unpack("BBBB", prefix_hex))
+ offset += 1 + prefix_len
+ prefix_list.append(prefix + "/" + str(prefix_bit_len))
+ return prefix_list
+
+
class MessageError(ValueError):
"""Value error with logging optimized for hexlified messages."""
# Some validation.
if len(msg_in) < 37:
# 37 is minimal length of open message with 4-byte AS number.
- stdout_logger.error("Got something else than open with 4-byte AS number: " +
- binascii.hexlify(msg_in))
- raise MessageError("Got something else than open with 4-byte AS number",
- msg_in)
+ logger.error("Got something else than open with 4-byte AS number: " +
+ binascii.hexlify(msg_in))
+ raise MessageError("Got something else than open with 4-byte AS number", msg_in)
# TODO: We could check BGP marker, but it is defined only later;
# decide what to do.
reported_length = get_short_int_from_message(msg_in)
if len(msg_in) != reported_length:
- stdout_logger.error("Message length is not " + str(reported_length) +
- " as stated in " + binascii.hexlify(msg_in))
+ logger.error("Message length is not " + str(reported_length) +
+ " as stated in " + binascii.hexlify(msg_in))
raise MessageError("Message length is not " + reported_length +
" as stated in ", msg_in)
- stdout_logger.info("Open message received.")
+ logger.info("Open message received.")
return msg_in
self.remaining_prefixes_threshold = self.total_prefix_amount - args.prefill
self.results_file_name_default = args.results
self.performance_threshold_default = args.threshold
+ self.rfc4760 = args.rfc4760 == "yes"
# Default values used for randomized part
s1_slots = ((self.total_prefix_amount -
self.remaining_prefixes_threshold - 1) /
- conditional calling of logger methods enclosed inside condition: 8,6s
"""
- stdout_logger.info("Generator initialisation")
- stdout_logger.info(" Target total number of prefixes to be introduced: " +
- str(self.total_prefix_amount))
- stdout_logger.info(" Prefix base: " + str(self.prefix_base_default) + "/" +
- str(self.prefix_length_default))
- stdout_logger.info(" My Autonomous System number: " +
- str(self.my_autonomous_system_default))
- stdout_logger.info(" My Hold Time: " + str(self.hold_time_default))
- stdout_logger.info(" My BGP Identifier: " + str(self.bgp_identifier_default))
- stdout_logger.info(" Next Hop: " + str(self.next_hop_default))
- stdout_logger.info(" Prefix count to be inserted at once: " +
- str(self.prefix_count_to_add_default))
- stdout_logger.info(" Prefix count to be withdrawn at once: " +
- str(self.prefix_count_to_del_default))
- stdout_logger.info(" Fast pre-fill up to " +
- str(self.total_prefix_amount -
- self.remaining_prefixes_threshold) + " prefixes")
- stdout_logger.info(" Remaining number of prefixes to be processed " +
- "in parallel with withdrawals: " +
- str(self.remaining_prefixes_threshold))
- stdout_logger.debug(" Prefix index range used after pre-fill procedure [" +
- str(self.randomize_lowest_default) + ", " +
- str(self.randomize_highest_default) + "]")
+ logger.info("Generator initialisation")
+ logger.info(" Target total number of prefixes to be introduced: " +
+ str(self.total_prefix_amount))
+ logger.info(" Prefix base: " + str(self.prefix_base_default) + "/" +
+ str(self.prefix_length_default))
+ logger.info(" My Autonomous System number: " +
+ str(self.my_autonomous_system_default))
+ logger.info(" My Hold Time: " + str(self.hold_time_default))
+ logger.info(" My BGP Identifier: " + str(self.bgp_identifier_default))
+ logger.info(" Next Hop: " + str(self.next_hop_default))
+ logger.info(" Prefix count to be inserted at once: " +
+ str(self.prefix_count_to_add_default))
+ logger.info(" Prefix count to be withdrawn at once: " +
+ str(self.prefix_count_to_del_default))
+ logger.info(" Fast pre-fill up to " +
+ str(self.total_prefix_amount -
+ self.remaining_prefixes_threshold) + " prefixes")
+ logger.info(" Remaining number of prefixes to be processed " +
+ "in parallel with withdrawals: " +
+ str(self.remaining_prefixes_threshold))
+ logger.debug(" Prefix index range used after pre-fill procedure [" +
+ str(self.randomize_lowest_default) + ", " +
+ str(self.randomize_highest_default) + "]")
if self.single_update_default:
- stdout_logger.info(" Common single UPDATE will be generated " +
- "for both NLRI & WITHDRAWN lists")
+ logger.info(" Common single UPDATE will be generated " +
+ "for both NLRI & WITHDRAWN lists")
else:
- stdout_logger.info(" Two separate UPDATEs will be generated " +
- "for each NLRI & WITHDRAWN lists")
+ logger.info(" Two separate UPDATEs will be generated " +
+ "for each NLRI & WITHDRAWN lists")
if self.randomize_updates_default:
- stdout_logger.info(" Generation of UPDATE messages will be randomized")
- stdout_logger.info(" Let\"s go ...\n")
+ logger.info(" Generation of UPDATE messages will be randomized")
+ logger.info(" Let\'s go ...\n")
# TODO: Notification for hold timer expiration can be handy.
totals2 = None
performance2 = None
- stdout_logger.info("#" * 10 + " Final results " + "#" * 10)
- stdout_logger.info("Number of iterations: " + str(self.iteration))
- stdout_logger.info("Number of UPDATE messages sent in the pre-fill phase: " +
- str(self.phase1_updates_sent))
- stdout_logger.info("The pre-fill phase duration: " +
- str(self.phase1_stop_time - self.phase1_start_time) + "s")
- stdout_logger.info("Number of UPDATE messages sent in the 2nd test phase: " +
- str(self.phase2_updates_sent))
- stdout_logger.info("The 2nd test phase duration: " +
- str(self.phase2_stop_time - self.phase2_start_time) + "s")
- stdout_logger.info("Threshold for performance reporting: " + str(threshold))
+ logger.info("#" * 10 + " Final results " + "#" * 10)
+ logger.info("Number of iterations: " + str(self.iteration))
+ logger.info("Number of UPDATE messages sent in the pre-fill phase: " +
+ str(self.phase1_updates_sent))
+ logger.info("The pre-fill phase duration: " +
+ str(self.phase1_stop_time - self.phase1_start_time) + "s")
+ logger.info("Number of UPDATE messages sent in the 2nd test phase: " +
+ str(self.phase2_updates_sent))
+ logger.info("The 2nd test phase duration: " +
+ str(self.phase2_stop_time - self.phase2_start_time) + "s")
+ logger.info("Threshold for performance reporting: " + str(threshold))
# making labels
phase1_label = ("pre-fill " + str(self.prefix_count_to_add_default) +
second_line = second_line[:-2]
f.write(first_line + "\n")
f.write(second_line + "\n")
- stdout_logger.info("Message generator performance results stored in " +
- file_name + ':')
- stdout_logger.info(" " + first_line)
- stdout_logger.info(" " + second_line)
+ logger.info("Message generator performance results stored in " +
+ file_name + ":")
+ logger.info(" " + first_line)
+ logger.info(" " + second_line)
finally:
f.close()
indexes.append(prefix_index)
prefixes.append(prefix_base + prefix_index * prefix_gap)
if self.log_debug:
- stdout_logger.debug(" Prefix slot index: " + str(slot_index))
- stdout_logger.debug(" Prefix slot size: " + str(slot_size))
- stdout_logger.debug(" Prefix count: " + str(prefix_count))
- stdout_logger.debug(" Prefix indexes: " + str(indexes))
- stdout_logger.debug(" Prefix list: " + str(prefixes))
+ logger.debug(" Prefix slot index: " + str(slot_index))
+ logger.debug(" Prefix slot size: " + str(slot_size))
+ logger.debug(" Prefix count: " + str(prefix_count))
+ logger.debug(" Prefix indexes: " + str(indexes))
+ logger.debug(" Prefix list: " + str(prefixes))
return prefixes
def compose_update_message(self, prefix_count_to_add=None,
prefix_count_to_del = self.prefix_count_to_del_default
# logging
if self.log_info and not (self.iteration % 1000):
- stdout_logger.info("Iteration: " + str(self.iteration) +
- " - total remaining prefixes: " +
- str(self.remaining_prefixes))
+ logger.info("Iteration: " + str(self.iteration) +
+ " - total remaining prefixes: " +
+ str(self.remaining_prefixes))
if self.log_debug:
- stdout_logger.debug("#" * 10 + " Iteration: " +
- str(self.iteration) + " " + "#" * 10)
- stdout_logger.debug("Remaining prefixes: " +
- str(self.remaining_prefixes))
+ logger.debug("#" * 10 + " Iteration: " +
+ str(self.iteration) + " " + "#" * 10)
+ logger.debug("Remaining prefixes: " +
+ str(self.remaining_prefixes))
# scenario type & one-shot counter
straightforward_scenario = (self.remaining_prefixes >
self.remaining_prefixes_threshold)
if straightforward_scenario:
prefix_count_to_del = 0
if self.log_debug:
- stdout_logger.debug("--- STARAIGHTFORWARD SCENARIO ---")
+ logger.debug("--- STARAIGHTFORWARD SCENARIO ---")
if not self.phase1_start_time:
self.phase1_start_time = time.time()
else:
if self.log_debug:
- stdout_logger.debug("--- COMBINED SCENARIO ---")
+ logger.debug("--- COMBINED SCENARIO ---")
if not self.phase2_start_time:
self.phase2_start_time = time.time()
# tailor the number of prefixes if needed
slot_index_to_del = slot_index_to_add - self.slot_gap_default
# getting lists of prefixes for insertion in this iteration
if self.log_debug:
- stdout_logger.debug("Prefixes to be inserted in this iteration:")
+ logger.debug("Prefixes to be inserted in this iteration:")
prefix_list_to_add = self.get_prefix_list(slot_index_to_add,
prefix_count=prefix_count_to_add)
# getting lists of prefixes for withdrawal in this iteration
if self.log_debug:
- stdout_logger.debug("Prefixes to be withdrawn in this iteration:")
+ logger.debug("Prefixes to be withdrawn in this iteration:")
prefix_list_to_del = self.get_prefix_list(slot_index_to_del,
prefix_count=prefix_count_to_del)
# generating the mesage
bgp_identifier_hex = struct.pack(">I", bgp_identifier)
# Optional Parameters
- optional_parameters_hex = (
- "\x02" # Param type ("Capability Ad")
- "\x06" # Length (6 bytes)
- "\x01" # Capability type (NLRI Unicast),
- # see RFC 4760, secton 8
- "\x04" # Capability value length
- "\x00\x01" # AFI (Ipv4)
- "\x00" # (reserved)
- "\x01" # SAFI (Unicast)
+ optional_parameters_hex = ""
+ if self.rfc4760:
+ optional_parameter_hex = (
+ "\x02" # Param type ("Capability Ad")
+ "\x06" # Length (6 bytes)
+ "\x01" # Capability type (NLRI Unicast),
+ # see RFC 4760, secton 8
+ "\x04" # Capability value length
+ "\x00\x01" # AFI (Ipv4)
+ "\x00" # (reserved)
+ "\x01" # SAFI (Unicast)
+ )
+ optional_parameters_hex += optional_parameter_hex
+ optional_parameter_hex = (
"\x02" # Param type ("Capability Ad")
"\x06" # Length (6 bytes)
"\x41" # "32 bit AS Numbers Support"
# My AS in 32 bit format
+ struct.pack(">I", my_autonomous_system)
)
+ optional_parameters_hex += optional_parameter_hex
# Optional Parameters Length
optional_parameters_length = len(optional_parameters_hex)
)
if self.log_debug:
- stdout_logger.debug("OPEN Message encoding")
- stdout_logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
- stdout_logger.debug(" Length=" + str(length) + " (0x" +
- binascii.hexlify(length_hex) + ")")
- stdout_logger.debug(" Type=" + str(type) + " (0x" +
- binascii.hexlify(type_hex) + ")")
- stdout_logger.debug(" Version=" + str(version) + " (0x" +
- binascii.hexlify(version_hex) + ")")
- stdout_logger.debug(" My Autonomous System=" +
- str(my_autonomous_system_2_bytes) + " (0x" +
- binascii.hexlify(my_autonomous_system_hex_2_bytes) +
- ")")
- stdout_logger.debug(" Hold Time=" + str(hold_time) + " (0x" +
- binascii.hexlify(hold_time_hex) + ")")
- stdout_logger.debug(" BGP Identifier=" + str(bgp_identifier) +
- " (0x" + binascii.hexlify(bgp_identifier_hex) + ")")
- stdout_logger.debug(" Optional Parameters Length=" +
- str(optional_parameters_length) + " (0x" +
- binascii.hexlify(optional_parameters_length_hex) +
- ")")
- stdout_logger.debug(" Optional Parameters=0x" +
- binascii.hexlify(optional_parameters_hex))
- stdout_logger.debug(" OPEN Message encoded: 0x" +
- binascii.b2a_hex(message_hex))
+ logger.debug("OPEN message encoding")
+ logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
+ logger.debug(" Length=" + str(length) + " (0x" +
+ binascii.hexlify(length_hex) + ")")
+ logger.debug(" Type=" + str(type) + " (0x" +
+ binascii.hexlify(type_hex) + ")")
+ logger.debug(" Version=" + str(version) + " (0x" +
+ binascii.hexlify(version_hex) + ")")
+ logger.debug(" My Autonomous System=" +
+ str(my_autonomous_system_2_bytes) + " (0x" +
+ binascii.hexlify(my_autonomous_system_hex_2_bytes) +
+ ")")
+ logger.debug(" Hold Time=" + str(hold_time) + " (0x" +
+ binascii.hexlify(hold_time_hex) + ")")
+ logger.debug(" BGP Identifier=" + str(bgp_identifier) +
+ " (0x" + binascii.hexlify(bgp_identifier_hex) + ")")
+ logger.debug(" Optional Parameters Length=" +
+ str(optional_parameters_length) + " (0x" +
+ binascii.hexlify(optional_parameters_length_hex) +
+ ")")
+ logger.debug(" Optional Parameters=0x" +
+ binascii.hexlify(optional_parameters_hex))
+ logger.debug("OPEN message encoded: 0x%s",
+ binascii.b2a_hex(message_hex))
return message_hex
)
if self.log_debug:
- stdout_logger.debug("UPDATE Message encoding")
- stdout_logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
- stdout_logger.debug(" Length=" + str(length) + " (0x" +
- binascii.hexlify(length_hex) + ")")
- stdout_logger.debug(" Type=" + str(type) + " (0x" +
- binascii.hexlify(type_hex) + ")")
- stdout_logger.debug(" withdrawn_routes_length=" +
- str(withdrawn_routes_length) + " (0x" +
- binascii.hexlify(withdrawn_routes_length_hex) + ")")
- stdout_logger.debug(" Withdrawn_Routes=" + str(wr_prefixes) + "/" +
- str(wr_prefix_length) + " (0x" +
- binascii.hexlify(withdrawn_routes_hex) + ")")
- stdout_logger.debug(" Total Path Attributes Length=" +
- str(total_path_attributes_length) + " (0x" +
- binascii.hexlify(total_path_attributes_length_hex) +
- ")")
- stdout_logger.debug(" Path Attributes=" + "(0x" +
- binascii.hexlify(path_attributes_hex) + ")")
- stdout_logger.debug(" Network Layer Reachability Information=" +
- str(nlri_prefixes) + "/" + str(nlri_prefix_length) +
- " (0x" + binascii.hexlify(nlri_hex) + ")")
- stdout_logger.debug(" UPDATE Message encoded: 0x" +
- binascii.b2a_hex(message_hex))
+ logger.debug("UPDATE message encoding")
+ logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
+ logger.debug(" Length=" + str(length) + " (0x" +
+ binascii.hexlify(length_hex) + ")")
+ logger.debug(" Type=" + str(type) + " (0x" +
+ binascii.hexlify(type_hex) + ")")
+ logger.debug(" withdrawn_routes_length=" +
+ str(withdrawn_routes_length) + " (0x" +
+ binascii.hexlify(withdrawn_routes_length_hex) + ")")
+ logger.debug(" Withdrawn_Routes=" + str(wr_prefixes) + "/" +
+ str(wr_prefix_length) + " (0x" +
+ binascii.hexlify(withdrawn_routes_hex) + ")")
+ logger.debug(" Total Path Attributes Length=" +
+ str(total_path_attributes_length) + " (0x" +
+ binascii.hexlify(total_path_attributes_length_hex) +
+ ")")
+ logger.debug(" Path Attributes=" + "(0x" +
+ binascii.hexlify(path_attributes_hex) + ")")
+ logger.debug(" Network Layer Reachability Information=" +
+ str(nlri_prefixes) + "/" + str(nlri_prefix_length) +
+ " (0x" + binascii.hexlify(nlri_hex) + ")")
+ logger.debug("UPDATE message encoded: 0x" +
+ binascii.b2a_hex(message_hex))
# updating counter
self.updates_sent += 1
)
if self.log_debug:
- stdout_logger.debug("NOTIFICATION Message encoding")
- stdout_logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
- stdout_logger.debug(" Length=" + str(length) + " (0x" +
- binascii.hexlify(length_hex) + ")")
- stdout_logger.debug(" Type=" + str(type) + " (0x" +
- binascii.hexlify(type_hex) + ")")
- stdout_logger.debug(" Error Code=" + str(error_code) + " (0x" +
- binascii.hexlify(error_code_hex) + ")")
- stdout_logger.debug(" Error Subode=" + str(error_subcode) + " (0x" +
- binascii.hexlify(error_subcode_hex) + ")")
- stdout_logger.debug(" Data=" + " (0x" + binascii.hexlify(data_hex) + ")")
- stdout_logger.debug(" NOTIFICATION Message encoded: 0x" +
- binascii.b2a_hex(message_hex))
+ logger.debug("NOTIFICATION message encoding")
+ logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
+ logger.debug(" Length=" + str(length) + " (0x" +
+ binascii.hexlify(length_hex) + ")")
+ logger.debug(" Type=" + str(type) + " (0x" +
+ binascii.hexlify(type_hex) + ")")
+ logger.debug(" Error Code=" + str(error_code) + " (0x" +
+ binascii.hexlify(error_code_hex) + ")")
+ logger.debug(" Error Subode=" + str(error_subcode) + " (0x" +
+ binascii.hexlify(error_subcode_hex) + ")")
+ logger.debug(" Data=" + " (0x" + binascii.hexlify(data_hex) + ")")
+ logger.debug("NOTIFICATION message encoded: 0x%s",
+ binascii.b2a_hex(message_hex))
return message_hex
)
if self.log_debug:
- stdout_logger.debug("KEEP ALIVE Message encoding")
- stdout_logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
- stdout_logger.debug(" Length=" + str(length) + " (0x" +
- binascii.hexlify(length_hex) + ")")
- stdout_logger.debug(" Type=" + str(type) + " (0x" +
- binascii.hexlify(type_hex) + ")")
- stdout_logger.debug(" KEEP ALIVE Message encoded: 0x" +
- binascii.b2a_hex(message_hex))
+ logger.debug("KEEP ALIVE message encoding")
+ logger.debug(" Marker=0x" + binascii.hexlify(marker_hex))
+ logger.debug(" Length=" + str(length) + " (0x" +
+ binascii.hexlify(length_hex) + ")")
+ logger.debug(" Type=" + str(type) + " (0x" +
+ binascii.hexlify(type_hex) + ")")
+ logger.debug("KEEP ALIVE message encoded: 0x%s",
+ binascii.b2a_hex(message_hex))
return message_hex
if hold_timedelta > peer_hold_timedelta:
hold_timedelta = peer_hold_timedelta
if hold_timedelta != 0 and hold_timedelta < 3:
- stdout_logger.error("Invalid hold timedelta value: " + str(hold_timedelta))
+ logger.error("Invalid hold timedelta value: " + str(hold_timedelta))
raise ValueError("Invalid hold timedelta value: ", hold_timedelta)
self.hold_timedelta = hold_timedelta
# If we do not hear from peer this long, we assume it has died.
if self.hold_timedelta != 0:
# time.time() may be too strict
if snapshot_time > self.peer_hold_time:
- stdout_logger.error("Peer has overstepped the hold timer.")
+ logger.error("Peer has overstepped the hold timer.")
raise RuntimeError("Peer has overstepped the hold timer.")
# TODO: Include hold_timedelta?
# TODO: Add notification sending (attempt). That means
self.bytes_to_read = self.header_length
# Incremental buffer for message under read.
self.msg_in = ""
+ # Initialising counters
+ self.updates_received = 0
+ self.prefixes_introduced = 0
+ self.prefixes_withdrawn = 0
+ self.rx_idle_time = 0
def read_message_chunk(self):
"""Read up to one message
# Prepare state for reading another message.
message_type_hex = self.msg_in[self.header_length]
if message_type_hex == "\x01":
- stdout_logger.info("OPEN message received: 0x%s",
- binascii.b2a_hex(self.msg_in))
+ logger.info("OPEN message received: 0x%s",
+ binascii.b2a_hex(self.msg_in))
elif message_type_hex == "\x02":
- stdout_logger.debug("UPDATE message received: 0x%s",
- binascii.b2a_hex(self.msg_in))
+ logger.debug("UPDATE message received: 0x%s",
+ binascii.b2a_hex(self.msg_in))
+ self.decode_update_message(self.msg_in)
elif message_type_hex == "\x03":
- stdout_logger.info("NOTIFICATION message received: 0x%s",
- binascii.b2a_hex(self.msg_in))
+ logger.info("NOTIFICATION message received: 0x%s",
+ binascii.b2a_hex(self.msg_in))
elif message_type_hex == "\x04":
- stdout_logger.info("KEEP ALIVE message received: 0x%s",
- binascii.b2a_hex(self.msg_in))
+ logger.info("KEEP ALIVE message received: 0x%s",
+ binascii.b2a_hex(self.msg_in))
else:
- stdout_logger.warning("Unexpected message received: 0x%s",
- binascii.b2a_hex(self.msg_in))
+ logger.warning("Unexpected message received: 0x%s",
+ binascii.b2a_hex(self.msg_in))
self.msg_in = ""
self.reading_header = True
self.bytes_to_read = self.header_length
# something right now.
return
+ def decode_path_attributes(self, path_attributes_hex):
+ """Decode the Path Attributes field (rfc4271#section-4.3)
+
+ Logs every attribute found; for MP_REACH_NLRI / MP_UNREACH_NLRI
+ (rfc4760) also decodes the nested prefix lists and updates the
+ introduced/withdrawn prefix counters.
+
+ Arguments:
+ :path_attributes: path_attributes field to be decoded in hex
+ Returns:
+ :return: None
+ """
+ hex_to_decode = path_attributes_hex
+
+ while len(hex_to_decode):
+ # Attribute header: flags (1 octet), type code (1 octet), length.
+ attr_flags_hex = hex_to_decode[0]
+ attr_flags = int(binascii.b2a_hex(attr_flags_hex), 16)
+# attr_optional_bit = attr_flags & 128
+# attr_transitive_bit = attr_flags & 64
+# attr_partial_bit = attr_flags & 32
+ attr_extended_length_bit = attr_flags & 16
+
+ attr_type_code_hex = hex_to_decode[1]
+ attr_type_code = int(binascii.b2a_hex(attr_type_code_hex), 16)
+
+ # Extended Length flag set: the length field is 2 octets, else 1.
+ if attr_extended_length_bit:
+ attr_length_hex = hex_to_decode[2:4]
+ attr_length = int(binascii.b2a_hex(attr_length_hex), 16)
+ attr_value_hex = hex_to_decode[4:4 + attr_length]
+ hex_to_decode = hex_to_decode[4 + attr_length:]
+ else:
+ attr_length_hex = hex_to_decode[2]
+ attr_length = int(binascii.b2a_hex(attr_length_hex), 16)
+ attr_value_hex = hex_to_decode[3:3 + attr_length]
+ hex_to_decode = hex_to_decode[3 + attr_length:]
+
+ # Well-known attribute types are only logged; the two rfc4760
+ # multiprotocol attributes (14, 15) are decoded further below.
+ if attr_type_code == 1:
+ logger.debug("Attribute type = 1 (ORIGIN, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 2:
+ logger.debug("Attribute type = 2 (AS_PATH, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 3:
+ logger.debug("Attribute type = 3 (NEXT_HOP, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 4:
+ logger.debug("Attribute type = 4 (MULTI_EXIT_DISC, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 5:
+ logger.debug("Attribute type = 5 (LOCAL_PREF, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 6:
+ logger.debug("Attribute type = 6 (ATOMIC_AGGREGATE, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 7:
+ logger.debug("Attribute type = 7 (AGGREGATOR, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ elif attr_type_code == 14: # rfc4760#section-3
+ logger.debug("Attribute type = 14 (MP_REACH_NLRI, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ # MP_REACH_NLRI layout: AFI (2), SAFI (1), next hop length (1),
+ # next hop (variable), reserved (1), then the NLRI itself.
+ address_family_identifier_hex = attr_value_hex[0:2]
+ logger.debug(" Address Family Identifier = 0x%s",
+ binascii.b2a_hex(address_family_identifier_hex))
+ subsequent_address_family_identifier_hex = attr_value_hex[2]
+ logger.debug(" Subsequent Address Family Identifier = 0x%s",
+ binascii.b2a_hex(subsequent_address_family_identifier_hex))
+ next_hop_netaddr_len_hex = attr_value_hex[3]
+ next_hop_netaddr_len = int(binascii.b2a_hex(next_hop_netaddr_len_hex), 16)
+ logger.debug(" Length of Next Hop Network Address = 0x%s (%s)",
+ binascii.b2a_hex(next_hop_netaddr_len_hex),
+ next_hop_netaddr_len)
+ next_hop_netaddr_hex = attr_value_hex[4:4 + next_hop_netaddr_len]
+ logger.debug(" Network Address of Next Hop = 0x%s",
+ binascii.b2a_hex(next_hop_netaddr_hex))
+ reserved_hex = attr_value_hex[4 + next_hop_netaddr_len]
+ logger.debug(" Reserved = 0x%s",
+ binascii.b2a_hex(reserved_hex))
+ nlri_hex = attr_value_hex[4 + next_hop_netaddr_len + 1:]
+ logger.debug(" Network Layer Reachability Information = 0x%s",
+ binascii.b2a_hex(nlri_hex))
+ nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
+ logger.debug(" NLRI prefix list: %s", nlri_prefix_list)
+ for prefix in nlri_prefix_list:
+ logger.debug(" nlri_prefix_received: %s", prefix)
+ self.prefixes_introduced += len(nlri_prefix_list) # update counter
+ elif attr_type_code == 15: # rfc4760#section-4
+ logger.debug("Attribute type = 15 (MP_UNREACH_NLRI, flags:0x%s)",
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ # MP_UNREACH_NLRI layout: AFI (2), SAFI (1), withdrawn routes.
+ address_family_identifier_hex = attr_value_hex[0:2]
+ logger.debug(" Address Family Identifier = 0x%s",
+ binascii.b2a_hex(address_family_identifier_hex))
+ subsequent_address_family_identifier_hex = attr_value_hex[2]
+ logger.debug(" Subsequent Address Family Identifier = 0x%s",
+ binascii.b2a_hex(subsequent_address_family_identifier_hex))
+ wd_hex = attr_value_hex[3:]
+ logger.debug(" Withdrawn Routes = 0x%s",
+ binascii.b2a_hex(wd_hex))
+ wdr_prefix_list = get_prefix_list_from_hex(wd_hex)
+ logger.debug(" Withdrawn routes prefix list: %s",
+ wdr_prefix_list)
+ for prefix in wdr_prefix_list:
+ logger.debug(" withdrawn_prefix_received: %s", prefix)
+ self.prefixes_withdrawn += len(wdr_prefix_list) # update counter
+ else:
+ logger.debug("Unknown attribute type = %s, flags:0x%s)", attr_type_code,
+ binascii.b2a_hex(attr_flags_hex))
+ logger.debug("Unknown attribute value = 0x%s", binascii.b2a_hex(attr_value_hex))
+ return None
+
+ def decode_update_message(self, msg):
+ """Decode an UPDATE message (rfc4271#section-4.3)
+
+ Logs the individual message fields, delegates path-attribute
+ decoding and updates the update/prefix counters.
+
+ Arguments:
+ :msg: message to be decoded in hex
+ Returns:
+ :return: None
+ """
+ # NOTE(review): several log messages below misspell "length" as
+ # "lenght" (and "Unexpeced"); left untouched here because the message
+ # text is runtime output.
+ logger.debug("Decoding update message:")
+ # message header - marker (16 octets)
+ marker_hex = msg[:16]
+ logger.debug("Message header marker: 0x%s",
+ binascii.b2a_hex(marker_hex))
+ # message header - message length (2 octets)
+ msg_length_hex = msg[16:18]
+ msg_length = int(binascii.b2a_hex(msg_length_hex), 16)
+ logger.debug("Message lenght: 0x%s (%s)",
+ binascii.b2a_hex(msg_length_hex), msg_length)
+ # message header - message type (1 octet); 2 == UPDATE
+ msg_type_hex = msg[18:19]
+ msg_type = int(binascii.b2a_hex(msg_type_hex), 16)
+ if msg_type == 2:
+ logger.debug("Message type: 0x%s (update)",
+ binascii.b2a_hex(msg_type_hex))
+ # withdrawn routes length (2 octets)
+ wdr_length_hex = msg[19:21]
+ wdr_length = int(binascii.b2a_hex(wdr_length_hex), 16)
+ logger.debug("Withdrawn routes lenght: 0x%s (%s)",
+ binascii.b2a_hex(wdr_length_hex), wdr_length)
+ # withdrawn routes
+ wdr_hex = msg[21:21 + wdr_length]
+ logger.debug("Withdrawn routes: 0x%s",
+ binascii.b2a_hex(wdr_hex))
+ wdr_prefix_list = get_prefix_list_from_hex(wdr_hex)
+ logger.debug("Withdrawn routes prefix list: %s",
+ wdr_prefix_list)
+ for prefix in wdr_prefix_list:
+ logger.debug("withdrawn_prefix_received: %s", prefix)
+ # total path attribute length (2 octets, right after the
+ # withdrawn routes)
+ total_pa_length_offset = 21 + wdr_length
+ total_pa_length_hex = msg[total_pa_length_offset:total_pa_length_offset+2]
+ total_pa_length = int(binascii.b2a_hex(total_pa_length_hex), 16)
+ logger.debug("Total path attribute lenght: 0x%s (%s)",
+ binascii.b2a_hex(total_pa_length_hex), total_pa_length)
+ # path attributes
+ pa_offset = total_pa_length_offset + 2
+ pa_hex = msg[pa_offset:pa_offset+total_pa_length]
+ logger.debug("Path attributes: 0x%s", binascii.b2a_hex(pa_hex))
+ self.decode_path_attributes(pa_hex)
+ # network layer reachability information length;
+ # 23 = fixed header (19 octets) + the two 2-octet length fields
+ nlri_length = msg_length - 23 - total_pa_length - wdr_length
+ logger.debug("Calculated NLRI length: %s", nlri_length)
+ # network layer reachability information
+ nlri_offset = pa_offset + total_pa_length
+ nlri_hex = msg[nlri_offset:nlri_offset+nlri_length]
+ logger.debug("NLRI: 0x%s", binascii.b2a_hex(nlri_hex))
+ nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
+ logger.debug("NLRI prefix list: %s", nlri_prefix_list)
+ for prefix in nlri_prefix_list:
+ logger.debug("nlri_prefix_received: %s", prefix)
+ # Updating counters
+ self.updates_received += 1
+ self.prefixes_introduced += len(nlri_prefix_list)
+ self.prefixes_withdrawn += len(wdr_prefix_list)
+ else:
+ logger.error("Unexpeced message type 0x%s in 0x%s",
+ binascii.b2a_hex(msg_type_hex), binascii.b2a_hex(msg))
+
def wait_for_read(self):
"""Read message until timeout (next expected event).
# Compute time to the first predictable state change
event_time = self.timer.get_next_event_time()
# snapshot_time would be imprecise
- wait_timedelta = event_time - time.time()
+ wait_timedelta = min(event_time - time.time(), 10)
if wait_timedelta < 0:
# The program got around to waiting to an event in "very near
# future" so late that it became a "past" event, thus tell
# select.error("Invalid parameter") (for everything else).
wait_timedelta = 0
# And wait for event or something to read.
+
+ logger.info("total_received_update_message_counter: %s",
+ self.updates_received)
+ logger.info("total_received_nlri_prefix_counter: %s",
+ self.prefixes_introduced)
+ logger.info("total_received_withdrawn_prefix_counter: %s",
+ self.prefixes_withdrawn)
+
+ start_time = time.time()
select.select([self.socket], [], [self.socket], wait_timedelta)
- # Not checking anything, that will be done in next iteration.
+ timedelta = time.time() - start_time
+ self.rx_idle_time += timedelta
+
+ logger.info("... idle for %.3fs", timedelta)
+ logger.info("total_rx_idle_time_counter: %.3fs", self.rx_idle_time)
return
if not self.writer.sending_message:
# We need to schedule a keepalive ASAP.
self.writer.enqueue_message_for_sending(self.generator.keepalive_message())
+ logger.info("KEEP ALIVE is sent.")
# We are sending a message now, so let's prioritize it.
self.prioritize_writing = True
# Now we know what our priorities are, we have to check
# Lists are unpacked, each is either [] or [self.socket],
# so we will test them as boolean.
if except_list:
- stdout_logger.error("Exceptional state on the socket.")
+ logger.error("Exceptional state on the socket.")
raise RuntimeError("Exceptional state on socket", self.socket)
# We will do either read or write.
if not (self.prioritize_writing and write_list):
if not self.generator.remaining_prefixes:
# We have just finished update generation,
# end-of-rib is due.
- stdout_logger.info("All update messages generated.")
- stdout_logger.info("Storing performance results.")
+ logger.info("All update messages generated.")
+ logger.info("Storing performance results.")
self.generator.store_results()
- stdout_logger.info("Finally an END-OF-RIB is going to be sent.")
+ logger.info("Finally an END-OF-RIB is sent.")
msg_out += self.generator.update_message(wr_prefixes=[],
nlri_prefixes=[])
self.writer.enqueue_message_for_sending(msg_out)
# Attempt for real sending to be done in next iteration.
return
- # Nothing to write anymore, except occasional keepalives.
- stdout_logger.info("Everything has been done." +
- "Now just waiting for possible incomming message.")
+ # Nothing to write anymore.
# To avoid busy loop, we do idle waiting here.
self.reader.wait_for_read()
return
# We can neither read nor write.
- stdout_logger.warning("Input and output both blocked for " +
- str(self.timer.report_timedelta) + " seconds.")
+ logger.warning("Input and output both blocked for " +
+ str(self.timer.report_timedelta) + " seconds.")
# FIXME: Are we sure select has been really waiting
# the whole period?
return
Establish BGP connection and run iterations.
"""
arguments = parse_arguments()
- logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s")
- stdout_logger = logging.getLogger("stdout_logger")
- stdout_logger.setLevel(arguments.loglevel)
+ logger = logging.getLogger("logger")
+ log_formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
+ console_handler = logging.StreamHandler()
+ file_handler = logging.FileHandler(arguments.logfile, mode="w")
+ console_handler.setFormatter(log_formatter)
+ file_handler.setFormatter(log_formatter)
+ logger.addHandler(console_handler)
+ logger.addHandler(file_handler)
+ logger.setLevel(arguments.loglevel)
bgp_socket = establish_connection(arguments)
# Initial handshake phase. TODO: Can it be also moved to StateTracker?
# Receive open message before sending anything.
timer = TimeTracker(msg_in)
generator = MessageGenerator(arguments)
msg_out = generator.open_message()
- stdout_logger.debug("Sending the OPEN message: " + binascii.hexlify(msg_out))
+ logger.debug("Sending the OPEN message: " + binascii.hexlify(msg_out))
# Send our open message to the peer.
bgp_socket.send(msg_out)
# Wait for confirming keepalive.
# Using exact keepalive length to not to see possible updates.
msg_in = bgp_socket.recv(19)
if msg_in != generator.keepalive_message():
- stdout_logger.error("Open not confirmed by keepalive, instead got " +
- binascii.hexlify(msg_in))
+ logger.error("Open not confirmed by keepalive, instead got " +
+ binascii.hexlify(msg_in))
raise MessageError("Open not confirmed by keepalive, instead got",
msg_in)
timer.reset_peer_hold_time()
# Send the keepalive to indicate the connection is accepted.
timer.snapshot() # Remember this time.
msg_out = generator.keepalive_message()
- stdout_logger.debug("Sending a KEEP ALIVE message: " + binascii.hexlify(msg_out))
+ logger.debug("Sending a KEEP ALIVE message: " + binascii.hexlify(msg_out))
bgp_socket.send(msg_out)
# Use the remembered time.
timer.reset_my_keepalive_time(timer.snapshot_time)