Variables ../variables/Variables.py
*** Variables ***
-
+${WORKSPACE} /opt/jenkins-integration/workspace/shared-controller
+${BUNDLEFOLDER} distribution-karaf-0.3.0-SNAPSHOT
+${AUTHN_CFG_FILE} ${WORKSPACE}/${BUNDLEFOLDER}/etc/org.opendaylight.aaa.authn.cfg
*** Keywords ***
AAA Login
... ${data}
[Return] ${data}
+Disable Authentication On Controller
+ [Arguments] ${controller_ip}
+ [Documentation] Will disable token based authentication. Currently, that is done with a config file change
+ SSHLibrary.Open Connection ${controller_ip}
+ Login With Public Key ${MININET_USER} ${USER_HOME}/.ssh/id_rsa any
+ ${cmd}= Set Variable sed -i 's/^authEnabled=.*$/authEnabled=false/g' ${AUTHN_CFG_FILE}
+ SSHLibrary.Execute Command ${cmd}
+
+Enable Authentication On Controller
+ [Arguments] ${controller_ip}
+ [Documentation] Will enable token based authentication. Currently, that is done with a config file change
+ SSHLibrary.Open Connection ${controller_ip}
+ Login With Public Key ${MININET_USER} ${USER_HOME}/.ssh/id_rsa any
+ ${cmd}= Set Variable sed -i 's/^authEnabled=.*$/authEnabled=true/g' ${AUTHN_CFG_FILE}
+ SSHLibrary.Execute Command ${cmd}
+
+Get Auth Token
+ [Arguments] ${user}=${USER} ${password}=${PWD} ${scope}=${SCOPE} ${client_id}=${EMPTY} ${client_secret}=${EMPTY}
+    [Documentation]    Wrapper used to login to controller and retrieve an auth token. Optional arguments available for client based credentials.
+    ${auth_data}=    Create Auth Data    ${user}    ${password}    ${scope}    ${client_id}    ${client_secret}
+ ${resp}= AAA Login ${CONTROLLER} ${auth_data}
+ Should Be Equal As Strings ${resp.status_code} 201
+ ${auth_token}= Extract Value From Content ${resp.content} /access_token strip
+ [Return] ${auth_token}
+
+Revoke Auth Token
+ [Arguments] ${token}
+ [Documentation] Requests the given token be revoked via POST to ${REVOKE_TOKEN_API}
+ ${headers}= Create Dictionary Content-Type application/x-www-form-urlencoded
+ ${resp}= RequestsLibrary.POST ODL_SESSION ${REVOKE_TOKEN_API} data=${token} headers=${headers}
+ Should Be Equal As Strings ${resp.status_code} 204
+
Validate Token Format
[Arguments] ${token}
[Documentation] Validates the given string is in the proper "token" format
--- /dev/null
+*** Settings ***
+Documentation Start the controllers
+Library Collections
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT} /restconf/config/
+
+*** Test Cases ***
+Stop All Controllers
+ [Documentation] Stop all the controllers in the cluster
+ Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+ KillController ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+
+
+Clean All Journals
+ [Documentation] Clean the journals of all the controllers in the cluster
+ CleanJournal ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 5
+
+Start All Controllers
+ [Documentation] Start all the controllers in the cluster
+ Startcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 120
\ No newline at end of file
*** Settings ***
-Documentation Test suite for testing Distributed Datastore main operations performed from leader
+Documentation This test finds the leader for shards in a 3-Node cluster and executes CRUD operations on them
Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-Variables ../../../../variables/Variables.py
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
+Variables ../../../variables/Variables.py
*** Variables ***
*** Test Cases ***
Add cars and get cars from Leader
[Documentation] Add 100 cars and get added cars from Leader
- ${CURRENT_CAR_LEADER} GetLeader ${SHARD_CAR_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${CURRENT_CAR_LEADER} GetLeader ${SHARD_CAR_NAME} ${3} ${3} ${1} ${PORT} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log CURRENT_CAR_SHARD_LEADER ${CURRENT_CAR_LEADER}
Set Suite Variable ${CURRENT_CAR_LEADER}
${resp} AddCar ${CURRENT_CAR_LEADER} ${PORT} ${100}
Add persons and get persons from Leader
[Documentation] Add 100 persons and get persons
[Documentation] Note: There should be one person added first to enable rpc
- ${CURRENT_PEOPLE_LEADER} GetLeader ${SHARD_PEOPLE_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${CURRENT_PEOPLE_LEADER} GetLeader ${SHARD_PEOPLE_NAME} ${3} ${3} ${1} ${PORT} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Set Suite Variable ${CURRENT_PEOPLE_LEADER}
${resp} AddPerson ${CURRENT_PEOPLE_LEADER} ${PORT} ${0}
${resp} AddPerson ${CURRENT_PEOPLE_LEADER} ${PORT} ${100}
Add car-person mapping and get car-person mapping from Leader
[Documentation] Add car-person and get car-person from Leader
[Documentation] Note: This is done to enable working of rpc
- ${CURRENT_CAR_PERSON_LEADER} GetLeader ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${CURRENT_CAR_PERSON_LEADER} GetLeader ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Set Suite Variable ${CURRENT_CAR_PERSON_LEADER}
${resp} AddCarPerson ${CURRENT_CAR_PERSON_LEADER} ${PORT} ${0}
Sleep 2
Purchase 100 cars using Leader
[Documentation] Purchase 100 cars using Leader
-
${resp} BuyCar ${CURRENT_CAR_PERSON_LEADER} ${PORT} ${100}
Sleep 2
${resp} GetCarPersonMappings ${CURRENT_CAR_PERSON_LEADER} ${PORT} ${0}
Get car-person mappings using Follower1
[Documentation] Get car-person mappings using Follower1 to see 100 entry
- ${FOLLOWERS} GetFollowers ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${FOLLOWERS} GetFollowers ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${FOLLOWERS}
SET SUITE VARIABLE ${FOLLOWERS}
${resp} GetCarPersonMappings ${FOLLOWERS[0]} ${PORT} ${0}
*** Settings ***
-Documentation Test suite for testing Distributed Datastore main operations performed from follower1
+Documentation This test finds the followers of certain shards in a 3-Node cluster and executes CRUD operations on any one follower
Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-Variables ../../../../variables/Variables.py
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
+Variables ../../../variables/Variables.py
*** Variables ***
${REST_CONTEXT} /restconf/config/
*** Test Cases ***
Add cars and get cars from Follower1
[Documentation] Add 100 cars and get added cars from Follower1
- ${FOLLOWERS} GetFollowers ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${FOLLOWERS} GetFollowers ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${FOLLOWERS}
SET SUITE VARIABLE ${FOLLOWERS}
Get car-person mappings using Leader
[Documentation] Get car-person mappings using Leader to see 100 entry
- ${CURRENT_CAR_LEADER} GetLeader ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${CURRENT_CAR_LEADER} GetLeader ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${CURRENT_CAR_LEADER}
Sleep 1
${resp} GetCarPersonMappings ${CURRENT_CAR_LEADER} ${PORT} ${0}
*** Settings ***
-Documentation Run this test after running test no 03
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
+Documentation This test brings down the current leader of the "car" shard and then executes CRUD operations on the new leader
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
*** Variables ***
${SHARD} shard-car-config
*** Test Cases ***
Switch Leader
[Documentation] stop leader and elect new leader
- ${OLD_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${OLD_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Stopcontroller ${OLD_LEADER} ${USERNAME} ${PASSWORD} ${KARAFHOME}
Sleep 30
- ${NEW_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${NEW_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${NEW_LEADER}
Set Suite Variable ${NEW_LEADER}
*** Settings ***
-Documentation Run this test after running test no 05
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
+Documentation This test tries to read the data that was written by the previous test from any one follower
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
*** Variables ***
${SHARD} shard-car-config
*** Test Cases ***
Find follower
[Documentation] find follower
- ${FOLLOWERS} GetFollowers ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${FOLLOWERS} GetFollowers ${SHARD} ${3} ${3} ${2} ${8181} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${FOLLOWERS}
${LAST_FOLLOWER} Set Variable ${FOLLOWERS[0]}
Set Suite Variable ${LAST_FOLLOWER}
*** Settings ***
-Documentation Run this test after running test no 06
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
+Documentation This test executes CRUD operations on any one follower after the old leader has been brought down
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
*** Variables ***
${SHARD} shard-car-config
*** Test Cases ***
Find follower
[Documentation] find follower
- ${FOLLOWERS} GetFollowers ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${FOLLOWERS} GetFollowers ${SHARD} ${3} ${3} ${2} ${8181} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${FOLLOWERS}
${LAST_FOLLOWER} Set Variable ${FOLLOWERS[0]}
Set Suite Variable ${LAST_FOLLOWER}
*** Settings ***
-Documentation Run this test after running test no 07
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
+Documentation This test reads the data from the leader that was written to the follower by the previous test
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
*** Variables ***
${SHARD} shard-car-config
*** Test Cases ***
Find Leader
[Documentation] find new leader
- ${NEW_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
+ ${NEW_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${MEMBER1} ${MEMBER2} ${MEMBER3}
Log ${NEW_LEADER}
Set Suite Variable ${NEW_LEADER}
--- /dev/null
+*** Settings ***
+Documentation This test kills the leader and verifies that on restart the old leader is able to rejoin the cluster
+Library Collections
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT} /restconf/config/
+${KARAF_HOME} /root/odl/dist
+${USER_NAME} root
+${PASSWORD} Ecp123
+${CAR_SHARD} shard-car-config
+
+*** Test Cases ***
+Stop All Controllers
+ [Documentation] Stop all the controllers in the cluster
+ Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+ KillController ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+
+
+Clean All Journals
+ [Documentation] Clean the journals of all the controllers in the cluster
+ CleanJournal ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 5
+
+Start All Controllers
+ [Documentation] Start all the controllers in the cluster
+ Startcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 120
+
+Delete all cars
+ [Documentation] Delete all the cars from the system
+ ${resp} DeleteAllCars ${MEMBER1} ${PORT} 0
+ ${resp} GetCars ${MEMBER1} ${PORT} 0
+ Should Be Equal As Strings ${resp.status_code} 404
+
+
+Delete all people
+ [Documentation] Delete all the people from the system
+ ${resp} DeleteAllPersons ${MEMBER1} ${PORT} 0
+ ${resp} GetPersons ${MEMBER1} ${PORT} 0
+ Should Be Equal As Strings ${resp.status_code} 404
+
+Add 200 cars
+ [Documentation] Add 200 cars
+ ${resp} AddCar ${MEMBER1} ${PORT} ${200}
+ Should Be Equal As Strings ${resp.status_code} 204
+
+Add 200 people
+ [Documentation] Add 200 people
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${200}
+ Should Be Equal As Strings ${resp.status_code} 204
+
+Add Car Person mapping
+ [Documentation] Add Car Persons
+ ${resp} AddCarPerson ${MEMBER1} ${PORT} ${0}
+ ${resp} BuyCar ${MEMBER1} ${PORT} ${200}
+
+Stop the Leader
+ ${CAR_LEADER} GetLeader ${CAR_SHARD} ${3} ${3} ${1} 8181 ${MEMBER1} ${MEMBER2} ${MEMBER3}
+ Set Suite Variable ${CAR_LEADER}
+ Stopcontroller ${CAR_LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+ KillController ${CAR_LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+
+Get all the cars from Follower 1
+ ${followers} GetFollowers ${CAR_SHARD} ${3} ${3} ${1} 8181 ${MEMBER1} ${MEMBER2} ${MEMBER3}
+ ${resp} Getcars ${followers[0]} ${PORT} ${0}
+ Should Be Equal As Strings ${resp.status_code} 200
+ Should Contain ${resp.content} manufacturer1
+
+Restart the Leader
+ Startcontroller ${CAR_LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 120
+
+Get all the cars from Leader
+ ${resp} Getcars ${CAR_LEADER} ${PORT} ${0}
+ Should Be Equal As Strings ${resp.status_code} 200
+ Should Contain ${resp.content} manufacturer1
+
+Cleanup All Controllers
+ [Documentation] Stop all the controllers in the cluster
+ Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+
+
+
--- /dev/null
+*** Settings ***
+Documentation This test kills any of the followers and verifies that when that follower is restarted it can join the cluster
+Library Collections
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT} /restconf/config/
+${KARAF_HOME} /root/odl/dist
+${USER_NAME} root
+${PASSWORD} Ecp123
+${CAR_SHARD} shard-car-config
+
+*** Test Cases ***
+Stop All Controllers
+ [Documentation] Stop all the controllers in the cluster
+ Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+ KillController ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+
+
+Clean All Journals
+ [Documentation] Clean the journals of all the controllers in the cluster
+ CleanJournal ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 5
+
+Start All Controllers
+ [Documentation] Start all the controllers in the cluster
+ Startcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 120
+
+Delete all cars
+ [Documentation] Delete all the cars from the system
+ ${resp} DeleteAllCars ${MEMBER1} ${PORT} 0
+ ${resp} GetCars ${MEMBER1} ${PORT} 0
+ Should Be Equal As Strings ${resp.status_code} 404
+
+
+Delete all people
+ [Documentation] Delete all the people from the system
+ ${resp} DeleteAllPersons ${MEMBER1} ${PORT} 0
+ ${resp} GetPersons ${MEMBER1} ${PORT} 0
+ Should Be Equal As Strings ${resp.status_code} 404
+
+Add 200 cars
+ [Documentation] Add 200 cars
+ ${resp} AddCar ${MEMBER1} ${PORT} ${200}
+ Should Be Equal As Strings ${resp.status_code} 204
+
+Add 200 people
+ [Documentation] Add 200 people
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${200}
+ Should Be Equal As Strings ${resp.status_code} 204
+
+Add Car Person mapping
+ [Documentation] Add Car Persons
+ ${resp} AddCarPerson ${MEMBER1} ${PORT} ${0}
+ ${resp} BuyCar ${MEMBER1} ${PORT} ${200}
+
+Stop one of the followers
+ ${followers} GetFollowers ${CAR_SHARD} ${3} ${3} ${1} 8181 ${MEMBER1} ${MEMBER2} ${MEMBER3}
+ ${CAR_FOLLOWER} Set Variable ${followers[0]}
+ Set Suite Variable ${CAR_FOLLOWER}
+ Stopcontroller ${CAR_FOLLOWER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+ KillController ${CAR_FOLLOWER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+
+Get all the cars from the other Follower
+ ${followers} GetFollowers ${CAR_SHARD} ${3} ${3} ${1} 8181 ${MEMBER1} ${MEMBER2} ${MEMBER3}
+ ${resp} Getcars ${followers[0]} ${PORT} ${0}
+ Should Be Equal As Strings ${resp.status_code} 200
+ Should Contain ${resp.content} manufacturer1
+
+Restart the Stopped Follower
+ Startcontroller ${CAR_FOLLOWER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 120
+
+Get all the cars from Stopped Follower
+ ${resp} Getcars ${CAR_FOLLOWER} ${PORT} ${0}
+ Should Be Equal As Strings ${resp.status_code} 200
+ Should Contain ${resp.content} manufacturer1
+
+Cleanup All Controllers
+ [Documentation] Stop all the controllers in the cluster
+ Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+
+
+
*** Settings ***
-Documentation Test suite for MD-SAL NSF
+Documentation Test suite for Clustering Datastore
Library SSHLibrary
+++ /dev/null
-*** Settings ***
-Documentation Run this test after running test no 05
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-
-*** Variables ***
-${SHARD} shard-car-config
-
-*** Test Cases ***
-Stop Leader
- [Documentation] find new leader
- ${FOLLOWERS} GetFollowers ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- Log ${FOLLOWERS}
- ${LAST_FOLLOWER} Set Variable ${FOLLOWERS[0]}
- Set Suite Variable ${LAST_FOLLOWER}
- ${NEW_LEADER} GetLeader ${SHARD} ${3} ${3} ${2} ${8181} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- Log ${NEW_LEADER}
- Stopcontroller ${NEW_LEADER} ${USERNAME} ${PASSWORD} ${KARAFHOME}
- Sleep 30
-
-
-Get cars from last follower
- [Documentation] get cars from last follower
- ${resp} Getcars ${LAST_FOLLOWER} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 500
-
-Add cars and get cars from last follower
- [Documentation] Add 80 cars and get added cars from last follower
- ${resp} AddCar ${LAST_FOLLOWER} ${PORT} ${80}
- Should Be Equal As Strings ${resp.status_code} 500
+++ /dev/null
-*** Settings ***
-Documentation Test suite for testing Distributed Datastore main operations performed from follower2
-
-Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-Variables ../../../../variables/Variables.py
-
-*** Variables ***
-${REST_CONTEXT} /restconf/config/
-${SHARD_CAR_NAME} shard-car-config
-${SHARD_PEOPLE_NAME} shard-people-config
-${SHARD_CAR_PERSON_NAME} shard-car-people-config
-
-
-*** Test Cases ***
-Add cars and get cars from Follower2
- [Documentation] Add 100 cars and get added cars from Follower2
- ${FOLLOWERS} GetFollowers ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- Log ${FOLLOWERS}
- SET SUITE VARIABLE ${FOLLOWERS}
-
- ${resp} AddCar ${FOLLOWERS[1]} ${PORT} ${100}
- Sleep 1
- ${resp} Getcars ${FOLLOWERS[1]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} manufacturer1
-
-Add persons and get persons from Follower2
- [Documentation] Add 100 persons and get persons from Follower2
- [Documentation] Note: There should be one person added first to enable rpc
- ${resp} AddPerson ${FOLLOWERS[1]} ${PORT} ${0}
- ${resp} AddPerson ${FOLLOWERS[1]} ${PORT} ${100}
- Sleep 1
- ${resp} GetPersons ${FOLLOWERS[1]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} user5
-
-Add car-person mapping and get car-person mapping from Follower2
- [Documentation] Add car-person and get car-person from Follower2
- [Documentation] Note: This is done to enable working of rpc
-
- ${resp} AddCarPerson ${FOLLOWERS[1]} ${PORT} ${0}
- ${resp} GetCarPersonMappings ${FOLLOWERS[1]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} user0
-
-Purchase 100 cars using Follower1
- [Documentation] Purchase 100 cars using Follower2
-
- ${resp} BuyCar ${FOLLOWERS[1]} ${PORT} ${100}
- Sleep 1
- ${resp} GetCarPersonMappings ${FOLLOWERS[1]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
-
-Get car-person mappings using Follower2
- [Documentation] Get car-person mappings using follower2 to see 100 entry
- ${resp} GetCarPersonMappings ${FOLLOWERS[1]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} user100
- Should Contain ${resp.content} user5
-
-Get car-person mappings using Leader
- [Documentation] Get car-person mappings using Leader to see 100 entry
- ${CURRENT_CAR_LEADER} GetLeader ${SHARD_CAR_PERSON_NAME} ${3} ${3} ${1} ${PORT} ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- Log ${CURRENT_CAR_LEADER}
- Sleep 1
- ${resp} GetCarPersonMappings ${CURRENT_CAR_LEADER} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} user100
-
-Get car-person mappings using Follower1
- [Documentation] Get car-person mappings using Follower1 to see 100 entry
- ${resp} GetCarPersonMappings ${FOLLOWERS[0]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} user0
- Should Contain ${resp.content} user100
+++ /dev/null
-*** Settings ***
-Documentation Test suite for RESTCONF RPC CAR PERSON
-Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-Variables ../../../../variables/Variables.py
-
-*** Variables ***
-${REST_CONTEXT} /restconf/config/
-${KARAF_HOME} /root/odl/dist
-${USER_NAME} root
-${PASSWORD} Ecp123
-${CAR_SHARD} shard-car-config
-
-*** Test Cases ***
-Stop All Controllers
- [Documentation] Stop all the controllers in the cluster
- Stopcontroller ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 30
- KillController ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- KillController ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- KillController ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
-
-
-Clean All Journals
- [Documentation] Clean the journals of all the controllers in the cluster
- CleanJournal ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- CleanJournal ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- CleanJournal ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 5
-
-Start All Controllers
- [Documentation] Start all the controllers in the cluster
- Startcontroller ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Startcontroller ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Startcontroller ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 120
-
-Delete all cars
- [Documentation] Delete all the cars from the system
- ${resp} DeleteAllCars ${LEADER} ${PORT} 0
- ${resp} GetCars ${LEADER} ${PORT} 0
- Should Be Equal As Strings ${resp.status_code} 404
-
-
-Delete all people
- [Documentation] Delete all the people from the system
- ${resp} DeleteAllPersons ${LEADER} ${PORT} 0
- ${resp} GetPersons ${LEADER} ${PORT} 0
- Should Be Equal As Strings ${resp.status_code} 404
-
-Add 20000 cars
- [Documentation] Add 200 cars
- ${resp} AddCar ${LEADER} ${PORT} ${200}
- Should Be Equal As Strings ${resp.status_code} 204
-
-Add 20000 people
- [Documentation] Add 200 people
- ${resp} AddPerson ${LEADER} ${PORT} ${0}
- ${resp} AddPerson ${LEADER} ${PORT} ${200}
- Should Be Equal As Strings ${resp.status_code} 204
-
-Add Car Person mapping
- [Documentation] Add Car Persons
- ${resp} AddCarPerson ${LEADER} ${PORT} ${0}
- ${resp} BuyCar ${LEADER} ${PORT} ${200}
-
-Stop the Leader
- ${CAR_LEADER} GetLeader ${CAR_SHARD} ${3} ${3} ${1} 8181 ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- Set Suite Variable ${CAR_LEADER}
- Stopcontroller ${CAR_LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 30
- KillController ${CAR_LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
-
-Get all the cars from Follower 1
- ${followers} GetFollowers ${CAR_SHARD} ${3} ${3} ${1} 8181 ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- ${resp} Getcars ${followers[0]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} manufacturer1
-
-Restart the Leader
- Startcontroller ${CAR_LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 120
-
-Get all the cars from Leader
- ${resp} Getcars ${CAR_LEADER} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} manufacturer1
-
-Cleanup All Controllers
- [Documentation] Stop all the controllers in the cluster
- Stopcontroller ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 30
-
-
-
+++ /dev/null
-*** Settings ***
-Documentation Test suite for RESTCONF RPC CAR PERSON
-Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-Variables ../../../../variables/Variables.py
-
-*** Variables ***
-${REST_CONTEXT} /restconf/config/
-${KARAF_HOME} /root/odl/dist
-${USER_NAME} root
-${PASSWORD} Ecp123
-${CAR_SHARD} shard-car-config
-
-*** Test Cases ***
-Stop All Controllers
- [Documentation] Stop all the controllers in the cluster
- Stopcontroller ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 30
- KillController ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- KillController ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- KillController ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
-
-
-Clean All Journals
- [Documentation] Clean the journals of all the controllers in the cluster
- CleanJournal ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- CleanJournal ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- CleanJournal ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 5
-
-Start All Controllers
- [Documentation] Start all the controllers in the cluster
- Startcontroller ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Startcontroller ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Startcontroller ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 120
-
-Delete all cars
- [Documentation] Delete all the cars from the system
- ${resp} DeleteAllCars ${LEADER} ${PORT} 0
- ${resp} GetCars ${LEADER} ${PORT} 0
- Should Be Equal As Strings ${resp.status_code} 404
-
-
-Delete all people
- [Documentation] Delete all the people from the system
- ${resp} DeleteAllPersons ${LEADER} ${PORT} 0
- ${resp} GetPersons ${LEADER} ${PORT} 0
- Should Be Equal As Strings ${resp.status_code} 404
-
-Add 20000 cars
- [Documentation] Add 200 cars
- ${resp} AddCar ${LEADER} ${PORT} ${200}
- Should Be Equal As Strings ${resp.status_code} 204
-
-Add 20000 people
- [Documentation] Add 200 people
- ${resp} AddPerson ${LEADER} ${PORT} ${0}
- ${resp} AddPerson ${LEADER} ${PORT} ${200}
- Should Be Equal As Strings ${resp.status_code} 204
-
-Add Car Person mapping
- [Documentation] Add Car Persons
- ${resp} AddCarPerson ${LEADER} ${PORT} ${0}
- ${resp} BuyCar ${LEADER} ${PORT} ${200}
-
-Stop of of the followers
- ${followers} GetFollowers ${CAR_SHARD} ${3} ${3} ${1} 8181 ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- ${CAR_FOLLOWER} Set Variable ${followers[0]}
- Set Suite Variable ${CAR_FOLLOWER}
- Stopcontroller ${CAR_FOLLOWER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 30
- KillController ${CAR_FOLLOWER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
-
-Get all the cars from the other Follower
- ${followers} GetFollowers ${CAR_SHARD} ${3} ${3} ${1} 8181 ${LEADER} ${FOLLOWER1} ${FOLLOWER2}
- ${resp} Getcars ${followers[0]} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} manufacturer1
-
-Restart the Stopped Follower
- Startcontroller ${CAR_FOLLOWER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 120
-
-Get all the cars from Stopped Follower
- ${resp} Getcars ${CAR_FOLLOWER} ${PORT} ${0}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} manufacturer1
-
-Cleanup All Controllers
- [Documentation] Stop all the controllers in the cluster
- Stopcontroller ${LEADER} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Stopcontroller ${FOLLOWER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
- Sleep 30
-
-
-
--- /dev/null
+*** Settings ***
+Documentation Stop, clean the journals of, and start all controllers in the cluster
+Library Collections
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT} /restconf/config/
+
+*** Test Cases ***
+Stop All Controllers
+ [Documentation] Stop all the controllers in the cluster
+ Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Stopcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 30
+ KillController ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ KillController ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+
+
+Clean All Journals
+ [Documentation] Clean the journals of all the controllers in the cluster
+ CleanJournal ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ CleanJournal ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 5
+
+Start All Controllers
+ [Documentation] Start all the controllers in the cluster
+ Startcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER2} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Startcontroller ${MEMBER3} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
+ Sleep 120
\ No newline at end of file
*** Settings ***
Documentation Test suite for Routed RPC.
Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Variables ../../../../variables/Variables.py
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Variables ../../../variables/Variables.py
*** Variables ***
${REST_CONTEXT} /restconf/config/
*** Test Cases ***
Add cars and get cars from Leader
[Documentation] Add 100 cars and get added cars from Leader
- ${resp} AddCar ${LEADER} ${PORT} ${100}
- ${resp} Getcars ${LEADER} ${PORT} ${0}
+ ${resp} AddCar ${MEMBER1} ${PORT} ${100}
+ ${resp} Getcars ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} manufacturer1
Add persons and get persons from Leader
[Documentation] Add 100 persons and get persons
[Documentation] Note: There should be one person added first to enable rpc
- ${resp} AddPerson ${LEADER} ${PORT} ${0}
- ${resp} AddPerson ${LEADER} ${PORT} ${100}
- ${resp} GetPersons ${LEADER} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${100}
+ ${resp} GetPersons ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user5
SLEEP 10
Add car-person mapping and get car-person mapping from Follower1
[Documentation] Add car-person and get car-person from Leader
[Documentation] Note: This is done to enable working of rpc
- ${resp} AddCarPerson ${FOLLOWER1} ${PORT} ${0}
- ${resp} GetCarPersonMappings ${FOLLOWER1} ${PORT} ${0}
+ ${resp} AddCarPerson ${MEMBER2} ${PORT} ${0}
+ ${resp} GetCarPersonMappings ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user0
SLEEP 5
Purchase 100 cars using Follower1
[Documentation] Purchase 100 cars using Follower1
- ${resp} BuyCar ${FOLLOWER1} ${PORT} ${100}
- ${resp} GetCarPersonMappings ${FOLLOWER1} ${PORT} ${0}
+ ${resp} BuyCar ${MEMBER2} ${PORT} ${100}
+ ${resp} GetCarPersonMappings ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Get Cars from Leader
[Documentation] Get 100 using Leader
- ${resp} Getcars ${LEADER} ${PORT} ${0}
+ ${resp} Getcars ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} manufacturer99
Get persons from Leader
[Documentation] Get 101 Persons from Leader
- ${resp} GetPersons ${LEADER} ${PORT} ${0}
+ ${resp} GetPersons ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user100
Get car-person mappings using Leader
[Documentation] Get 101 car-person mappings using Leader to see 100 entry
- ${resp} GetCarPersonMappings ${LEADER} ${PORT} ${0}
+ ${resp} GetCarPersonMappings ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user100
*** Settings ***
Documentation Test suite for Routed RPC.
Library Collections
-Library ../../../../libraries/RequestsLibrary.py
-Library ../../../../libraries/Common.py
-Library ../../../../libraries/CrudLibrary.py
-Library ../../../../libraries/SettingsLibrary.py
-Library ../../../../libraries/UtilLibrary.py
-Library ../../../../libraries/ClusterStateLibrary.py
-Variables ../../../../variables/Variables.py
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Library ../../../libraries/CrudLibrary.py
+Library ../../../libraries/SettingsLibrary.py
+Library ../../../libraries/UtilLibrary.py
+Library ../../../libraries/ClusterStateLibrary.py
+Variables ../../../variables/Variables.py
*** Variables ***
${REST_CONTEXT} /restconf/config/
*** Test Cases ***
Add cars and get cars from Leader
[Documentation] Add 100 cars and get added cars from Leader
- ${resp} AddCar ${LEADER} ${PORT} ${100}
- ${resp} Getcars ${LEADER} ${PORT} ${0}
+ ${resp} AddCar ${MEMBER1} ${PORT} ${100}
+ ${resp} Getcars ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} manufacturer1
Add persons and get persons from Leader
[Documentation] Add 100 persons and get persons
[Documentation] Note: There should be one person added first to enable rpc
- ${resp} AddPerson ${LEADER} ${PORT} ${0}
- ${resp} AddPerson ${LEADER} ${PORT} ${100}
- ${resp} GetPersons ${LEADER} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER1} ${PORT} ${100}
+ ${resp} GetPersons ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user5
Add car-person mapping and get car-person mapping from Follower1
[Documentation] Add car-person and get car-person from Follower1
[Documentation] Note: This is done to enable working of rpc
- ${resp} AddCarPerson ${FOLLOWER1} ${PORT} ${0}
- ${resp} GetCarPersonMappings ${FOLLOWER1} ${PORT} ${0}
+ ${resp} AddCarPerson ${MEMBER2} ${PORT} ${0}
+ ${resp} GetCarPersonMappings ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user0
Purchase 100 cars using Follower
[Documentation] Purchase 100 cars using Follower
SLEEP 10
- ${resp} BuyCar ${FOLLOWER1} ${PORT} ${100}
- ${resp} GetCarPersonMappings ${FOLLOWER1} ${PORT} ${0}
+ ${resp} BuyCar ${MEMBER2} ${PORT} ${100}
+ ${resp} GetCarPersonMappings ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Get Cars from Leader
[Documentation] Get 100 using Leader
- ${resp} Getcars ${LEADER} ${PORT} ${0}
+ ${resp} Getcars ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} manufacturer9
Get persons from Leader
[Documentation] Get 11 Persons from Leader
- ${resp} GetPersons ${LEADER} ${PORT} ${0}
+ ${resp} GetPersons ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user100
Get car-person mappings using Leader
[Documentation] Get car-person mappings using Leader to see 100 entry
- ${resp} GetCarPersonMappings ${LEADER} ${PORT} ${0}
+ ${resp} GetCarPersonMappings ${MEMBER1} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user100
Stop Leader
[Documentation] Stop Leader controller
- ${resp} Stopcontroller ${LEADER} root Ecp123 /opt/clustering/dist
+ ${resp} Stopcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
SLEEP 30
- ${resp} Killcontroller ${LEADER} root Ecp123 /opt/clustering/dist
+ ${resp} Killcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
Add cars and get cars from Follower1
[Documentation] Add 100 cars and get added cars from Follower
- ${resp} AddCar ${FOLLOWER1} ${PORT} ${100}
- ${resp} Getcars ${FOLLOWER1} ${PORT} ${0}
+ ${resp} AddCar ${MEMBER2} ${PORT} ${100}
+ ${resp} Getcars ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} manufacturer1
Add persons and get persons from Follower1
[Documentation] Add 100 persons and get persons
[Documentation] Note: There should be one person added first to enable rpc
- ${resp} AddPerson ${FOLLOWER1} ${PORT} ${0}
- ${resp} AddPerson ${FOLLOWER1} ${PORT} ${100}
- ${resp} GetPersons ${FOLLOWER1} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER2} ${PORT} ${0}
+ ${resp} AddPerson ${MEMBER2} ${PORT} ${100}
+ ${resp} GetPersons ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user5
SLEEP 10
Purchase 100 cars using Follower2
[Documentation] Purchase 100 cars using Follower2
- ${resp} BuyCar ${FOLLOWER2} ${PORT} ${100}
+ ${resp} BuyCar ${MEMBER3} ${PORT} ${100}
SLEEP 10
- ${resp} GetCarPersonMappings ${FOLLOWER2} ${PORT} ${0}
+ ${resp} GetCarPersonMappings ${MEMBER3} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Get Cars from Follower1
[Documentation] Get 100 using Follower1
- ${resp} Getcars ${FOLLOWER1} ${PORT} ${0}
+ ${resp} Getcars ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} manufacturer9
Get persons from Follower1
[Documentation] Get 11 Persons from Follower1
- ${resp} GetPersons ${FOLLOWER1} ${PORT} ${0}
+ ${resp} GetPersons ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user100
Get car-person mappings using Follower1
[Documentation] Get car-person mappings using Follower1 to see 100 entry
- ${resp} GetCarPersonMappings ${FOLLOWER1} ${PORT} ${0}
+ ${resp} GetCarPersonMappings ${MEMBER2} ${PORT} ${0}
Should Be Equal As Strings ${resp.status_code} 200
Should Contain ${resp.content} user100
Start Leader
[Documentation] Start Leader controller
- ${resp} Startcontroller ${LEADER} root Ecp123 /opt/clustering/dist
+ ${resp} Startcontroller ${MEMBER1} ${USER_NAME} ${PASSWORD} ${KARAF_HOME}
SLEEP 20
*** Variables ***
${FILE} ${CURDIR}/../../../variables/xmls/netconf.xml
-${REST_CONT_CONF} /restconf/config/opendaylight-inventory:nodes
+${REST_CONT_CONF} /restconf/config/opendaylight-inventory:nodes
${REST_CONT_OPER} /restconf/operational/opendaylight-inventory:nodes
-${REST_NTPR_CONF} node/controller-config/yang-ext:mount/config:modules/config:module/netopeer
+${REST_NTPR_CONF} node/controller-config/yang-ext:mount/config:modules
${REST_NTPR_MOUNT} node/netopeer/yang-ext:mount/
*** Test Cases ***
[Documentation] Add NetConf device using REST
[Tags] netconf
${XML1} Get File ${FILE}
- ${XML2} Replace String 127.0.0.1 ${MININET} ${XML1}
- ${body} Replace String mininet ${MININET_USER} ${XML2}
+ ${XML2} Replace String ${XML1} 127.0.0.1 ${MININET}
+ ${body} Replace String ${XML2} mininet ${MININET_USER}
Log ${body}
- ${resp} Putxml session ${REST_CONT_CONF}/${REST_NTPR_CONF} data=${body}
+ ${resp} Post session ${REST_CONT_CONF}/${REST_NTPR_CONF} data=${body}
Log ${resp.content}
- Should Be Equal As Strings ${resp.status_code} 200
+ Should Be Equal As Strings ${resp.status_code} 204
Get Controller Inventory
[Documentation] Get Controller operational inventory
[Tags] netconf
- ${resp} Get session ${REST_CONT_OPER}
- Log ${resp.content}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} "id":"netopeer"
- Should Contain ${resp.content} "netconf-node-inventory:connected":true
- Should Contain ${resp.content} "netconf-node-inventory:initial-capability"
+ Wait Until Keyword Succeeds 10s 2s Get Inventory
Pull External Device configuration
[Documentation] Pull Netopeer configuration
[Tags] netconf
- Wait Until Keyword Succeeds 10s 2s Pull Config
+ ${resp} Get session ${REST_CONT_CONF}/${REST_NTPR_MOUNT}
+ Log ${resp.content}
+ Should Be Equal As Strings ${resp.status_code} 200
+ Should Contain ${resp.content} {}
Verify Device Operational data
[Documentation] Verify Netopeer operational data
Should Contain ${resp.content} datastores
*** Keywords ***
-Pull Config
- ${resp} Get session ${REST_CONT_CONF}/${REST_NTPR_MOUNT}
+Get Inventory
+ ${resp} Get session ${REST_CONT_OPER}
Log ${resp.content}
Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} {}
-
+ Should Contain ${resp.content} "id":"netopeer"
+ Should Contain ${resp.content} "netconf-node-inventory:connected":true
+ Should Contain ${resp.content} "netconf-node-inventory:initial-capability"
*** Test Cases ***
Get Token With Valid Username And Password
[Documentation] Sanity test to ensure default user/password can get a token
- ${auth_data}= Create Auth Data ${USER} ${PWD}
- ${resp}= AAA Login ${CONTROLLER} ${auth_data}
- ${auth_token}= Extract Value From Content ${resp.content} /access_token strip
+ ${auth_token}= Get Auth Token
Should Be String ${auth_token}
Log Token: ${auth_token}
- Should Be Equal As Strings ${resp.status_code} 201
Validate Token Format ${auth_token}
Fail To Get Token With Invalid Username And Password
Create Token with Client Authorization
[Documentation] Get a token using client domain
- ${auth_data}= Create Auth Data ${USER} ${PWD} ${SCOPE} dlux secrete
- ${resp}= AAA Login ${CONTROLLER} ${auth_data}
- ${auth_token}= Extract Value From Content ${resp.content} /access_token strip
+ ${auth_token}= Get Auth Token ${USER} ${PWD} ${SCOPE} dlux secrete
Should Be String ${auth_token}
Log Token: ${auth_token}
- Should Be Equal As Strings ${resp.status_code} 201
Validate Token Format ${auth_token}
Token Authentication In REST Request
[Documentation] Use a token to make a successful REST transaction
- ${auth_data}= Create Auth Data ${USER} ${PWD}
- ${resp}= AAA Login ${CONTROLLER} ${auth_data}
- ${auth_token}= Extract Value From Content ${resp.content} /access_token strip
- Create Session ODL_SESSION http://${CONTROLLER}:8181
- ${headers}= Create Dictionary Content-Type application/x-www-form-urlencoded
- Set To Dictionary ${headers} Authorization Bearer ${auth_token}
- ${resp}= RequestsLibrary.GET ODL_SESSION ${OPERATIONAL_NODES_API} headers=${headers}
- Log STATUS_CODE: ${resp.status_code} CONTENT: ${resp.content}
- Should Be Equal As Strings ${resp.status_code} 200
- Should Contain ${resp.content} nodes
+ ${auth_token}= Get Auth Token
+ Make REST Transaction 200 ${auth_token}
-Revoke Token
+Revoke Token And Verify Transaction Fails
[Documentation] negative test to revoke valid token and check that REST transaction fails
- ${auth_data}= Create Auth Data ${USER} ${PWD}
- ${resp}= AAA Login ${CONTROLLER} ${auth_data}
- ${auth_token}= Extract Value From Content ${resp.content} /access_token strip
+ ${auth_token}= Get Auth Token
+ Make REST Transaction 200 ${auth_token}
+ Revoke Auth Token ${auth_token}
+ Make REST Transaction 401 ${auth_token}
+
+Disable Authentication And Re-Enable Authentication
+ [Documentation] Toggles authentication off and verifies that no login credentials are needed for REST transactions
+ Disable Authentication On Controller ${CONTROLLER}
+ Wait Until Keyword Succeeds 10s 1s Make REST Transaction 200
+ Enable Authentication On Controller ${CONTROLLER}
+ Wait Until Keyword Succeeds 10s 1s Validate That Authentication Fails With Wrong Token
+ ${auth_token}= Get Auth Token
+ Make REST Transaction 200 ${auth_token}
+
+*** Keywords ***
+Validate That Authentication Fails With Wrong Token
+ ${bad_token}= Set Variable notARealToken
+ Make REST Transaction 401 ${bad_token}
+
+Make REST Transaction
+ [Arguments] ${expected_status_code} ${auth_data}=${EMPTY}
Create Session ODL_SESSION http://${CONTROLLER}:8181
${headers}= Create Dictionary Content-Type application/x-www-form-urlencoded
- Set To Dictionary ${headers} Authorization Bearer ${auth_token}
+ Run Keyword If "${auth_data}" != "${EMPTY}" Set To Dictionary ${headers} Authorization Bearer ${auth_data}
${resp}= RequestsLibrary.GET ODL_SESSION ${OPERATIONAL_NODES_API} headers=${headers}
Log STATUS_CODE: ${resp.status_code} CONTENT: ${resp.content}
- Should Be Equal As Strings ${resp.status_code} 200
+ Should Be Equal As Strings ${resp.status_code} ${expected_status_code}
Should Contain ${resp.content} nodes
- ${headers}= Create Dictionary Content-Type application/x-www-form-urlencoded
- ${resp}= RequestsLibrary.POST ODL_SESSION ${REVOKE_TOKEN_API} data=${auth_token} headers=${headers}
- Should Be Equal As Strings ${resp.status_code} 204
- Set To Dictionary ${headers} Authorization Bearer ${auth_token}
- ${resp}= RequestsLibrary.GET ODL_SESSION ${OPERATIONAL_NODES_API} headers=${headers}
- Log STATUS_CODE: ${resp.status_code} CONTENT: ${resp.content}
- Should Be Equal As Strings ${resp.status_code} 401
-*** Keywords ***
Credential Authentication Suite Setup
Log Suite Setup
</ethernet-source>
</ethernet-match>
<arp-op>1</arp-op>
- <arp-source-transport-address>192.168.4.1</arp-source-transport-address>
- <arp-target-transport-address>10.21.22.23</arp-target-transport-address>
+ <arp-source-transport-address>192.168.4.1/32</arp-source-transport-address>
+ <arp-target-transport-address>10.21.22.23/32</arp-target-transport-address>
</match>
<cookie>13</cookie>
<flow-name>FooXf13</flow-name>
<priority>13</priority>
<barrier>false</barrier>
-</flow>
\ No newline at end of file
+</flow>
</ethernet-source>
</ethernet-match>
<arp-op>1</arp-op>
- <arp-source-transport-address>192.168.4.1</arp-source-transport-address>
- <arp-target-transport-address>10.21.22.23</arp-target-transport-address>
+ <arp-source-transport-address>192.168.4.1/32</arp-source-transport-address>
+ <arp-target-transport-address>10.21.22.23/32</arp-target-transport-address>
<arp-source-hardware-address>
<address>12:34:56:78:98:AB</address>
</arp-source-hardware-address>
<flow-name>FooXf14</flow-name>
<priority>14</priority>
<barrier>false</barrier>
-</flow>
\ No newline at end of file
+</flow>
<type>2048</type>
</ethernet-type>
</ethernet-match>
- <ipv4-source>10.0.0.1</ipv4-source>
+ <ipv4-source>10.0.0.1/32</ipv4-source>
</match>
<cookie>2</cookie>
<flow-name>FooXf2</flow-name>
<priority>2</priority>
<barrier>false</barrier>
-</flow>
\ No newline at end of file
+</flow>
-<module>
+<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">prefix:sal-netconf-connector</type>
<name>netopeer</name>
<address xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">127.0.0.1</address>
*.swm
*.csv
logs/
+.vagrant
- [Usage Details: loop_wcbench.sh](#user-content-usage-details-loop_wcbenchsh)
- [Usage Details: stats.py](#user-content-usage-details-statspy)
- [WCBench Results](#user-content-wcbench-results)
- - [Detailed Walkthrough](#user-content-detailed-walkthrough)
+ - [Detailed Walkthrough: Vagrant](#user-content-detailed-walkthrough-vagrant)
+ - [Detailed Walkthrough: Manual](#user-content-detailed-walkthrough-manual)
- [Contributing](#user-content-contributing)
- [Contact](#user-content-contact)
CBench is a somewhat classic SDN controller benchmark tool. It blasts a controller with OpenFlow packet-in messages and counts the rate of flow mod messages returned. WCBench consumes CBench as a library, then builds a robust test automation, stats collection and stats analysis/graphing system around it.
-WCBench currently only supports the OpenDaylight SDN controller, but it would be fairly easy to add support for other controllers. Community contributions are encouraged!
+WCBench currently only supports the Helium release of the OpenDaylight SDN controller, but it would be fairly easy to add support for other controllers. Community contributions are encouraged!
### Usage
OPTIONS:
-h Show this message
+ -v Output verbose debug info
-c Install CBench
-t <time> Run CBench for given number of minutes
-r Run CBench against OpenDaylight
- -i Install ODL from last successful build
+ -i Install OpenDaylight Helium 0.2.1
-p <processors> Pin ODL to given number of processors
- -o Run ODL from last successful build
+ -o Start and configure OpenDaylight Helium 0.2.1
-k Kill OpenDaylight
-d Delete local ODL and CBench code
```
OPTIONS:
-h Show this help message
+ -v Output verbose debug info
-l Loop WCBench runs without restarting ODL
-r Loop WCBench runs, restart ODL between runs
-t <time> Run WCBench for a given number of minutes
As you likely know, `ssh-copy-id` can help you setup your system to connect with the remote box via public key crypto. If you don't have keys setup for public key crypto, google for guides (very out of scope). Finally, note that the `SSH_HOSTNAME` var in `wcbench.sh` must be set to the exact same value given on the `Host` line above.
* Trivially installing/configuring ODL from the last successful build (via an Integration team Jenkins job).
* Pinning the OpenDaylight process to a given number of CPU cores. This is useful for ensuring that ODL is properly pegged, working as hard as it can with given resource limits. It can also expose bad ODL behavior that comes about when the process is pegged.
-* Running OpenDaylight and issuing all of the required configurations. Note that the `ODL_STARTUP_DELAY` variable in `wcbench.sh` might need some attention when running on a new system. If ODL takes longer than this value (in seconds) to start, `wcbench.sh` will attempt to issue the required configuration via telnet to the OSGi console before ODL can accept the configuration changes. This will result in fairly obvious error messages dumped to stdout. If you see these, increase the `ODL_STARTUP_DELAY` time. Alternatively, you can manually issue the required configuration after ODL starts by connecting to the OSGi console via telnet and issuing `dropAllPacketsRpc on`. See the `issue_odl_config` function in `wcbench.sh` for more info. Note that there's an open issue to make this config process more robust ([Issue #6](issue_odl_config)). Community contributions solicited!
+* Running OpenDaylight and issuing all of the required configurations.
* Stopping the OpenDaylight process. This is done cleanly via the `run.sh` script, not `kill` or `pkill`.
* Cleaning up everything changed by the `wcbench.sh` script, including deleting ODL and CBench sources and binaries.
```
```
-# Command for graphs of flows/sec and used RAM stats
+# All stats
+./stats.py -S
+{'fifteen_load': {'max': 0,
+ 'mean': 0.62,
+ 'min': 0,
+ 'relstddev': 0.0,
+ 'stddev': 0.0},
+ 'five_load': {'max': 0,
+ 'mean': 0.96,
+ 'min': 0,
+ 'relstddev': 0.0,
+ 'stddev': 0.0},
+ 'flows': {'max': 22384,
+ 'mean': 22384.52,
+ 'min': 22384,
+ 'relstddev': 0.0,
+ 'stddev': 0.0},
+ 'iowait': {'max': 0, 'mean': 0.0, 'min': 0, 'relstddev': 0.0, 'stddev': 0.0},
+ 'one_load': {'max': 0,
+ 'mean': 0.85,
+ 'min': 0,
+ 'relstddev': 0.0,
+ 'stddev': 0.0},
+ 'runtime': {'max': 120,
+ 'mean': 120.0,
+ 'min': 120,
+ 'relstddev': 0.0,
+ 'stddev': 0.0},
+ 'sample_size': 1,
+ 'steal_time': {'max': 0,
+ 'mean': 0.0,
+ 'min': 0,
+ 'relstddev': 0.0,
+ 'stddev': 0.0},
+ 'used_ram': {'max': 3657,
+ 'mean': 3657.0,
+ 'min': 3657,
+ 'relstddev': 0.0,
+ 'stddev': 0.0}}
+```
+
+```
+# Create graphs of flows/sec and used RAM stats
./stats.py -g flows ram
```
* The iowait value at the start of the test on the system running ODL
* The iowait value at the end of the test on the system running ODL
-### Detailed Walkthrough
+### Detailed Walkthrough: Vagrant
+
+A Vagrantfile is provided for WCBench, which allows you to get an OpenDaylight+WCBench environment up-and-running trivially easily. Vagrant also allows folks on otherwise unsupported operating systems (Ubuntu, Debian, Windows) to use WCBench.
+
+If you don't have Vagrant installed already, head over to [their docs](https://docs.vagrantup.com/v2/installation/) and get that knocked out.
+
+If you haven't already, you'll need to clone the WCBench repo:
+
+```
+[~]$ git clone https://github.com/dfarrell07/wcbench.git
+```
+
+You can now trivially stand up a VM with OpenDaylight+CBench+WCBench properly configured:
+
+```
+[~/wcbench]$ vagrant up
+```
+
+If this is your first time using the `chef/fedora-20` Vagrant box, that'll have to download. Future `vagrant up`s will use a locally cached version. Once the box is provisioned, you can connect to it like this:
+
+```
+[~/wcbench]$ vagrant ssh
+Last login: Mon Nov 17 14:29:33 2014 from 10.0.2.2
+[vagrant@localhost ~]$
+```
+
+WCBench, OpenDaylight and CBench are already installed and configured. You can start OpenDaylight like this:
+
+```
+[vagrant@localhost ~]$ cd wcbench/
+[vagrant@localhost wcbench]$ ./wcbench.sh -o
+Starting OpenDaylight
+Will repeatedly attempt connecting to Karaf shell until it's ready
+Issued `dropAllPacketsRpc on` command via Karaf shell to localhost:8101
+Issued `log:set ERROR` command via Karaf shell to localhost:8101
+```
+
+Run CBench against OpenDaylight like this:
+
+```
+[vagrant@localhost wcbench]$ ./wcbench.sh -r
+Collecting pre-test stats
+Running CBench against ODL on localhost:6633
+Collecting post-test stats
+Collecting time-irrelevant stats
+Average responses/second: 29486.95
+```
+
+Since the WCBench Vagrant box is headless, you'll want to move the `results.csv` to a system with a GUI for graphing.
+
+Vagrant hard-links `/home/vagrant/wcbench/` to the directory on your local system that contains WCBench's Vagrantfile. Dropping `results.csv` in `/home/vagrant/wcbench/` will therefore move it to your local system for analysis. You can also modify the `RESULTS_FILE` variable in `wcbench.sh` to point at `/home/vagrant/wcbench/`, if you'd like to put it there by default.
+
+```
+# Move results.csv to hard-linked dir
+[vagrant@localhost wcbench]$ mv ../results.csv .
+```
+
+```
+# Configure wcbench to create results.csv in hard-linked dir
+RESULTS_FILE=$BASE_DIR/wcbench/"results.csv"
+```
+
+You can now generate graphs and stats, as described in the [Usage Details: stats.py](#user-content-usage-details-statspy) section.
+
+To run long batches of tests, use `loop_wcbench.sh`, as described in [Usage Details: loop_wcbench.sh](#user-content-usage-details-loop_wcbenchsh).
+
+Once you're done, you can kill OpenDaylight like this:
+
+```
+[vagrant@localhost wcbench]$ ./wcbench.sh -k
+Stopping OpenDaylight
+```
+
+Unless you want a fresh WCBench Vagrant box, you can save yourself some time at your next `vagrant up` by suspending (instead of destroying) the box:
+
+```
+# On my local system
+[~/wcbench]$ vagrant suspend
+==> default: Saving VM state and suspending execution...
+```
+
+### Detailed Walkthrough: Manual
This walkthrough describes how to setup a system for WCBench testing, starting with a totally fresh [Fedora 20 Cloud](http://fedoraproject.org/get-fedora#clouds) install. I'm going to leave out the VM creation details for the sake of space. As long as you can SSH into the machine and it has access to the Internet, all of the following should work as-is. Note that this process has also been tested on CentOS 6.5 (so obviously should work on RHEL).
```
[~]$ ssh wcbench
Warning: Permanently added '10.3.9.110' (RSA) to the list of known hosts.
-[fedora@dfarrell-wcbench ~]$
+[fedora@dfarrell-wcbench ~]$
```
You'll need a utility like screen or tmux, so you can start long-running tests, log out of the system and leave them running. My Linux configurations are very scripted, so here's how I install tmux and its configuration file. You're welcome to copy this.
[fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -ci
CBench is not installed
Installing CBench dependencies
-Cloning CBench repo
-Cloning openflow source code
+Cloning CBench repo into /home/fedora/oflops
+Cloning openflow source code into /home/fedora/openflow
Building oflops/configure file
Building CBench
CBench is installed
Successfully installed CBench
Installing OpenDaylight dependencies
-Downloading last successful ODL build
-Unzipping last successful ODL build
-Downloading openflowplugin
-Removing simpleforwarding plugin
-Removing arphandler plugin
+Downloading OpenDaylight Helium 0.2.1
+Unzipping OpenDaylight Helium 0.2.1
+odl-openflowplugin-flow-services added to features installed at boot
+odl-openflowplugin-drop-test added to features installed at boot
```
Huzzah! You now have CBench and OpenDaylight installed/configured.
```
[fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -o
Starting OpenDaylight
-Giving ODL 90 seconds to get up and running
-80 seconds remaining
-70 seconds remaining
-60 seconds remaining
-50 seconds remaining
-40 seconds remaining
-30 seconds remaining
-20 seconds remaining
-10 seconds remaining
-0 seconds remaining
-Installing telnet, as it's required for issuing ODL config.
-Issuing `dropAllPacketsRpc on` command via telnet to localhost:2400
-Trying ::1...
-Connected to localhost.
-Escape character is '^]'.
-osgi> dropAllPacketsRpc on
-DropAllFlows transitions to on
-osgi> Connection closed by foreign host.
+Will repeatedly attempt connecting to Karaf shell until it's ready
+Issued `dropAllPacketsRpc on` command via Karaf shell to localhost:8101
+Issued `log:set ERROR` command via Karaf shell to localhost:8101
```
Here's an example of running a two minute CBench test against OpenDaylight:
[~/wcbench]$ rsync wcbench:/home/fedora/results.csv .
```
-You can now run `stats.py` against it:
-
-```
-[~/wcbench]$ ./stats.py -S
-{'fifteen_load': {'max': 0,
- 'mean': 0.62,
- 'min': 0,
- 'relstddev': 0.0,
- 'stddev': 0.0},
- 'five_load': {'max': 0,
- 'mean': 0.96,
- 'min': 0,
- 'relstddev': 0.0,
- 'stddev': 0.0},
- 'flows': {'max': 22384,
- 'mean': 22384.52,
- 'min': 22384,
- 'relstddev': 0.0,
- 'stddev': 0.0},
- 'iowait': {'max': 0, 'mean': 0.0, 'min': 0, 'relstddev': 0.0, 'stddev': 0.0},
- 'one_load': {'max': 0,
- 'mean': 0.85,
- 'min': 0,
- 'relstddev': 0.0,
- 'stddev': 0.0},
- 'runtime': {'max': 120,
- 'mean': 120.0,
- 'min': 120,
- 'relstddev': 0.0,
- 'stddev': 0.0},
- 'sample_size': 1,
- 'steal_time': {'max': 0,
- 'mean': 0.0,
- 'min': 0,
- 'relstddev': 0.0,
- 'stddev': 0.0},
- 'used_ram': {'max': 3657,
- 'mean': 3657.0,
- 'min': 3657,
- 'relstddev': 0.0,
- 'stddev': 0.0}}
-```
+You can now generate graphs and stats, as described in the [Usage Details: stats.py](#user-content-usage-details-statspy) section.
If you'd like to collect some serious long-term data, use the `loop_wcbench.sh` script (of course, back on the VM).
[fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -k
Stopping OpenDaylight
[fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -d
-Removing /home/fedora/opendaylight
-Removing /home/fedora/distributions-base-0.2.0-SNAPSHOT-osgipackage.zip
+Removing /home/fedora/distribution-karaf-0.2.1-Helium-SR1
+Removing /home/fedora/distribution-karaf-0.2.1-Helium-SR1.zip
Removing /home/fedora/openflow
Removing /home/fedora/oflops
Removing /usr/local/bin/cbench
### Contact
-As mentioned in the [Contributing section](https://github.com/dfarrell07/wcbench/blob/master/README.md#contributing), for bugs/features, please raise an [Issue](https://github.com/dfarrell07/wcbench/issues) on the WCBench GitHub page.
-
-Daniel Farrell is the main developer of WCBench. You can contact him directly at dfarrell@redhat.com or dfarrell07@gmail.com. He also hangs out on IRC at Freenode/#opendaylight most of his waking hours.
+For feature requests, bug reports and questions please raise an [Issue](https://github.com/dfarrell07/wcbench/issues). Daniel Farrell is the primary developer of this tool. He can be contacted directly at dfarrell@redhat.com or on IRC (dfarrell07 on Freenode). **Prefer public, documented communication like Issues over direct 1-1 communication. This is an Open Source project. Keep the community in the loop.**
--- /dev/null
+VAGRANTFILE_API_VERSION = "2"
+
+# The WCBench README describes how to use Vagrant for WCBench work
+# See: https://github.com/dfarrell07/wcbench#user-content-detailed-walkthrough-vagrant
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ # Build Vagrant box based on Fedora 20
+ config.vm.box = "chef/fedora-20"
+
+ # Configure VM RAM and CPU
+ config.vm.provider "virtualbox" do |v|
+ v.memory = 2048
+ v.cpus = 4
+ end
+
+ # This allows sudo commands in wcbench.sh to work
+ config.ssh.pty = true
+
+ # Unexpectedly, /usr/local/bin isn't in the default path
+# The cbench and oflops binaries install there, so we need to add it to PATH
+ config.vm.provision "shell", inline: "echo export PATH=$PATH:/usr/local/bin >> /home/vagrant/.bashrc"
+ config.vm.provision "shell", inline: "echo export PATH=$PATH:/usr/local/bin >> /root/.bashrc"
+
+ # Drop code in /home/vagrant/wcbench, not /vagrant
+ config.vm.synced_folder ".", "/vagrant", disabled: true
+ config.vm.synced_folder ".", "/home/vagrant/wcbench"
+
+ # Install OpenDaylight and CBench with verbose output
+ config.vm.provision "shell", inline: 'su -c "/home/vagrant/wcbench/wcbench.sh -vci" vagrant'
+end
EX_USAGE=64
EX_OK=0
+# Output verbose debug info (true) or not (anything else)
+VERBOSE=false
+
###############################################################################
# Prints usage message
# Globals:
OPTIONS:
-h Show this help message
+ -v Output verbose debug info
-l Loop WCBench runs without restarting ODL
-r Loop WCBench runs, restart ODL between runs
-t <time> Run WCBench for a given number of minutes
# Starts ODL, optionally pinning it to a given number of processors
# Globals:
# processors
+# VERBOSE
# Arguments:
# None
# Returns:
###############################################################################
start_odl()
{
- if [ -z $processors ]; then
- # Start ODL, don't pass processor info
- echo "Starting ODL, not passing processor info"
- ./wcbench.sh -o
+ if [ "$VERBOSE" = true ]; then
+ if [ -z "$processors" ]; then
+ # Start ODL, don't pass processor info
+ echo "Starting ODL, not passing processor info"
+ ./wcbench.sh -vo
+ else
+ # Start ODL, pinning it to given number of processors
+ echo "Pinning ODL to $processors processor(s)"
+ ./wcbench.sh -vp $processors -o
+ fi
else
- # Start ODL, pinning it to given number of processors
- echo "Pinning ODL to $processors processor(s)"
- ./wcbench.sh -p $processors -o
+ if [ -z "$processors" ]; then
+ # Start ODL, don't pass processor info
+ echo "Starting ODL, not passing processor info"
+ ./wcbench.sh -o
+ else
+ # Start ODL, pinning it to given number of processors
+ echo "Pinning ODL to $processors processor(s)"
+ ./wcbench.sh -p $processors -o
+ fi
fi
}
# Run WCBench against ODL, optionally passing a WCBench run time
# Globals:
# run_time
+# VERBOSE
# Arguments:
# None
# Returns:
###############################################################################
run_wcbench()
{
- if [ -z $run_time ]; then
- # Flag means run WCBench
- echo "Running WCBench, not passing run time info"
- ./wcbench.sh -r
+ if [ "$VERBOSE" = true ]; then
+ if [ -z "$run_time" ]; then
+ # Flag means run WCBench
+ echo "Running WCBench, not passing run time info"
+ ./wcbench.sh -vr
+ else
+ # Flags mean use $run_time WCBench runs, run WCBench
+ echo "Running WCBench with $run_time minute(s) run time"
+ ./wcbench.sh -vt $run_time -r
+ fi
else
- # Flags mean use $run_time WCBench runs, run WCBench
- echo "Running WCBench with $run_time minute(s) run time"
- ./wcbench.sh -t $run_time -r
+ if [ -z "$run_time" ]; then
+ # Flag means run WCBench
+ echo "Running WCBench, not passing run time info"
+ ./wcbench.sh -r
+ else
+ # Flags mean use $run_time WCBench runs, run WCBench
+ echo "Running WCBench with $run_time minute(s) run time"
+ ./wcbench.sh -t $run_time -r
+ fi
fi
}
###############################################################################
# Repeatedly run WCBench against ODL, restart ODL between runs
# Globals:
-# None
+# VERBOSE
# Arguments:
# None
# Returns:
start_odl
run_wcbench
# Stop ODL
- ./wcbench.sh -k
+ if [ "$VERBOSE" = true ]; then
+ ./wcbench.sh -vk
+ else
+ ./wcbench.sh -k
+ fi
done
}
exit $EX_USAGE
fi
+# Used to output help if no valid action results from arguments
+action_taken=false
+
# Parse options given from command line
-while getopts ":hlp:rt:" opt; do
+while getopts ":hvlp:rt:" opt; do
case "$opt" in
h)
# Help message
usage
exit $EX_OK
;;
+ v)
+ # Output debug info verbosely
+ VERBOSE=true
+ ;;
l)
# Loop without restarting ODL between WCBench runs
loop_no_restart
+ action_taken=true
;;
p)
# Pin a given number of processors
r)
# Restart ODL between each WCBench run
loop_with_restart
+ action_taken=true
;;
t)
# Set length of WCBench run in minutes
exit $EX_USAGE
esac
done
+
+# Output help message if no valid action was taken
+if [ "$action_taken" != true ]; then
+ usage
+ exit $EX_USAGE
+fi
log_file = "cbench.log"
precision = 3
run_index = 0
- flow_index = 1
- start_time_index = 2
- end_time_index = 3
- start_steal_time_index = 10
- end_steal_time_index = 11
- used_ram_index = 13
- one_load_index = 16
- five_load_index = 17
- fifteen_load_index = 18
- start_iowait_index = 20
- end_iowait_index = 21
+ min_flow_index = 1
+ max_flow_index = 2
+ avg_flow_index = 3
+ start_time_index = 4
+ end_time_index = 5
+ start_steal_time_index = 12
+ end_steal_time_index = 13
+ used_ram_index = 15
+ one_load_index = 18
+ five_load_index = 19
+ fifteen_load_index = 20
+ start_iowait_index = 22
+ end_iowait_index = 23
def __init__(self):
"""Setup some flags and data structures, kick off build_cols call."""
def build_cols(self):
"""Parse results file into lists of values, one per column."""
self.run_col = []
- self.flows_col = []
+ self.min_flows_col = []
+ self.max_flows_col = []
+ self.avg_flows_col = []
self.runtime_col = []
self.used_ram_col = []
self.iowait_col = []
for row in results_reader:
try:
self.run_col.append(float(row[self.run_index]))
- self.flows_col.append(float(row[self.flow_index]))
+ self.min_flows_col.append(float(row[self.min_flow_index]))
+ self.max_flows_col.append(float(row[self.max_flow_index]))
+ self.avg_flows_col.append(float(row[self.avg_flow_index]))
self.runtime_col.append(float(row[self.end_time_index]) -
float(row[self.start_time_index]))
self.used_ram_col.append(float(row[self.used_ram_index]))
# Skips header
continue
- def compute_flow_stats(self):
- """Compute CBench flows/second stats."""
- self.compute_generic_stats("flows", self.flows_col)
+ def compute_avg_flow_stats(self):
+ """Compute CBench average flows/second stats."""
+ self.compute_generic_stats("flows", self.avg_flows_col)
- def build_flow_graph(self, total_gcount, graph_num):
- """Plot flows/sec data.
+ def build_avg_flow_graph(self, total_gcount, graph_num):
+ """Plot average flows/sec data.
:param total_gcount: Total number of graphs to render.
:type total_gcount: int
"""
self.build_generic_graph(total_gcount, graph_num,
- "Flows per Second", self.flows_col)
+ "Average Flows per Second", self.avg_flows_col)
+
+ def compute_min_flow_stats(self):
+ """Compute CBench min flows/second stats."""
+ self.compute_generic_stats("min_flows", self.min_flows_col)
+
+ def build_min_flow_graph(self, total_gcount, graph_num):
+ """Plot min flows/sec data.
+
+ :param total_gcount: Total number of graphs to render.
+ :type total_gcount: int
+ :param graph_num: Number for this graph, <= total_gcount.
+ :type graph_num: int
+
+ """
+ self.build_generic_graph(total_gcount, graph_num,
+ "Minimum Flows per Second", self.min_flows_col)
+
+ def compute_max_flow_stats(self):
+ """Compute CBench max flows/second stats."""
+ self.compute_generic_stats("max_flows", self.max_flows_col)
+
+ def build_max_flow_graph(self, total_gcount, graph_num):
+ """Plot max flows/sec data.
+
+ :param total_gcount: Total number of graphs to render.
+ :type total_gcount: int
+ :param graph_num: Number for this graph, <= total_gcount.
+ :type graph_num: int
+
+ """
+ self.build_generic_graph(total_gcount, graph_num,
+ "Maximum Flows per Second", self.max_flows_col)
def compute_ram_stats(self):
"""Compute used RAM stats."""
def build_runtime_graph(self, total_gcount, graph_num):
"""Plot CBench runtime length data.
- :paruntime total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :paruntime graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
"""
def build_iowait_graph(self, total_gcount, graph_num):
"""Plot iowait data.
- :paiowait total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :paiowait graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
"""
def build_steal_time_graph(self, total_gcount, graph_num):
"""Plot steal time data.
- :pasteal_time total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :pasteal_time graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
"""
def build_one_load_graph(self, total_gcount, graph_num):
"""Plot one minute load data.
- :paone_load total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :paone_load graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
"""
def build_five_load_graph(self, total_gcount, graph_num):
"""Plot five minute load data.
- :pafive_load total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :pafive_load graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
"""
def build_fifteen_load_graph(self, total_gcount, graph_num):
"""Plot fifteen minute load data.
- :pafifteen_load total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :pafifteen_load graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
"""
def build_generic_graph(self, total_gcount, graph_num, y_label, data_col):
"""Helper for plotting generic data.
- :pageneric total_gcount: Total number of graphs to render.
+ :param total_gcount: Total number of graphs to render.
:type total_gcount: int
- :pageneric graph_num: Number for this graph, <= total_gcount.
+ :param graph_num: Number for this graph, <= total_gcount.
:type graph_num: int
:param y_label: Lable of Y axis.
:type y_label: string
stats = Stats()
# Map of graph names to the Stats.fns that build them
-graph_map = {"flows": stats.build_flow_graph,
+graph_map = {"min_flows": stats.build_min_flow_graph,
+ "max_flows": stats.build_max_flow_graph,
+ "flows": stats.build_avg_flow_graph,
"runtime": stats.build_runtime_graph,
"iowait": stats.build_iowait_graph,
"steal_time": stats.build_steal_time_graph,
"five_load": stats.build_five_load_graph,
"fifteen_load": stats.build_fifteen_load_graph,
"ram": stats.build_ram_graph}
-stats_map = {"flows": stats.compute_flow_stats,
+stats_map = {"min_flows": stats.compute_min_flow_stats,
+ "max_flows": stats.compute_max_flow_stats,
+ "flows": stats.compute_avg_flow_stats,
"runtime": stats.compute_runtime_stats,
"iowait": stats.compute_iowait_stats,
"steal_time": stats.compute_steal_time_stats,
EX_OK=0
EX_ERR=1
+# Output verbose debug info (true) or not (anything else)
+VERBOSE=false
+
# Params for CBench test and ODL config
-NUM_SWITCHES=64 # Default number of switches for CBench to simulate
+NUM_SWITCHES=32 # Default number of switches for CBench to simulate
NUM_MACS=100000 # Default number of MACs for CBench to use
TESTS_PER_SWITCH=10 # Default number of CBench tests to do per CBench run
MS_PER_TEST=10000 # Default milliseconds to run each CBench test
CBENCH_WARMUP=1 # Default number of warmup cycles to run CBench
-OSGI_PORT=2400 # Port that the OSGi console listens for telnet on
-ODL_STARTUP_DELAY=90 # Default time in seconds to give ODL to start
-ODL_RUNNING_STATUS=0 # run.sh gives this status when ODL is running
-ODL_STOPPED_STATUS=255 # run.sh gives this status when ODL is stopped
-ODL_BROKEN_STATUS=1 # run.sh gives this status when things are FUBR
+KARAF_SHELL_PORT=8101 # Port that the Karaf shell listens on
CONTROLLER="OpenDaylight" # Currently only support ODL
CONTROLLER_IP="localhost" # Change this to remote IP if running on two systems
CONTROLLER_PORT=6633 # Default port for OpenDaylight
BASE_DIR=$HOME # Directory that code and such is dropped into
OF_DIR=$BASE_DIR/openflow # Directory that contains OpenFlow code
OFLOPS_DIR=$BASE_DIR/oflops # Directory that contains oflops repo
-ODL_DIR=$BASE_DIR/opendaylight # Directory with ODL code
-ODL_ZIP="distributions-base-0.2.0-SNAPSHOT-osgipackage.zip" # ODL zip name
+ODL_DIR=$BASE_DIR/distribution-karaf-0.2.1-Helium-SR1 # Directory with ODL code
+ODL_ZIP="distribution-karaf-0.2.1-Helium-SR1.zip" # ODL zip name
ODL_ZIP_PATH=$BASE_DIR/$ODL_ZIP # Full path to ODL zip
PLUGIN_DIR=$ODL_DIR/plugins # ODL plugin directory
RESULTS_FILE=$BASE_DIR/"results.csv" # File that results are stored in
CBENCH_LOG=$BASE_DIR/"cbench.log" # Log file used to store strange error msgs
CBENCH_BIN="/usr/local/bin/cbench" # Path to CBench binary
+OFLOPS_BIN="/usr/local/bin/oflops" # Path to oflops binary
+FEATURES_FILE=$ODL_DIR/etc/org.apache.karaf.features.cfg # Karaf features to install
# Array that stores results in indexes defined by cols array
declare -a results
# The order of these array values determines column order in RESULTS_FILE
-cols=(run_num cbench_avg start_time end_time controller_ip human_time
- num_switches num_macs tests_per_switch ms_per_test start_steal_time
- end_steal_time total_ram used_ram free_ram cpus one_min_load five_min_load
- fifteen_min_load controller start_iowait end_iowait)
+cols=(run_num cbench_min cbench_max cbench_avg start_time end_time
+ controller_ip human_time num_switches num_macs tests_per_switch
+ ms_per_test start_steal_time end_steal_time total_ram used_ram
+ free_ram cpus one_min_load five_min_load fifteen_min_load controller
+ start_iowait end_iowait)
# This two-stat-array system is needed until I find an answer to this question:
# http://goo.gl/e0M8Tp
OPTIONS:
-h Show this message
+ -v Output verbose debug info
-c Install CBench
-t <time> Run CBench for given number of minutes
-r Run CBench against OpenDaylight
- -i Install ODL from last successful build
+ -i Install OpenDaylight Helium 0.2.1
-p <processors> Pin ODL to given number of processors
- -o Run ODL from last successful build
+ -o Start and configure OpenDaylight Helium 0.2.1
-k Kill OpenDaylight
-d Delete local ODL and CBench code
EOF
}
###############################################################################
+# Checks if CBench is installed
# Globals:
# EX_OK
# EX_NOT_FOUND
# This has been tested on fresh cloud versions of Fedora 20 and CentOS 6.5
# Not currently building oflops/netfpga-packet-generator-c-library (optional)
# Globals:
+# VERBOSE
# EX_OK
# EX_ERR
# OFLOPS_DIR
# Install required packages
echo "Installing CBench dependencies"
- sudo yum install -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel git &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ sudo yum install -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel git
+ else
+ sudo yum install -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel git &> /dev/null
+ fi
# Clone repo that contains CBench
- echo "Cloning CBench repo"
- git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR &> /dev/null
+ echo "Cloning CBench repo into $OFLOPS_DIR"
+ if [ "$VERBOSE" = true ]; then
+ git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR
+ else
+ git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR &> /dev/null
+ fi
# CBench requires the OpenFlow source code, clone it
- echo "Cloning openflow source code"
- git clone git://gitosis.stanford.edu/openflow.git $OF_DIR &> /dev/null
+ echo "Cloning openflow source code into $OF_DIR"
+ if [ "$VERBOSE" = true ]; then
+ git clone git://gitosis.stanford.edu/openflow.git $OF_DIR
+ else
+ git clone git://gitosis.stanford.edu/openflow.git $OF_DIR &> /dev/null
+ fi
# Build the oflops/configure file
old_cwd=$PWD
cd $OFLOPS_DIR
echo "Building oflops/configure file"
- ./boot.sh &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ ./boot.sh
+ else
+ ./boot.sh &> /dev/null
+ fi
# Build oflops
echo "Building CBench"
- ./configure --with-openflow-src-dir=$OF_DIR &> /dev/null
- make &> /dev/null
- sudo make install &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ ./configure --with-openflow-src-dir=$OF_DIR
+ make
+ sudo make install
+ else
+ ./configure --with-openflow-src-dir=$OF_DIR &> /dev/null
+ make &> /dev/null
+ sudo make install &> /dev/null
+ fi
cd $old_cwd
# Validate that the install worked
# Globals:
# CONTROLLER_IP
# CONTROLLER_PORT
+# VERBOSE
# MS_PER_TEST
# TEST_PER_SWITCH
# NUM_SWITCHES
{
get_pre_test_stats
echo "Running CBench against ODL on $CONTROLLER_IP:$CONTROLLER_PORT"
- cbench_output=`cbench -c $CONTROLLER_IP -p $CONTROLLER_PORT -m $MS_PER_TEST -l $TESTS_PER_SWITCH -s $NUM_SWITCHES -M $NUM_MACS -w $CBENCH_WARMUP 2>&1`
+ if [ "$VERBOSE" = true ]; then
+ cbench_output=`cbench -c $CONTROLLER_IP -p $CONTROLLER_PORT -m $MS_PER_TEST -l $TESTS_PER_SWITCH -s $NUM_SWITCHES -M $NUM_MACS -w $CBENCH_WARMUP`
+ else
+ cbench_output=`cbench -c $CONTROLLER_IP -p $CONTROLLER_PORT -m $MS_PER_TEST -l $TESTS_PER_SWITCH -s $NUM_SWITCHES -M $NUM_MACS -w $CBENCH_WARMUP 2>&1`
+ fi
get_post_test_stats
get_time_irrelevant_stats
- # Parse out average responses/sec, log/handle very rare unexplained errors
- # This logic can be removed if/when the root cause of this error is discovered and fixed
+ # Parse out min, max and average responses/sec, log/handle errors
+ # See: https://github.com/dfarrell07/wcbench/issues/16
+ cbench_min=`echo "$cbench_output" | grep RESULT | awk '{print $8}' | awk -F'/' '{print $1}'`
+ cbench_max=`echo "$cbench_output" | grep RESULT | awk '{print $8}' | awk -F'/' '{print $2}'`
cbench_avg=`echo "$cbench_output" | grep RESULT | awk '{print $8}' | awk -F'/' '{print $3}'`
if [ -z "$cbench_avg" ]; then
- echo "WARNING: Rare error occurred: failed to parse avg. See $CBENCH_LOG." >&2
+ echo "WARNING: Error occurred: Failed to parse CBench average" >&2
+ echo "This is an issue with CBench or ODL, not WCBench." >&2
+ echo "May need to reduce NUM_SWITCHES or allocate more CPU cores" >&2
+ echo "See: $CBENCH_LOG" >&2
+ echo "See: https://github.com/dfarrell07/wcbench/issues/16" >&2
echo "Run $(next_run_num) failed to record a CBench average. CBench details:" >> $CBENCH_LOG
echo "$cbench_output" >> $CBENCH_LOG
return
else
echo "Average responses/second: $cbench_avg"
+ results[$(name_to_index "cbench_min")]=$cbench_min
+ results[$(name_to_index "cbench_max")]=$cbench_max
results[$(name_to_index "cbench_avg")]=$cbench_avg
fi
fi
}
+###############################################################################
+# Checks if the given feature is in list to be installed at boot
+# Globals:
+# FEATURES_FILE
+# EX_OK
+# EX_NOT_FOUND
+# Arguments:
+# Feature to search featuresBoot list for
+# Returns:
+# EX_OK if feature already in featuresBoot list
+# EX_NOT_FOUND if feature isn't in featuresBoot list
+###############################################################################
+is_in_featuresBoot()
+{
+ feature=$1
+
+ # Check if feature is already set to be installed at boot
+ if grep featuresBoot= "$FEATURES_FILE" | grep -q "$feature"; then
+ return $EX_OK
+ else
+ return $EX_NOT_FOUND
+ fi
+}
+
+###############################################################################
+# Adds features to be installed by Karaf at ODL boot
+# Globals:
+# FEATURES_FILE
+# EX_OK
+# EX_ERR
+# Arguments:
+# Feature to append to end of featuresBoot CSV list
+# Returns:
+# EX_OK if feature already is installed or was successfully added
+# EX_ERR if failed to add feature to group installed at boot
+###############################################################################
+add_to_featuresBoot()
+{
+ feature=$1
+
+ # Check if feature is already set to be installed at boot
+ if is_in_featuresBoot $feature; then
+ echo "$feature is already set to be installed at boot"
+ return $EX_OK
+ fi
+
+ # Append feature to end of boot-install list
+ sed -i "/^featuresBoot=/ s/$/,$feature/" $FEATURES_FILE
+
+ # Check if feature was added to install list correctly
+ if is_in_featuresBoot $feature; then
+ echo "$feature added to features installed at boot"
+ return $EX_OK
+ else
+ echo "ERROR: Failed to add $feature to features installed at boot"
+ return $EX_ERR
+ fi
+}
+
###############################################################################
# Installs latest build of the OpenDaylight controller
# Note that the installed build is via an Integration team Jenkins job
# Globals:
-# BASE_DIR
+# ODL_DIR
+# VERBOSE
# ODL_ZIP_DIR
+# BASE_DIR
+# ODL_ZIP_PATH
# ODL_ZIP
# EX_ERR
# Arguments:
# None
# Returns:
-# EX_ERR if ODL download fails, typically because of version bump
+# EX_ERR if ODL install fails
###############################################################################
install_opendaylight()
{
- # Remove old controller code
- uninstall_odl
+ # Only remove unzipped code, as zip is large and unlikely to have changed.
+ if [ -d $ODL_DIR ]; then
+ echo "Removing $ODL_DIR"
+ rm -rf $ODL_DIR
+ fi
# Install required packages
echo "Installing OpenDaylight dependencies"
- sudo yum install -y java-1.7.0-openjdk unzip wget &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ sudo yum install -y java-1.7.0-openjdk unzip wget
+ else
+ sudo yum install -y java-1.7.0-openjdk unzip wget &> /dev/null
+ fi
- # Grab last successful build
- echo "Downloading last successful ODL build"
- wget -P $BASE_DIR "https://jenkins.opendaylight.org/integration/job/integration-master-project-centralized-integration/lastSuccessfulBuild/artifact/distributions/base/target/$ODL_ZIP" &> /dev/null
+ # If we already have the zip archive, use that.
+ if [ -f $ODL_ZIP_PATH ]; then
+ echo "Using local $ODL_ZIP_PATH. Pass -d flag to remove."
+ else
+ # Grab OpenDaylight Helium 0.2.1
+ echo "Downloading OpenDaylight Helium 0.2.1"
+ if [ "$VERBOSE" = true ]; then
+ wget -P $BASE_DIR "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.1-Helium-SR1/$ODL_ZIP"
+ else
+ wget -P $BASE_DIR "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.1-Helium-SR1/$ODL_ZIP" &> /dev/null
+ fi
+ fi
+
+ # Confirm that download was successful
if [ ! -f $ODL_ZIP_PATH ]; then
echo "WARNING: Failed to dl ODL. Version bumped? If so, update \$ODL_ZIP" >&2
return $EX_ERR
fi
- echo "Unzipping last successful ODL build"
- unzip -d $BASE_DIR $ODL_ZIP_PATH &> /dev/null
- # Make some plugin changes that are apparently required for CBench
- echo "Downloading openflowplugin"
- wget -P $PLUGIN_DIR 'https://jenkins.opendaylight.org/openflowplugin/job/openflowplugin-merge/lastSuccessfulBuild/org.opendaylight.openflowplugin$drop-test/artifact/org.opendaylight.openflowplugin/drop-test/0.0.3-SNAPSHOT/drop-test-0.0.3-SNAPSHOT.jar' &> /dev/null
- echo "Removing simpleforwarding plugin"
- rm $PLUGIN_DIR/org.opendaylight.controller.samples.simpleforwarding-0.4.2-SNAPSHOT.jar
- echo "Removing arphandler plugin"
- rm $PLUGIN_DIR/org.opendaylight.controller.arphandler-0.5.2-SNAPSHOT.jar
+ # Unzip ODL archive
+ echo "Unzipping OpenDaylight Helium 0.2.1"
+ if [ "$VERBOSE" = true ]; then
+ unzip -d $BASE_DIR $ODL_ZIP_PATH
+ else
+ unzip -d $BASE_DIR $ODL_ZIP_PATH &> /dev/null
+ fi
- # TODO: Change controller log level to ERROR. Confirm this is necessary.
- # Relevant Issue: https://github.com/dfarrell07/wcbench/issues/3
+ # Add required features to list installed by Karaf at ODL boot
+ add_to_featuresBoot "odl-openflowplugin-flow-services"
+ add_to_featuresBoot "odl-openflowplugin-drop-test"
}
###############################################################################
# Assumes you've checked that ODL is installed
# Globals:
# ODL_DIR
+# VERBOSE
+# EX_OK
+# EX_NOT_FOUND
# Arguments:
# None
# Returns:
{
old_cwd=$PWD
cd $ODL_DIR
- ./run.sh -status &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ ./bin/status
+ else
+ ./bin/status &> /dev/null
+ fi
if [ $? = 0 ]; then
return $EX_OK
else
# ODL_DIR
# EX_OK
# processors
-# OSGI_PORT
-# ODL_STARTUP_DELAY
+# VERBOSE
# Arguments:
# None
# Returns:
else
echo "Starting OpenDaylight"
if [ -z $processors ]; then
- ./run.sh -start $OSGI_PORT -of13 -Xms1g -Xmx4g &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ ./bin/start
+ else
+ ./bin/start &> /dev/null
+ fi
else
echo "Pinning ODL to $processors processor(s)"
- if [ $processors == 1 ]; then
- echo "Increasing ODL start time, as 1 processor will slow it down"
- ODL_STARTUP_DELAY=120
- fi
# Use taskset to pin ODL to a given number of processors
- taskset -c 0-$(expr $processors - 1) ./run.sh -start $OSGI_PORT -of13 -Xms1g -Xmx4g &> /dev/null
+ if [ "$VERBOSE" = true ]; then
+ taskset -c 0-$(expr $processors - 1) ./bin/start
+ else
+ taskset -c 0-$(expr $processors - 1) ./bin/start &> /dev/null
+ fi
fi
fi
cd $old_cwd
- # TODO: Smarter block until ODL is actually up
- # Relevant Issue: https://github.com/dfarrell07/wcbench/issues/6
- echo "Giving ODL $ODL_STARTUP_DELAY seconds to get up and running"
- while [ $ODL_STARTUP_DELAY -gt 0 ]; do
- sleep 10
- let ODL_STARTUP_DELAY=ODL_STARTUP_DELAY-10
- echo "$ODL_STARTUP_DELAY seconds remaining"
- done
issue_odl_config
}
###############################################################################
-# Give `dropAllPackets on` command via telnet to OSGi
-# See: http://goo.gl/VEJIRc
-# TODO: This can be issued too early. Smarter check needed.
-# Relevant Issue: https://github.com/dfarrell07/wcbench/issues/6
+# Set `dropAllPackets on` and log level to DEBUG via Karaf shell
# Globals:
-# OSGI_PORT
+# VERBOSE
+# KARAF_SHELL_PORT
# Arguments:
# None
# Returns:
###############################################################################
issue_odl_config()
{
- if ! command -v telnet &> /dev/null; then
- echo "Installing telnet, as it's required for issuing ODL config."
- sudo yum install -y telnet &> /dev/null
+ # This could be done with public key crypto, but sshpass is easier
+ if ! command -v sshpass &> /dev/null; then
+ echo "Installing sshpass. It's used for issuing ODL config."
+ if [ "$VERBOSE" = true ]; then
+ sudo yum install -y sshpass
+ else
+ sudo yum install -y sshpass &> /dev/null
+ fi
+ fi
+
+ # Set `dropAllPacketsRpc on`
+ echo "Will repeatedly attempt connecting to Karaf shell until it's ready"
+ # Loop until exit status 0 (success) given by Karaf shell
+ # Exit status 255 means Karaf shell isn't open for SSH connections yet
+ # Exit status 1 means `dropAllPacketsRpc on` isn't runnable yet
+ if [ "$VERBOSE" = true ]; then
+ until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost dropallpacketsrpc on
+ do
+ echo "Karaf shell isn't ready yet, sleeping 5 seconds..."
+ sleep 5
+ done
+ else
+ until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost dropallpacketsrpc on &> /dev/null
+ do
+ sleep 5
+ done
fi
- echo "Issuing \`dropAllPacketsRpc on\` command via telnet to localhost:$OSGI_PORT"
- # NB: Not using sleeps results in silent failures (cmd has no effect)
- (sleep 3; echo dropAllPacketsRpc on; sleep 3) | telnet localhost $OSGI_PORT
+ echo "Issued \`dropAllPacketsRpc on\` command via Karaf shell to localhost:$KARAF_SHELL_PORT"
+
+ # Change log level to ERROR
+ # Loop until exit status 0 (success) given by Karaf shell
+ # Exit status 255 means Karaf shell isn't open for SSH connections yet
+ if [ "$VERBOSE" = true ]; then
+ until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost log:set ERROR
+ do
+ echo "Karaf shell isn't ready yet, sleeping 5 seconds..."
+ sleep 5
+ done
+ else
+ until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost log:set ERROR &> /dev/null
+ do
+ sleep 5
+ done
+ fi
+ echo "Issued \`log:set ERROR\` command via Karaf shell to localhost:$KARAF_SHELL_PORT"
}
###############################################################################
-# Stops OpenDaylight using run.sh
+# Stops OpenDaylight
# Globals:
# ODL_DIR
+# VERBOSE
# Arguments:
# None
# Returns:
old_cwd=$PWD
cd $ODL_DIR
if odl_started; then
- echo "Stopping OpenDaylight"
- ./run.sh -stop &> /dev/null
+ echo "Told ODL to stop. Waiting on it to do so..."
+ echo "This check is useless if you have other Java processes running (ctrl+c it)."
+ if [ "$VERBOSE" = true ]; then
+ ./bin/stop
+ else
+ ./bin/stop &> /dev/null
+ fi
+ # Loop until actually stopped
+ until ! pgrep java &> /dev/null
+ do
+ sleep .5
+ done
+ echo "OpenDaylight has stopped."
else
echo "OpenDaylight isn't running"
fi
echo "Removing $CBENCH_BIN"
sudo rm -f $CBENCH_BIN
fi
- # TODO: Remove oflops binary
- # Relevant issue: https://github.com/dfarrell07/wcbench/issues/25
+ if [ -f $OFLOPS_BIN ]; then
+ echo "Removing $OFLOPS_BIN"
+ sudo rm -f $OFLOPS_BIN
+ fi
}
# If executed with no options
exit $EX_USAGE
fi
+# Used to output help if no valid action results from arguments
+action_taken=false
+
# Parse options given from command line
-while getopts ":hrcip:ot:kd" opt; do
+while getopts ":hvrcip:ot:kd" opt; do
case "$opt" in
h)
# Help message
usage
exit $EX_OK
;;
+ v)
+ # Output debug info verbosely
+ VERBOSE=true
+ ;;
r)
# Run CBench against OpenDaylight
if [ $CONTROLLER_IP = "localhost" ]; then
fi
fi
run_cbench
+ action_taken=true
;;
c)
# Install CBench
install_cbench
+ action_taken=true
;;
i)
- # Install OpenDaylight from last successful build
+ # Install OpenDaylight
install_opendaylight
+ action_taken=true
;;
p)
# Pin a given number of processors
fi
;;
o)
- # Run OpenDaylight from last successful build
+ # Run OpenDaylight
if ! odl_installed; then
echo "OpenDaylight isn't installed, can't start it"
exit $EX_ERR
fi
start_opendaylight
+ action_taken=true
;;
t)
# Set CBench run time in minutes
exit $EX_ERR
fi
stop_opendaylight
+ action_taken=true
;;
d)
# Delete local ODL and CBench code
uninstall_odl
uninstall_cbench
+ action_taken=true
;;
*)
# Print usage message
exit $EX_USAGE
esac
done
+
+# Output help message if no valid action was taken
+if [ "$action_taken" != true ]; then
+ usage
+ exit $EX_USAGE
+fi