Merge "added output error message display by remote ssh execution"
author     Luis Gomez <ecelgp@gmail.com>
Sun, 30 Nov 2014 01:24:32 +0000 (01:24 +0000)
committer  Gerrit Code Review <gerrit@opendaylight.org>
Sun, 30 Nov 2014 01:24:32 +0000 (01:24 +0000)
32 files changed:
test/csit/libraries/AAAKeywords.txt
test/csit/suites/clustering/datastore/001_start_cluster.txt [new file with mode: 0644]
test/csit/suites/clustering/datastore/010_crud_on_leader.txt [moved from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_01_execute_on_leader.txt with 80% similarity]
test/csit/suites/clustering/datastore/020_crud_on_any_follower.txt [moved from test/csit/suites/clustering/datastore/basic/020_restconf_rpc_crud_test_02_execute_on_follower1.txt with 81% similarity]
test/csit/suites/clustering/datastore/030_failover_crud_on_new_leader.txt [moved from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_05_execute_on_new_leader.txt with 86% similarity]
test/csit/suites/clustering/datastore/040_failover_read_from_follower.txt [moved from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_06_execute_on_remaining_follower.txt with 80% similarity]
test/csit/suites/clustering/datastore/050_failover_crud_on_any_follower.txt [moved from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_07_execute_on_remaining_follower.txt with 89% similarity]
test/csit/suites/clustering/datastore/060_failover_read_from_new_leader.txt [moved from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_08_execute_on_new_leader.txt with 79% similarity]
test/csit/suites/clustering/datastore/130_recovery_restart_leader.txt [new file with mode: 0644]
test/csit/suites/clustering/datastore/140_recovery_restart_follower.txt [new file with mode: 0644]
test/csit/suites/clustering/datastore/__init__.txt [moved from test/csit/suites/clustering/datastore/basic/__init__.txt with 51% similarity]
test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_09_execute_on_last_node.txt [deleted file]
test/csit/suites/clustering/datastore/basic/030_restconf_rpc_crud_test_03_execute_on_follower2.txt [deleted file]
test/csit/suites/clustering/datastore/basic/130_restconf_disaster_recovery_restart_leader.txt [deleted file]
test/csit/suites/clustering/datastore/basic/140_restconf_disaster_recovery_restart_follower.txt [deleted file]
test/csit/suites/clustering/longevity/010__longevity.txt [moved from test/csit/suites/clustering/datastore/longevity/010__longevity.txt with 100% similarity]
test/csit/suites/clustering/longevity/__init__.txt [moved from test/csit/suites/clustering/datastore/longevity/__init__.txt with 100% similarity]
test/csit/suites/clustering/routedrpc/001_start_cluster.txt [new file with mode: 0644]
test/csit/suites/clustering/routedrpc/023_routed_rpc_crud_test.txt [moved from test/csit/suites/clustering/datastore/routedrpc/023_routed_rpc_crud_test.txt with 65% similarity]
test/csit/suites/clustering/routedrpc/024_routed_rpc_crud_test.txt [moved from test/csit/suites/clustering/datastore/routedrpc/024_routed_rpc_crud_test.txt with 64% similarity]
test/csit/suites/karaf-compatible/110__NETCONF/010__netconf_inventory.txt
test/csit/suites/karaf-compatible/900__AAA/010_Credential_Authentication.txt
test/csit/variables/xmls/f13.xml
test/csit/variables/xmls/f14.xml
test/csit/variables/xmls/f2.xml
test/csit/variables/xmls/netconf.xml
test/tools/wcbench/.gitignore
test/tools/wcbench/README.md
test/tools/wcbench/Vagrantfile [new file with mode: 0644]
test/tools/wcbench/loop_wcbench.sh
test/tools/wcbench/stats.py
test/tools/wcbench/wcbench.sh

index fca8ccfffd7dd7fb9473087729506de47e1205dc..b8f1ff04bd82148ae59cb08d9828fbf3409d8b23 100644 (file)
@@ -3,7 +3,9 @@ Library           ./RequestsLibrary.py
 Variables         ../variables/Variables.py
 
 *** Variables ***
-
+${WORKSPACE}      /opt/jenkins-integration/workspace/shared-controller
+${BUNDLEFOLDER}    distribution-karaf-0.3.0-SNAPSHOT
+${AUTHN_CFG_FILE}    ${WORKSPACE}/${BUNDLEFOLDER}/etc/org.opendaylight.aaa.authn.cfg
 
 *** Keywords ***
 AAA Login
@@ -26,6 +28,38 @@ Create Auth Data
     ...    ${data}
     [Return]    ${data}
 
+Disable Authentication On Controller
+    [Arguments]    ${controller_ip}
+    [Documentation]    Will disable token-based authentication. Currently, that is done with a config file change
+    SSHLibrary.Open Connection    ${controller_ip}
+    Login With Public Key    ${MININET_USER}    ${USER_HOME}/.ssh/id_rsa    any
+    ${cmd}=    Set Variable    sed -i 's/^authEnabled=.*$/authEnabled=false/g' ${AUTHN_CFG_FILE}
+    SSHLibrary.Execute Command    ${cmd}
+
+Enable Authentication On Controller
+    [Arguments]    ${controller_ip}
+    [Documentation]    Will enable token-based authentication. Currently, that is done with a config file change
+    SSHLibrary.Open Connection    ${controller_ip}
+    Login With Public Key    ${MININET_USER}    ${USER_HOME}/.ssh/id_rsa    any
+    ${cmd}=    Set Variable    sed -i 's/^authEnabled=.*$/authEnabled=true/g' ${AUTHN_CFG_FILE}
+    SSHLibrary.Execute Command    ${cmd}
+
+Get Auth Token
+    [Arguments]    ${user}=${USER}    ${password}=${PWD}    ${scope}=${SCOPE}    ${client_id}=${EMPTY}    ${client_secret}=${EMPTY}
+    [Documentation]    Wrapper used to log in to the controller and retrieve an auth token. Optional arguments are available for client-based credentials.
+    ${auth_data}=    Create Auth Data    ${user}    ${password}    ${scope}    ${client_id}    ${client_secret}
+    ${resp}=    AAA Login    ${CONTROLLER}    ${auth_data}
+    Should Be Equal As Strings    ${resp.status_code}    201
+    ${auth_token}=    Extract Value From Content    ${resp.content}    /access_token    strip
+    [Return]    ${auth_token}
+
+Revoke Auth Token
+    [Arguments]    ${token}
+    [Documentation]    Requests the given token be revoked via POST to ${REVOKE_TOKEN_API}
+    ${headers}=    Create Dictionary    Content-Type    application/x-www-form-urlencoded
+    ${resp}=    RequestsLibrary.POST    ODL_SESSION    ${REVOKE_TOKEN_API}    data=${token}    headers=${headers}
+    Should Be Equal As Strings    ${resp.status_code}    204
+
 Validate Token Format
     [Arguments]    ${token}
     [Documentation]    Validates the given string is in the proper "token" format
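The new AAA keywords above are meant to be chained from a test case: obtain a token, validate it, then revoke it. Below is a minimal usage sketch, assuming an already-opened RequestsLibrary session named ODL_SESSION (needed by Revoke Auth Token) and the default ${USER}/${PWD}/${SCOPE} values from Variables.py; the test case name is hypothetical and not part of this change.

*** Test Cases ***
Token Lifecycle Example
    [Documentation]    Illustrative sketch only: obtain an auth token, check its format, then revoke it
    ${token}=    Get Auth Token
    Validate Token Format    ${token}
    Revoke Auth Token    ${token}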
diff --git a/test/csit/suites/clustering/datastore/001_start_cluster.txt b/test/csit/suites/clustering/datastore/001_start_cluster.txt
new file mode 100644 (file)
index 0000000..da9264a
--- /dev/null
@@ -0,0 +1,38 @@
+*** Settings ***
+Documentation     Start the controllers
+Library           Collections
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Variables         ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT}    /restconf/config/
+
+*** Test Cases *** 
+Stop All Controllers
+    [Documentation]    Stop all the controllers in the cluster
+    Stopcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+    KillController    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+
+
+Clean All Journals
+    [Documentation]    Clean the journals of all the controllers in the cluster
+    CleanJournal    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    5
+
+Start All Controllers
+    [Documentation]    Start all the controllers in the cluster
+    Startcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    120
\ No newline at end of file
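The Stop/Clean/Start sequence in this suite is repeated verbatim in the 130 and 140 recovery suites further down. A hedged refactoring sketch of the same steps as one reusable keyword, using the :FOR loop syntax of the Robot Framework version in use at the time; the keyword name Restart Clean Cluster and its placement are assumptions and not part of this change.

*** Keywords ***
Restart Clean Cluster
    [Documentation]    Illustrative sketch only: stop, kill, clean journals of and restart all three cluster members
    :FOR    ${member}    IN    ${MEMBER1}    ${MEMBER2}    ${MEMBER3}
    \    Stopcontroller    ${member}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
    Sleep    30
    :FOR    ${member}    IN    ${MEMBER1}    ${MEMBER2}    ${MEMBER3}
    \    KillController    ${member}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
    \    CleanJournal    ${member}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
    :FOR    ${member}    IN    ${MEMBER1}    ${MEMBER2}    ${MEMBER3}
    \    Startcontroller    ${member}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
    Sleep    120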
similarity index 80%
rename from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_01_execute_on_leader.txt
rename to test/csit/suites/clustering/datastore/010_crud_on_leader.txt
index 3bf86cba992ce4204c4101b71a2373f9cc553e6a..f09db2f15f92497893d70779b393cab241548f17 100644 (file)
@@ -1,14 +1,14 @@
 *** Settings ***
-Documentation     Test suite for testing Distributed Datastore main operations performed from leader
+Documentation     This test finds the leader for shards in a 3-Node cluster and executes CRUD operations on them
 
 Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-Variables         ../../../../variables/Variables.py
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
+Variables         ../../../variables/Variables.py
 
 
 *** Variables ***
@@ -21,7 +21,7 @@ ${SHARD_CAR_PERSON_NAME}      shard-car-people-config
 *** Test Cases ***
 Add cars and get cars from Leader
     [Documentation]    Add 100 cars and get added cars from Leader
-    ${CURRENT_CAR_LEADER}   GetLeader   ${SHARD_CAR_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
+    ${CURRENT_CAR_LEADER}   GetLeader   ${SHARD_CAR_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${MEMBER1}   ${MEMBER2}    ${MEMBER3}
     Log    CURRENT_CAR_SHARD_LEADER ${CURRENT_CAR_LEADER}
     Set Suite Variable  ${CURRENT_CAR_LEADER}
        ${resp}         AddCar  ${CURRENT_CAR_LEADER}   ${PORT} ${100}
@@ -32,7 +32,7 @@ Add cars and get cars from Leader
 Add persons and get persons from Leader
     [Documentation]    Add 100 persons and get persons
     [Documentation]    Note: There should be one person added first to enable rpc
-    ${CURRENT_PEOPLE_LEADER}   GetLeader   ${SHARD_PEOPLE_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
+    ${CURRENT_PEOPLE_LEADER}   GetLeader   ${SHARD_PEOPLE_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${MEMBER1}   ${MEMBER2}    ${MEMBER3}
     Set Suite Variable  ${CURRENT_PEOPLE_LEADER}
        ${resp}         AddPerson       ${CURRENT_PEOPLE_LEADER}        ${PORT} ${0}
        ${resp}         AddPerson       ${CURRENT_PEOPLE_LEADER}        ${PORT} ${100}
@@ -43,7 +43,7 @@ Add persons and get persons from Leader
 Add car-person mapping and get car-person mapping from Leader
     [Documentation]    Add car-person and get car-person from Leader
     [Documentation]  Note: This is done to enable working of rpc
-    ${CURRENT_CAR_PERSON_LEADER}   GetLeader   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
+    ${CURRENT_CAR_PERSON_LEADER}   GetLeader   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${MEMBER1}   ${MEMBER2}  ${MEMBER3}
     Set Suite Variable  ${CURRENT_CAR_PERSON_LEADER}
        ${resp}         AddCarPerson    ${CURRENT_CAR_PERSON_LEADER}   ${PORT}     ${0}
        Sleep       2
@@ -53,7 +53,6 @@ Add car-person mapping and get car-person mapping from Leader
 
 Purchase 100 cars using Leader
     [Documentation]  Purchase 100 cars using Leader
-
        ${resp}         BuyCar  ${CURRENT_CAR_PERSON_LEADER}    ${PORT} ${100}
     Sleep       2
        ${resp}         GetCarPersonMappings    ${CURRENT_CAR_PERSON_LEADER}    ${PORT} ${0}
@@ -67,7 +66,7 @@ Get car-person mappings using Leader
 
 Get car-person mappings using Follower1
    [Documentation]     Get car-person mappings using Follower1 to see 100 entry
-   ${FOLLOWERS}   GetFollowers   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
+   ${FOLLOWERS}   GetFollowers   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${MEMBER1}   ${MEMBER2}    ${MEMBER3}
    Log                 ${FOLLOWERS}
    SET SUITE VARIABLE  ${FOLLOWERS}
        ${resp}         GetCarPersonMappings    ${FOLLOWERS[0]}     ${PORT}     ${0}
similarity index 81%
rename from test/csit/suites/clustering/datastore/basic/020_restconf_rpc_crud_test_02_execute_on_follower1.txt
rename to test/csit/suites/clustering/datastore/020_crud_on_any_follower.txt
index 8b54e98351f04d74a01a255927b10c6a1d200295..baaa24e44e7660119e42441ca79dd683be626eea 100644 (file)
@@ -1,14 +1,14 @@
 *** Settings ***
-Documentation     Test suite for testing Distributed Datastore main operations performed from follower1
+Documentation     This test finds the followers of certain shards in a 3-Node cluster and executes CRUD operations on any one follower
 
 Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-Variables         ../../../../variables/Variables.py
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
+Variables         ../../../variables/Variables.py
 
 *** Variables ***
 ${REST_CONTEXT}    /restconf/config/
@@ -20,7 +20,7 @@ ${SHARD_CAR_PERSON_NAME}      shard-car-people-config
 *** Test Cases ***
 Add cars and get cars from Follower1
     [Documentation]    Add 100 cars and get added cars from Follower1
-    ${FOLLOWERS}   GetFollowers   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
+    ${FOLLOWERS}   GetFollowers   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${MEMBER1}   ${MEMBER2}    ${MEMBER3}
     Log                ${FOLLOWERS}
     SET SUITE VARIABLE  ${FOLLOWERS}
 
@@ -66,7 +66,7 @@ Get car-person mappings using Follower1
 
 Get car-person mappings using Leader
    [Documentation]     Get car-person mappings using Leader to see 100 entry
-    ${CURRENT_CAR_LEADER}   GetLeader   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
+    ${CURRENT_CAR_LEADER}   GetLeader   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${MEMBER1}   ${MEMBER2}    ${MEMBER3}
     Log     ${CURRENT_CAR_LEADER}
     Sleep   1
        ${resp}         GetCarPersonMappings    ${CURRENT_CAR_LEADER}   ${PORT} ${0}
similarity index 86%
rename from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_05_execute_on_new_leader.txt
rename to test/csit/suites/clustering/datastore/030_failover_crud_on_new_leader.txt
index 08573e84205ddba7910f85425da38b3fa72c2cae..e40888d365c3597a836ccad6583c9628c26234c0 100644 (file)
@@ -1,8 +1,8 @@
 *** Settings ***
-Documentation     Run this test after running test no 03
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
+Documentation     This test brings down the current leader of the "car" shard and then executes CRUD operations on the new leader
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
 
 *** Variables ***
 ${SHARD}    shard-car-config
@@ -10,10 +10,10 @@ ${SHARD}    shard-car-config
 *** Test Cases ***
 Switch Leader
     [Documentation]    stop leader and elect new leader
-  ${OLD_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
+  ${OLD_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
   Stopcontroller  ${OLD_LEADER}  ${USERNAME}  ${PASSWORD}  ${KARAFHOME}
   Sleep    30
-  ${NEW_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
+  ${NEW_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
   Log  ${NEW_LEADER}
   Set Suite Variable   ${NEW_LEADER}
 
similarity index 80%
rename from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_06_execute_on_remaining_follower.txt
rename to test/csit/suites/clustering/datastore/040_failover_read_from_follower.txt
index 7b70f71357b35c6e48706b089b42106b0ed991b1..9cfd3588d911fd807317fe9211ed7654f7a16c86 100644 (file)
@@ -1,7 +1,7 @@
 *** Settings ***
-Documentation     Run this test after running test no 05
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
+Documentation     This test tries to read the data that was written by the previous test from any one follower
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
 
 *** Variables ***
 ${SHARD}    shard-car-config
@@ -9,7 +9,7 @@ ${SHARD}    shard-car-config
 *** Test Cases ***
 Find follower
     [Documentation]    find follower
-  ${FOLLOWERS}    GetFollowers  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
+  ${FOLLOWERS}    GetFollowers  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
   Log  ${FOLLOWERS}
   ${LAST_FOLLOWER}  Set Variable  ${FOLLOWERS[0]}
   Set Suite Variable   ${LAST_FOLLOWER}
similarity index 89%
rename from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_07_execute_on_remaining_follower.txt
rename to test/csit/suites/clustering/datastore/050_failover_crud_on_any_follower.txt
index 62be8a9987d6615350ad7babefd7c114d0e6d5cd..89bb931b70e018c4a23e95e3d8813c48ab763031 100644 (file)
@@ -1,7 +1,7 @@
 *** Settings ***
-Documentation     Run this test after running test no 06
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
+Documentation     This test executes CRUD operations on any one follower after the old leader has been brought down
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
 
 *** Variables ***
 ${SHARD}    shard-car-config
@@ -9,7 +9,7 @@ ${SHARD}    shard-car-config
 *** Test Cases ***
 Find follower
     [Documentation]    find follower
-  ${FOLLOWERS}    GetFollowers  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
+  ${FOLLOWERS}    GetFollowers  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
   Log  ${FOLLOWERS}
   ${LAST_FOLLOWER}  Set Variable  ${FOLLOWERS[0]}
   Set Suite Variable   ${LAST_FOLLOWER}
similarity index 79%
rename from test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_08_execute_on_new_leader.txt
rename to test/csit/suites/clustering/datastore/060_failover_read_from_new_leader.txt
index 06496e9e10ae34c7e366aff214ea10549596cc2b..d673aefb59ca8e4af6b4b75269de9261286ac051 100644 (file)
@@ -1,7 +1,7 @@
 *** Settings ***
-Documentation     Run this test after running test no 07
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
+Documentation     This test reads the data from the leader that was written to the follower by the previous test
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
 
 *** Variables ***
 ${SHARD}    shard-car-config
@@ -9,7 +9,7 @@ ${SHARD}    shard-car-config
 *** Test Cases ***
 Find Leader
     [Documentation]    find new leader
-  ${NEW_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
+  ${NEW_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
   Log  ${NEW_LEADER}
   Set Suite Variable   ${NEW_LEADER}
 
diff --git a/test/csit/suites/clustering/datastore/130_recovery_restart_leader.txt b/test/csit/suites/clustering/datastore/130_recovery_restart_leader.txt
new file mode 100644 (file)
index 0000000..567eb3f
--- /dev/null
@@ -0,0 +1,104 @@
+*** Settings ***
+Documentation     This test kills the leader and verifies that on restart the old leader is able to rejoin the cluster
+Library           Collections
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
+Variables         ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT}    /restconf/config/
+${KARAF_HOME}      /root/odl/dist
+${USER_NAME}       root
+${PASSWORD}        Ecp123
+${CAR_SHARD}      shard-car-config
+
+*** Test Cases *** 
+Stop All Controllers
+    [Documentation]    Stop all the controllers in the cluster
+    Stopcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+    KillController    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+
+
+Clean All Journals
+    [Documentation]    Clean the journals of all the controllers in the cluster
+    CleanJournal    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    5
+
+Start All Controllers
+    [Documentation]    Start all the controllers in the cluster
+    Startcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    120
+
+Delete all cars
+    [Documentation]    Delete all the cars from the system
+       ${resp}         DeleteAllCars   ${MEMBER1}      ${PORT}         0
+       ${resp}         GetCars ${MEMBER1}      ${PORT}         0
+       Should Be Equal As Strings    ${resp.status_code}    404
+       
+
+Delete all people
+    [Documentation]    Delete all the people from the system
+       ${resp}         DeleteAllPersons        ${MEMBER1}      ${PORT}         0
+       ${resp}         GetPersons      ${MEMBER1}      ${PORT}    0    
+       Should Be Equal As Strings    ${resp.status_code}    404
+
+Add 200 cars
+    [Documentation]    Add 200 cars
+       ${resp}         AddCar  ${MEMBER1}      ${PORT}  ${200}
+       Should Be Equal As Strings    ${resp.status_code}    204
+
+Add 200 people
+    [Documentation]    Add 200 people
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT} ${0}    
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT}  ${200}
+       Should Be Equal As Strings    ${resp.status_code}    204
+
+Add Car Person mapping
+    [Documentation]    Add Car Persons
+       ${resp}         AddCarPerson    ${MEMBER1}      ${PORT} ${0}    
+       ${resp}         BuyCar  ${MEMBER1}      ${PORT} ${200}  
+
+Stop the Leader
+    ${CAR_LEADER}  GetLeader  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
+    Set Suite Variable    ${CAR_LEADER}
+    Stopcontroller    ${CAR_LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+    KillController    ${CAR_LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+
+Get all the cars from Follower 1
+    ${followers}  GetFollowers  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
+       ${resp}         Getcars ${followers[0]} ${PORT} ${0}
+       Should Be Equal As Strings    ${resp.status_code}    200
+       Should Contain     ${resp.content}   manufacturer1
+
+Restart the Leader
+    Startcontroller   ${CAR_LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    120
+
+Get all the cars from Leader
+       ${resp}         Getcars ${CAR_LEADER}   ${PORT} ${0}
+       Should Be Equal As Strings    ${resp.status_code}    200
+       Should Contain     ${resp.content}   manufacturer1
+
+Cleanup All Controllers 
+    [Documentation]    Stop all the controllers in the cluster
+    Stopcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+
+
+
diff --git a/test/csit/suites/clustering/datastore/140_recovery_restart_follower.txt b/test/csit/suites/clustering/datastore/140_recovery_restart_follower.txt
new file mode 100644 (file)
index 0000000..195ef3e
--- /dev/null
@@ -0,0 +1,105 @@
+*** Settings ***
+Documentation     This test kills any of the followers and verifies that when that follower is restarted it can join the cluster
+Library           Collections
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
+Variables         ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT}    /restconf/config/
+${KARAF_HOME}      /root/odl/dist
+${USER_NAME}       root
+${PASSWORD}        Ecp123
+${CAR_SHARD}      shard-car-config
+
+*** Test Cases *** 
+Stop All Controllers
+    [Documentation]    Stop all the controllers in the cluster
+    Stopcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+    KillController    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+
+
+Clean All Journals
+    [Documentation]    Clean the journals of all the controllers in the cluster
+    CleanJournal    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    5
+
+Start All Controllers
+    [Documentation]    Start all the controllers in the cluster
+    Startcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    120
+
+Delete all cars
+    [Documentation]    Delete all the cars from the system
+       ${resp}         DeleteAllCars   ${MEMBER1}      ${PORT}         0
+       ${resp}         GetCars ${MEMBER1}      ${PORT}         0
+       Should Be Equal As Strings    ${resp.status_code}    404
+       
+
+Delete all people
+    [Documentation]    Delete all the people from the system
+       ${resp}         DeleteAllPersons        ${MEMBER1}      ${PORT}         0
+       ${resp}         GetPersons      ${MEMBER1}      ${PORT}    0    
+       Should Be Equal As Strings    ${resp.status_code}    404
+
+Add 200 cars
+    [Documentation]    Add 200 cars
+       ${resp}         AddCar  ${MEMBER1}      ${PORT}  ${200}
+       Should Be Equal As Strings    ${resp.status_code}    204
+
+Add 200 people
+    [Documentation]    Add 200 people
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT} ${0}    
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT}  ${200}
+       Should Be Equal As Strings    ${resp.status_code}    204
+
+Add Car Person mapping
+    [Documentation]    Add Car Persons
+       ${resp}         AddCarPerson    ${MEMBER1}      ${PORT} ${0}    
+       ${resp}         BuyCar  ${MEMBER1}      ${PORT} ${200}  
+
+Stop one of the followers
+    ${followers}  GetFollowers  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
+    ${CAR_FOLLOWER}    Set Variable    ${followers[0]}
+    Set Suite Variable    ${CAR_FOLLOWER}
+    Stopcontroller    ${CAR_FOLLOWER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+    KillController    ${CAR_FOLLOWER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+
+Get all the cars from the other Follower
+    ${followers}  GetFollowers  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${MEMBER1}  ${MEMBER2}  ${MEMBER3}
+       ${resp}         Getcars ${followers[0]} ${PORT} ${0}
+       Should Be Equal As Strings    ${resp.status_code}    200
+       Should Contain     ${resp.content}   manufacturer1
+
+Restart the Stopped Follower
+    Startcontroller   ${CAR_FOLLOWER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    120
+
+Get all the cars from Stopped Follower
+       ${resp}         Getcars ${CAR_FOLLOWER} ${PORT} ${0}
+       Should Be Equal As Strings    ${resp.status_code}    200
+       Should Contain     ${resp.content}   manufacturer1
+
+Cleanup All Controllers 
+    [Documentation]    Stop all the controllers in the cluster
+    Stopcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+
+
+
similarity index 51%
rename from test/csit/suites/clustering/datastore/basic/__init__.txt
rename to test/csit/suites/clustering/datastore/__init__.txt
index a6ced85fa2ccc6beab52f385ea797508edd9d623..cf422c9cf5fac4eef5bb6203c7bfe3c50818bac3 100644 (file)
@@ -1,5 +1,5 @@
 *** Settings ***
-Documentation     Test suite for MD-SAL NSF
+Documentation     Test suite for Clustering Datastore
 Library     SSHLibrary
 
 
diff --git a/test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_09_execute_on_last_node.txt b/test/csit/suites/clustering/datastore/basic/010_restconf_rpc_crud_test_09_execute_on_last_node.txt
deleted file mode 100644 (file)
index f309081..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-*** Settings ***
-Documentation     Run this test after running test no 05
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-
-*** Variables ***
-${SHARD}    shard-car-config
-
-*** Test Cases ***
-Stop Leader
-    [Documentation]    find new leader
-  ${FOLLOWERS}    GetFollowers  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
-  Log  ${FOLLOWERS}
-  ${LAST_FOLLOWER}  Set Variable  ${FOLLOWERS[0]}
-  Set Suite Variable   ${LAST_FOLLOWER}
-  ${NEW_LEADER}    GetLeader  ${SHARD}  ${3}  ${3}  ${2}  ${8181}  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
-  Log  ${NEW_LEADER}
-  Stopcontroller  ${NEW_LEADER}  ${USERNAME}  ${PASSWORD}  ${KARAFHOME}
-  Sleep    30
-
-
-Get cars from last follower
-    [Documentation]    get cars from last follower
-       ${resp}         Getcars ${LAST_FOLLOWER}        ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    500
-
-Add cars and get cars from last follower
-    [Documentation]    Add 80 cars and get added cars from last follower
-       ${resp}         AddCar  ${LAST_FOLLOWER}        ${PORT} ${80}
-       Should Be Equal As Strings    ${resp.status_code}    500
diff --git a/test/csit/suites/clustering/datastore/basic/030_restconf_rpc_crud_test_03_execute_on_follower2.txt b/test/csit/suites/clustering/datastore/basic/030_restconf_rpc_crud_test_03_execute_on_follower2.txt
deleted file mode 100644 (file)
index 8bc6695..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-*** Settings ***
-Documentation     Test suite for testing Distributed Datastore main operations performed from follower2
-
-Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-Variables         ../../../../variables/Variables.py
-
-*** Variables ***
-${REST_CONTEXT}    /restconf/config/
-${SHARD_CAR_NAME}      shard-car-config
-${SHARD_PEOPLE_NAME}      shard-people-config
-${SHARD_CAR_PERSON_NAME}      shard-car-people-config
-
-
-*** Test Cases ***
-Add cars and get cars from Follower2
-    [Documentation]    Add 100 cars and get added cars from Follower2
-    ${FOLLOWERS}   GetFollowers   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
-    Log                ${FOLLOWERS}
-    SET SUITE VARIABLE  ${FOLLOWERS}
-
-       ${resp}         AddCar  ${FOLLOWERS[1]} ${PORT} ${100}
-       Sleep   1
-       ${resp}         Getcars ${FOLLOWERS[1]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   manufacturer1
-
-Add persons and get persons from Follower2
-    [Documentation]    Add 100 persons and get persons from Follower2
-    [Documentation]    Note: There should be one person added first to enable rpc
-       ${resp}         AddPerson       ${FOLLOWERS[1]} ${PORT} ${0}
-       ${resp}         AddPerson       ${FOLLOWERS[1]} ${PORT} ${100}
-       Sleep   1
-       ${resp}         GetPersons      ${FOLLOWERS[1]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   user5
-
-Add car-person mapping and get car-person mapping from Follower2
-    [Documentation]    Add car-person and get car-person from Follower2
-    [Documentation]  Note: This is done to enable working of rpc
-
-       ${resp}         AddCarPerson    ${FOLLOWERS[1]} ${PORT} ${0}
-       ${resp}         GetCarPersonMappings    ${FOLLOWERS[1]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   user0
-
-Purchase 100 cars using Follower1
-    [Documentation]  Purchase 100 cars using Follower2
-
-       ${resp}         BuyCar  ${FOLLOWERS[1]} ${PORT} ${100}
-       Sleep   1
-       ${resp}         GetCarPersonMappings    ${FOLLOWERS[1]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-
-Get car-person mappings using Follower2
-   [Documentation]     Get car-person mappings using follower2 to see 100 entry
-       ${resp}         GetCarPersonMappings    ${FOLLOWERS[1]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   user100
-       Should Contain     ${resp.content}   user5
-
-Get car-person mappings using Leader
-   [Documentation]     Get car-person mappings using Leader to see 100 entry
-    ${CURRENT_CAR_LEADER}   GetLeader   ${SHARD_CAR_PERSON_NAME}   ${3}    ${3}    ${1}    ${PORT}     ${LEADER}   ${FOLLOWER1}    ${FOLLOWER2}
-    Log     ${CURRENT_CAR_LEADER}
-    Sleep   1
-       ${resp}         GetCarPersonMappings    ${CURRENT_CAR_LEADER}   ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   user100
-
-Get car-person mappings using Follower1
-   [Documentation]     Get car-person mappings using Follower1 to see 100 entry
-       ${resp}         GetCarPersonMappings    ${FOLLOWERS[0]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   user0
-       Should Contain     ${resp.content}   user100
diff --git a/test/csit/suites/clustering/datastore/basic/130_restconf_disaster_recovery_restart_leader.txt b/test/csit/suites/clustering/datastore/basic/130_restconf_disaster_recovery_restart_leader.txt
deleted file mode 100644 (file)
index 925daba..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-*** Settings ***
-Documentation     Test suite for RESTCONF RPC CAR PERSON 
-Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-Variables         ../../../../variables/Variables.py
-
-*** Variables ***
-${REST_CONTEXT}    /restconf/config/
-${KARAF_HOME}      /root/odl/dist
-${USER_NAME}       root
-${PASSWORD}        Ecp123
-${CAR_SHARD}      shard-car-config
-
-*** Test Cases *** 
-Stop All Controllers
-    [Documentation]    Stop all the controllers in the cluster
-    Stopcontroller    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    30
-    KillController    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    KillController    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    KillController    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-
-
-Clean All Journals
-    [Documentation]    Clean the journals of all the controllers in the cluster
-    CleanJournal    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    CleanJournal    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    CleanJournal    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    5
-
-Start All Controllers
-    [Documentation]    Start all the controllers in the cluster
-    Startcontroller    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Startcontroller    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Startcontroller    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    120
-
-Delete all cars
-    [Documentation]    Delete all the cars from the system
-       ${resp}         DeleteAllCars   ${LEADER}       ${PORT}         0
-       ${resp}         GetCars ${LEADER}       ${PORT}         0
-       Should Be Equal As Strings    ${resp.status_code}    404
-       
-
-Delete all people
-    [Documentation]    Delete all the people from the system
-       ${resp}         DeleteAllPersons        ${LEADER}       ${PORT}         0
-       ${resp}         GetPersons      ${LEADER}       ${PORT}    0    
-       Should Be Equal As Strings    ${resp.status_code}    404
-
-Add 20000 cars
-    [Documentation]    Add 200 cars
-       ${resp}         AddCar  ${LEADER}       ${PORT}  ${200}
-       Should Be Equal As Strings    ${resp.status_code}    204
-
-Add 20000 people
-    [Documentation]    Add 200 people
-       ${resp}         AddPerson       ${LEADER}       ${PORT} ${0}    
-       ${resp}         AddPerson       ${LEADER}       ${PORT}  ${200}
-       Should Be Equal As Strings    ${resp.status_code}    204
-
-Add Car Person mapping
-    [Documentation]    Add Car Persons
-       ${resp}         AddCarPerson    ${LEADER}       ${PORT} ${0}    
-       ${resp}         BuyCar  ${LEADER}       ${PORT} ${200}  
-
-Stop the Leader
-    ${CAR_LEADER}  GetLeader  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
-    Set Suite Variable    ${CAR_LEADER}
-    Stopcontroller    ${CAR_LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    30
-    KillController    ${CAR_LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-
-Get all the cars from Follower 1
-    ${followers}  GetFollowers  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
-       ${resp}         Getcars ${followers[0]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   manufacturer1
-
-Restart the Leader
-    Startcontroller   ${CAR_LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    120
-
-Get all the cars from Leader
-       ${resp}         Getcars ${CAR_LEADER}   ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   manufacturer1
-
-Cleanup All Controllers 
-    [Documentation]    Stop all the controllers in the cluster
-    Stopcontroller    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    30
-
-
-
diff --git a/test/csit/suites/clustering/datastore/basic/140_restconf_disaster_recovery_restart_follower.txt b/test/csit/suites/clustering/datastore/basic/140_restconf_disaster_recovery_restart_follower.txt
deleted file mode 100644 (file)
index 87793c5..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-*** Settings ***
-Documentation     Test suite for RESTCONF RPC CAR PERSON 
-Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-Variables         ../../../../variables/Variables.py
-
-*** Variables ***
-${REST_CONTEXT}    /restconf/config/
-${KARAF_HOME}      /root/odl/dist
-${USER_NAME}       root
-${PASSWORD}        Ecp123
-${CAR_SHARD}      shard-car-config
-
-*** Test Cases *** 
-Stop All Controllers
-    [Documentation]    Stop all the controllers in the cluster
-    Stopcontroller    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    30
-    KillController    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    KillController    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    KillController    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-
-
-Clean All Journals
-    [Documentation]    Clean the journals of all the controllers in the cluster
-    CleanJournal    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    CleanJournal    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    CleanJournal    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    5
-
-Start All Controllers
-    [Documentation]    Start all the controllers in the cluster
-    Startcontroller    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Startcontroller    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Startcontroller    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    120
-
-Delete all cars
-    [Documentation]    Delete all the cars from the system
-       ${resp}         DeleteAllCars   ${LEADER}       ${PORT}         0
-       ${resp}         GetCars ${LEADER}       ${PORT}         0
-       Should Be Equal As Strings    ${resp.status_code}    404
-       
-
-Delete all people
-    [Documentation]    Delete all the people from the system
-       ${resp}         DeleteAllPersons        ${LEADER}       ${PORT}         0
-       ${resp}         GetPersons      ${LEADER}       ${PORT}    0    
-       Should Be Equal As Strings    ${resp.status_code}    404
-
-Add 20000 cars
-    [Documentation]    Add 200 cars
-       ${resp}         AddCar  ${LEADER}       ${PORT}  ${200}
-       Should Be Equal As Strings    ${resp.status_code}    204
-
-Add 20000 people
-    [Documentation]    Add 200 people
-       ${resp}         AddPerson       ${LEADER}       ${PORT} ${0}    
-       ${resp}         AddPerson       ${LEADER}       ${PORT}  ${200}
-       Should Be Equal As Strings    ${resp.status_code}    204
-
-Add Car Person mapping
-    [Documentation]    Add Car Persons
-       ${resp}         AddCarPerson    ${LEADER}       ${PORT} ${0}    
-       ${resp}         BuyCar  ${LEADER}       ${PORT} ${200}  
-
-Stop of of the followers
-    ${followers}  GetFollowers  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
-    ${CAR_FOLLOWER}    Set Variable    ${followers[0]}
-    Set Suite Variable    ${CAR_FOLLOWER}
-    Stopcontroller    ${CAR_FOLLOWER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    30
-    KillController    ${CAR_FOLLOWER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-
-Get all the cars from the other Follower
-    ${followers}  GetFollowers  ${CAR_SHARD}  ${3}  ${3}  ${1}  8181  ${LEADER}  ${FOLLOWER1}  ${FOLLOWER2}
-       ${resp}         Getcars ${followers[0]} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   manufacturer1
-
-Restart the Stopped Follower
-    Startcontroller   ${CAR_FOLLOWER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    120
-
-Get all the cars from Stopped Follower
-       ${resp}         Getcars ${CAR_FOLLOWER} ${PORT} ${0}
-       Should Be Equal As Strings    ${resp.status_code}    200
-       Should Contain     ${resp.content}   manufacturer1
-
-Cleanup All Controllers 
-    [Documentation]    Stop all the controllers in the cluster
-    Stopcontroller    ${LEADER}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Stopcontroller    ${FOLLOWER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
-    Sleep    30
-
-
-
diff --git a/test/csit/suites/clustering/routedrpc/001_start_cluster.txt b/test/csit/suites/clustering/routedrpc/001_start_cluster.txt
new file mode 100644 (file)
index 0000000..da9264a
--- /dev/null
@@ -0,0 +1,38 @@
+*** Settings ***
+Documentation     Start the controllers
+Library           Collections
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Variables         ../../../variables/Variables.py
+
+*** Variables ***
+${REST_CONTEXT}    /restconf/config/
+
+*** Test Cases *** 
+Stop All Controllers
+    [Documentation]    Stop all the controllers in the cluster
+    Stopcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Stopcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    30
+    KillController    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    KillController    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+
+
+Clean All Journals
+    [Documentation]    Clean the journals of all the controllers in the cluster
+    CleanJournal    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    CleanJournal    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    5
+
+Start All Controllers
+    [Documentation]    Start all the controllers in the cluster
+    Startcontroller    ${MEMBER1}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER2}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Startcontroller    ${MEMBER3}    ${USER_NAME}    ${PASSWORD}    ${KARAF_HOME}
+    Sleep    120
\ No newline at end of file
similarity index 65%
rename from test/csit/suites/clustering/datastore/routedrpc/023_routed_rpc_crud_test.txt
rename to test/csit/suites/clustering/routedrpc/023_routed_rpc_crud_test.txt
index 1e1781dce8ed0520ffa31ba6f9213728ed4c4a06..2ee99f23063dce66cc068ce099141c8c88af0698 100644 (file)
@@ -1,12 +1,12 @@
 *** Settings ***
 Documentation     Test suite for Routed RPC. 
 Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Variables         ../../../../variables/Variables.py
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Variables         ../../../variables/Variables.py
 
 *** Variables ***
 ${REST_CONTEXT}    /restconf/config/
@@ -15,17 +15,17 @@ ${REST_CONTEXT}    /restconf/config/
 *** Test Cases *** 
 Add cars and get cars from Leader 
     [Documentation]    Add 100 cars and get added cars from Leader
-       ${resp}         AddCar  ${LEADER}       ${PORT} ${100}  
-       ${resp}         Getcars ${LEADER}       ${PORT} ${0}
+       ${resp}         AddCar  ${MEMBER1}      ${PORT} ${100}  
+       ${resp}         Getcars ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain     ${resp.content}   manufacturer1      
                
 Add persons and get persons from Leader 
     [Documentation]    Add 100 persons and get persons
     [Documentation]    Note: There should be one person added first to enable rpc
-       ${resp}         AddPerson       ${LEADER}       ${PORT} ${0}    
-       ${resp}         AddPerson       ${LEADER}       ${PORT} ${100}  
-       ${resp}         GetPersons      ${LEADER}       ${PORT} ${0}
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT} ${0}    
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT} ${100}  
+       ${resp}         GetPersons      ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain     ${resp.content}   user5
        SLEEP   10      
@@ -33,32 +33,32 @@ Add persons and get persons from Leader
 Add car-person mapping and get car-person mapping from Follower1
     [Documentation]    Add car-person and get car-person from Leader
     [Documentation]    Note: This is done to enable working of rpc
-        ${resp}                AddCarPerson    ${FOLLOWER1}    ${PORT} ${0}
-        ${resp}                GetCarPersonMappings    ${FOLLOWER1}    ${PORT} ${0}
+        ${resp}                AddCarPerson    ${MEMBER2}      ${PORT} ${0}
+        ${resp}                GetCarPersonMappings    ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings      ${resp.status_code}     200
         Should Contain ${resp.content} user0
        SLEEP   5
 
 Purchase 100 cars using Follower1 
     [Documentation]  Purchase 100 cars using Follower1
-       ${resp}         BuyCar  ${FOLLOWER1}    ${PORT} ${100}
-       ${resp}         GetCarPersonMappings    ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         BuyCar  ${MEMBER2}      ${PORT} ${100}
+       ${resp}         GetCarPersonMappings    ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
 
 Get Cars from Leader
     [Documentation]    Get 100 using Leader
-       ${resp}         Getcars ${LEADER}       ${PORT} ${0}
+       ${resp}         Getcars ${MEMBER1}      ${PORT} ${0}
         Should Be Equal As Strings    ${resp.status_code}    200
         Should Contain     ${resp.content}   manufacturer99
 
 Get persons from Leader
     [Documentation]    Get 101 Persons from Leader
-       ${resp}         GetPersons      ${LEADER}       ${PORT} ${0}
+       ${resp}         GetPersons      ${MEMBER1}      ${PORT} ${0}
         Should Be Equal As Strings    ${resp.status_code}    200
         Should Contain     ${resp.content}   user100
 
 Get car-person mappings using Leader
    [Documentation]     Get 101 car-person mappings using Leader to see 100 entry
-       ${resp}         GetCarPersonMappings    ${LEADER}       ${PORT} ${0}
+       ${resp}         GetCarPersonMappings    ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain     ${resp.content}   user100
similarity index 64%
rename from test/csit/suites/clustering/datastore/routedrpc/024_routed_rpc_crud_test.txt
rename to test/csit/suites/clustering/routedrpc/024_routed_rpc_crud_test.txt
index 82d4acc694a460e5c596950445f947ab9e183919..e29c3f5bf19a1f61ed437d5a884f0c10c1fa6026 100644 (file)
@@ -1,13 +1,13 @@
 *** Settings ***
 Documentation     Test suite for Routed RPC. 
 Library           Collections
-Library           ../../../../libraries/RequestsLibrary.py
-Library           ../../../../libraries/Common.py
-Library           ../../../../libraries/CrudLibrary.py
-Library           ../../../../libraries/SettingsLibrary.py
-Library           ../../../../libraries/UtilLibrary.py
-Library           ../../../../libraries/ClusterStateLibrary.py
-Variables         ../../../../variables/Variables.py
+Library           ../../../libraries/RequestsLibrary.py
+Library           ../../../libraries/Common.py
+Library           ../../../libraries/CrudLibrary.py
+Library           ../../../libraries/SettingsLibrary.py
+Library           ../../../libraries/UtilLibrary.py
+Library           ../../../libraries/ClusterStateLibrary.py
+Variables         ../../../variables/Variables.py
 
 *** Variables ***
 ${REST_CONTEXT}    /restconf/config/
@@ -15,104 +15,104 @@ ${REST_CONTEXT}    /restconf/config/
 *** Test Cases *** 
 Add cars and get cars from Leader 
     [Documentation]    Add 100 cars and get added cars from Leader
-       ${resp}         AddCar  ${LEADER}       ${PORT} ${100}  
-       ${resp}         Getcars ${LEADER}       ${PORT} ${0}
+       ${resp}         AddCar  ${MEMBER1}      ${PORT} ${100}  
+       ${resp}         Getcars ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain     ${resp.content}   manufacturer1      
        
 Add persons and get persons from Leader 
     [Documentation]    Add 100 persons and get persons
     [Documentation]    Note: There should be one person added first to enable rpc
-       ${resp}         AddPerson       ${LEADER}       ${PORT} ${0}    
-       ${resp}         AddPerson       ${LEADER}       ${PORT} ${100}  
-       ${resp}         GetPersons      ${LEADER}       ${PORT} ${0}
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT} ${0}    
+       ${resp}         AddPerson       ${MEMBER1}      ${PORT} ${100}  
+       ${resp}         GetPersons      ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain     ${resp.content}   user5      
 
 Add car-person mapping and get car-person mapping from Follower1
     [Documentation]     Add car-person and get car-person from Follower1
     [Documentation]  Note: This is done to enable working of rpc
-       ${resp}         AddCarPerson    ${FOLLOWER1}    ${PORT} ${0}
-       ${resp}         GetCarPersonMappings    ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         AddCarPerson    ${MEMBER2}      ${PORT} ${0}
+       ${resp}         GetCarPersonMappings    ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content} user0
 
 Purchase 100 cars using Follower 
     [Documentation]  Purchase 100 cars using Follower
        SLEEP   10
-       ${resp}         BuyCar  ${FOLLOWER1}    ${PORT} ${100}  
-       ${resp}         GetCarPersonMappings    ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         BuyCar  ${MEMBER2}      ${PORT} ${100}  
+       ${resp}         GetCarPersonMappings    ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
 
 Get Cars from Leader
     [Documentation]    Get 100 cars using Leader
-       ${resp}         Getcars ${LEADER}       ${PORT} ${0}
+       ${resp}         Getcars ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         manufacturer9
 
 Get persons from Leader
     [Documentation]    Get 11 Persons from Leader
-       ${resp}         GetPersons      ${LEADER}       ${PORT} ${0}
+       ${resp}         GetPersons      ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         user100
 
 Get car-person mappings using Leader
    [Documentation]     Get car-person mappings using Leader to see 100 entries
-       ${resp}         GetCarPersonMappings    ${LEADER}       ${PORT} ${0}
+       ${resp}         GetCarPersonMappings    ${MEMBER1}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         user100
 
 Stop Leader
    [Documentation]     Stop Leader controller
-       ${resp}         Stopcontroller  ${LEADER}       root    Ecp123  /opt/clustering/dist
+       ${resp}         Stopcontroller  ${MEMBER1}      ${USERNAME}     ${PASSWORD}     ${KARAF_HOME}
        SLEEP   30
-       ${resp}         Killcontroller  ${LEADER}       root    Ecp123  /opt/clustering/dist
+       ${resp}         Killcontroller  ${MEMBER1}      ${USERNAME}     ${PASSWORD}     ${KARAF_HOME}
 
        
 Add cars and get cars from Follower1 
     [Documentation]    Add 100 cars and get added cars from Follower
-       ${resp}         AddCar  ${FOLLOWER1}    ${PORT} ${100}  
-       ${resp}         Getcars ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         AddCar  ${MEMBER2}      ${PORT} ${100}  
+       ${resp}         Getcars ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         manufacturer1
 
 Add persons and get persons from Follower1
     [Documentation]    Add 100 persons and get persons
     [Documentation]    Note: There should be one person added first to enable rpc
-       ${resp}         AddPerson       ${FOLLOWER1}    ${PORT} ${0}    
-       ${resp}         AddPerson       ${FOLLOWER1}    ${PORT} ${100}  
-       ${resp}         GetPersons      ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         AddPerson       ${MEMBER2}      ${PORT} ${0}    
+       ${resp}         AddPerson       ${MEMBER2}      ${PORT} ${100}  
+       ${resp}         GetPersons      ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         user5
        SLEEP   10
        
 Purchase 100 cars using Follower2 
     [Documentation]  Purchase 100 cars using Follower2
-       ${resp}         BuyCar  ${FOLLOWER2}    ${PORT} ${100}
+       ${resp}         BuyCar  ${MEMBER3}      ${PORT} ${100}
        SLEEP   10
-       ${resp}         GetCarPersonMappings    ${FOLLOWER2}    ${PORT} ${0}
+       ${resp}         GetCarPersonMappings    ${MEMBER3}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
 
 Get Cars from Follower1
     [Documentation]    Get 100 cars using Follower1
-       ${resp}         Getcars ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         Getcars ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         manufacturer9
 
 Get persons from Follower1
     [Documentation]    Get 11 Persons from Follower1
-       ${resp}         GetPersons      ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         GetPersons      ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         user100
 
 Get car-person mappings using Follower1
    [Documentation]     Get car-person mappings using Follower1 to see 100 entries
-       ${resp}         GetCarPersonMappings    ${FOLLOWER1}    ${PORT} ${0}
+       ${resp}         GetCarPersonMappings    ${MEMBER2}      ${PORT} ${0}
        Should Be Equal As Strings    ${resp.status_code}    200
        Should Contain  ${resp.content}         user100
 
 Start Leader
    [Documentation]     Start Leader controller 
-       ${resp}         Startcontroller ${LEADER}       root    Ecp123  /opt/clustering/dist
+       ${resp}         Startcontroller ${MEMBER1}      ${USERNAME}     ${PASSWORD}     ${KARAF_HOME}
        SLEEP   20
 
index fd168243d01a4f82c360d5bcfc32188f7ca9e4a6..dd6b5237a7eb769ce7764853de8fb2c3f59c0dce 100644 (file)
@@ -11,9 +11,9 @@ Variables         ../../../variables/Variables.py
 
 *** Variables ***
 ${FILE}                 ${CURDIR}/../../../variables/xmls/netconf.xml
-${REST_CONT_CONF}        /restconf/config/opendaylight-inventory:nodes
+${REST_CONT_CONF}       /restconf/config/opendaylight-inventory:nodes
 ${REST_CONT_OPER}       /restconf/operational/opendaylight-inventory:nodes 
-${REST_NTPR_CONF}        node/controller-config/yang-ext:mount/config:modules/config:module/netopeer
+${REST_NTPR_CONF}       node/controller-config/yang-ext:mount/config:modules
 ${REST_NTPR_MOUNT}      node/netopeer/yang-ext:mount/
 
 *** Test Cases ***
@@ -21,27 +21,25 @@ Add NetConf device
     [Documentation]    Add NetConf device using REST
     [Tags]     netconf
     ${XML1}    Get File    ${FILE}
-    ${XML2}    Replace String    127.0.0.1    ${MININET}     ${XML1}
-    ${body}    Replace String    mininet      ${MININET_USER}    ${XML2}
+    ${XML2}    Replace String    ${XML1}    127.0.0.1    ${MININET}
+    ${body}    Replace String    ${XML2}    mininet      ${MININET_USER}
     Log    ${body}
-    ${resp}    Putxml    session    ${REST_CONT_CONF}/${REST_NTPR_CONF}    data=${body}
+    ${resp}    Post    session    ${REST_CONT_CONF}/${REST_NTPR_CONF}    data=${body}
     Log    ${resp.content}
-    Should Be Equal As Strings    ${resp.status_code}    200
+    Should Be Equal As Strings    ${resp.status_code}    204
 
 Get Controller Inventory
     [Documentation]    Get Controller operational inventory
     [Tags]    netconf
-    ${resp}   Get    session    ${REST_CONT_OPER}
-    Log    ${resp.content}
-    Should Be Equal As Strings    ${resp.status_code}    200
-    Should Contain    ${resp.content}    "id":"netopeer"
-    Should Contain    ${resp.content}    "netconf-node-inventory:connected":true
-    Should Contain    ${resp.content}    "netconf-node-inventory:initial-capability"
+    Wait Until Keyword Succeeds    10s    2s    Get Inventory 
 
 Pull External Device configuration
     [Documentation]    Pull Netopeer configuration
     [Tags]    netconf
-    Wait Until Keyword Succeeds    10s    2s    Pull Config
+    ${resp}   Get    session    ${REST_CONT_CONF}/${REST_NTPR_MOUNT}
+    Log    ${resp.content}
+    Should Be Equal As Strings    ${resp.status_code}    200
+    Should Contain    ${resp.content}    {}
 
 Verify Device Operational data
     [Documentation]    Verify Netopeer operational data
@@ -54,10 +52,11 @@ Verify Device Operational data
     Should Contain    ${resp.content}    datastores
 
 *** Keywords ***
-Pull Config
-    ${resp}   Get    session    ${REST_CONT_CONF}/${REST_NTPR_MOUNT}
+Get Inventory
+    ${resp}   Get    session    ${REST_CONT_OPER}
     Log    ${resp.content}
     Should Be Equal As Strings    ${resp.status_code}    200
-    Should Contain    ${resp.content}    {}
-
+    Should Contain    ${resp.content}    "id":"netopeer"
+    Should Contain    ${resp.content}    "netconf-node-inventory:connected":true
+    Should Contain    ${resp.content}    "netconf-node-inventory:initial-capability"
 
index 94cf7514af0a41c1076a090a72649cdba5cd0d3f..dadfed0023c01213200c6be9a06098f31747952c 100644 (file)
@@ -17,12 +17,9 @@ Resource          ../../../libraries/AAAKeywords.txt
 *** Test Cases ***
 Get Token With Valid Username And Password
     [Documentation]    Sanity test to ensure default user/password can get a token
-    ${auth_data}=    Create Auth Data    ${USER}    ${PWD}
-    ${resp}=    AAA Login    ${CONTROLLER}    ${auth_data}
-    ${auth_token}=    Extract Value From Content    ${resp.content}    /access_token    strip
+    ${auth_token}=    Get Auth Token
     Should Be String    ${auth_token}
     Log    Token: ${auth_token}
-    Should Be Equal As Strings    ${resp.status_code}    201
     Validate Token Format    ${auth_token}
 
 Fail To Get Token With Invalid Username And Password
@@ -37,48 +34,47 @@ Fail To Get Token With Invalid Username And Password
 
 Create Token with Client Authorization
     [Documentation]    Get a token using client domain
-    ${auth_data}=    Create Auth Data    ${USER}    ${PWD}    ${SCOPE}    dlux    secrete
-    ${resp}=    AAA Login    ${CONTROLLER}    ${auth_data}
-    ${auth_token}=    Extract Value From Content    ${resp.content}    /access_token    strip
+    ${auth_token}=    Get Auth Token    ${USER}    ${PWD}    ${SCOPE}    dlux    secrete
     Should Be String    ${auth_token}
     Log    Token: ${auth_token}
-    Should Be Equal As Strings    ${resp.status_code}    201
     Validate Token Format    ${auth_token}
 
 Token Authentication In REST Request
     [Documentation]    Use a token to make a successful REST transaction
-    ${auth_data}=    Create Auth Data    ${USER}    ${PWD}
-    ${resp}=    AAA Login    ${CONTROLLER}    ${auth_data}
-    ${auth_token}=    Extract Value From Content    ${resp.content}    /access_token    strip
-    Create Session    ODL_SESSION    http://${CONTROLLER}:8181
-    ${headers}=    Create Dictionary    Content-Type    application/x-www-form-urlencoded
-    Set To Dictionary    ${headers}    Authorization    Bearer ${auth_token}
-    ${resp}=    RequestsLibrary.GET    ODL_SESSION    ${OPERATIONAL_NODES_API}    headers=${headers}
-    Log    STATUS_CODE: ${resp.status_code} CONTENT: ${resp.content}
-    Should Be Equal As Strings    ${resp.status_code}    200
-    Should Contain    ${resp.content}    nodes
+    ${auth_token}=    Get Auth Token
+    Make REST Transaction    200    ${auth_token}
 
-Revoke Token
+Revoke Token And Verify Transaction Fails
     [Documentation]    negative test to revoke valid token and check that REST transaction fails
-    ${auth_data}=    Create Auth Data    ${USER}    ${PWD}
-    ${resp}=    AAA Login    ${CONTROLLER}    ${auth_data}
-    ${auth_token}=    Extract Value From Content    ${resp.content}    /access_token    strip
+    ${auth_token}=    Get Auth Token
+    Make REST Transaction    200    ${auth_token}
+    Revoke Auth Token    ${auth_token}
+    Make REST Transaction    401    ${auth_token}
+
+Disable Authentication And Re-Enable Authentication
+    [Documentation]    Toggles authentication off and verifies that no login credentials are needed for REST transactions
+    Disable Authentication On Controller    ${CONTROLLER}
+    Wait Until Keyword Succeeds    10s    1s    Make REST Transaction    200
+    Enable Authentication On Controller    ${CONTROLLER}
+    Wait Until Keyword Succeeds    10s    1s    Validate That Authentication Fails With Wrong Token
+    ${auth_token}=    Get Auth Token
+    Make REST Transaction    200    ${auth_token}
+
+*** Keywords ***
+Validate That Authentication Fails With Wrong Token
+    ${bad_token}=    Set Variable    notARealToken
+    Make REST Transaction    401    ${bad_token}
+
+Make REST Transaction
+    [Arguments]    ${expected_status_code}    ${auth_data}=${EMPTY}
     Create Session    ODL_SESSION    http://${CONTROLLER}:8181
     ${headers}=    Create Dictionary    Content-Type    application/x-www-form-urlencoded
-    Set To Dictionary    ${headers}    Authorization    Bearer ${auth_token}
+    Run Keyword If    "${auth_data}" != "${EMPTY}"    Set To Dictionary    ${headers}    Authorization    Bearer ${auth_data}
     ${resp}=    RequestsLibrary.GET    ODL_SESSION    ${OPERATIONAL_NODES_API}    headers=${headers}
     Log    STATUS_CODE: ${resp.status_code} CONTENT: ${resp.content}
-    Should Be Equal As Strings    ${resp.status_code}    200
+    Should Be Equal As Strings    ${resp.status_code}    ${expected_status_code}
     Should Contain    ${resp.content}    nodes
-    ${headers}=    Create Dictionary    Content-Type    application/x-www-form-urlencoded
-    ${resp}=    RequestsLibrary.POST    ODL_SESSION    ${REVOKE_TOKEN_API}    data=${auth_token}    headers=${headers}
-    Should Be Equal As Strings    ${resp.status_code}    204
-    Set To Dictionary    ${headers}    Authorization    Bearer ${auth_token}
-    ${resp}=    RequestsLibrary.GET    ODL_SESSION    ${OPERATIONAL_NODES_API}    headers=${headers}
-    Log    STATUS_CODE: ${resp.status_code} CONTENT: ${resp.content}
-    Should Be Equal As Strings    ${resp.status_code}    401
 
-*** Keywords ***
 Credential Authentication Suite Setup
     Log    Suite Setup
 
index 4941ef03c44560aab656a4a88c0ec6b3c5328d3e..293e2453597a41e96fae7b8a58765c22a0b0e60a 100644 (file)
             </ethernet-source>
         </ethernet-match>
         <arp-op>1</arp-op>
-        <arp-source-transport-address>192.168.4.1</arp-source-transport-address>
-        <arp-target-transport-address>10.21.22.23</arp-target-transport-address>
+        <arp-source-transport-address>192.168.4.1/32</arp-source-transport-address>
+        <arp-target-transport-address>10.21.22.23/32</arp-target-transport-address>
     </match>
     <cookie>13</cookie>
     <flow-name>FooXf13</flow-name>
     <priority>13</priority>
     <barrier>false</barrier>
-</flow>
\ No newline at end of file
+</flow>
index 1200583283b4bf23ef525429b000bd291502b261..582de509ba96ba04432d9eedca6dd467c22490b5 100644 (file)
@@ -31,8 +31,8 @@
             </ethernet-source>
         </ethernet-match>
         <arp-op>1</arp-op>
-        <arp-source-transport-address>192.168.4.1</arp-source-transport-address>
-        <arp-target-transport-address>10.21.22.23</arp-target-transport-address>
+        <arp-source-transport-address>192.168.4.1/32</arp-source-transport-address>
+        <arp-target-transport-address>10.21.22.23/32</arp-target-transport-address>
         <arp-source-hardware-address>
             <address>12:34:56:78:98:AB</address>
         </arp-source-hardware-address>
@@ -44,4 +44,4 @@
     <flow-name>FooXf14</flow-name>
     <priority>14</priority>
     <barrier>false</barrier>
-</flow>
\ No newline at end of file
+</flow>
index c65bfcacf78c26c03f2d2aeedce1892faf14b885..110e6f32d5227f10d530ee9fcefcf9cdb7928b89 100644 (file)
                 <type>2048</type>
             </ethernet-type>
         </ethernet-match>
-        <ipv4-source>10.0.0.1</ipv4-source>
+        <ipv4-source>10.0.0.1/32</ipv4-source>
     </match>
     <cookie>2</cookie>
     <flow-name>FooXf2</flow-name>
     <priority>2</priority>
     <barrier>false</barrier>
-</flow>
\ No newline at end of file
+</flow>
index 437486aefc42c0cd6eb223d52f80640fb6e1c1cd..fe21ad1903869171191deda7556839fa64c3b3d2 100644 (file)
@@ -1,4 +1,4 @@
-<module>
+<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
   <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">prefix:sal-netconf-connector</type>
   <name>netopeer</name>
   <address xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">127.0.0.1</address>
index 0121e53ebe892f15aa55168b19078f4069cd88ed..df8e044751df95bd35afb55b94448f6b15118977 100644 (file)
@@ -4,3 +4,4 @@
 *.swm
 *.csv
 logs/
+.vagrant
index 980fd675b1c52393f3fa62d3425f22b4f6e89461..b85d1c97ad9458e5f66218aab563e591a1c5b5c4 100644 (file)
@@ -10,7 +10,8 @@ CBench, wrapped in stuff that makes it useful.
         - [Usage Details: loop_wcbench.sh](#user-content-usage-details-loop_wcbenchsh)
         - [Usage Details: stats.py](#user-content-usage-details-statspy)
     - [WCBench Results](#user-content-wcbench-results)
-    - [Detailed Walkthrough](#user-content-detailed-walkthrough)
+    - [Detailed Walkthrough: Vagrant](#user-content-detailed-walkthrough-vagrant)
+    - [Detailed Walkthrough: Manual](#user-content-detailed-walkthrough-manual)
     - [Contributing](#user-content-contributing)
     - [Contact](#user-content-contact)
 
@@ -18,7 +19,7 @@ CBench, wrapped in stuff that makes it useful.
 
 CBench is a somewhat classic SDN controller benchmark tool. It blasts a controller with OpenFlow packet-in messages and counts the rate of flow mod messages returned. WCBench consumes CBench as a library, then builds a robust test automation, stats collection and stats analysis/graphing system around it.
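 
 For a sense of what WCBench automates, here is a sketch of the raw CBench invocation that `wcbench.sh` assembles from its configuration variables (the values shown are simply the script's defaults, not a recommendation):
 
 ```
 # Illustrative only: roughly the cbench command wcbench.sh builds and runs
 cbench -c localhost -p 6633 -m 10000 -l 10 -s 32 -M 100000 -w 1
 ```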
 
-WCBench currently only supports the OpenDaylight SDN controller, but it would be fairly easy to add support for other controllers. Community contributions are encouraged!
+WCBench currently only supports the Helium release of the OpenDaylight SDN controller, but it would be fairly easy to add support for other controllers. Community contributions are encouraged!
 
 ### Usage
 
@@ -33,12 +34,13 @@ Setup and/or run CBench and/or OpenDaylight.
 
 OPTIONS:
     -h Show this message
+    -v Output verbose debug info
     -c Install CBench
     -t <time> Run CBench for given number of minutes
     -r Run CBench against OpenDaylight
-    -i Install ODL from last successful build
+    -i Install OpenDaylight Helium 0.2.1
     -p <processors> Pin ODL to given number of processors
-    -o Run ODL from last successful build
+    -o Start and configure OpenDaylight Helium 0.2.1
     -k Kill OpenDaylight
     -d Delete local ODL and CBench code
 ```
@@ -50,6 +52,7 @@ Run WCBench against OpenDaylight in a loop.
 
 OPTIONS:
     -h Show this help message
+    -v Output verbose debug info
     -l Loop WCBench runs without restarting ODL
     -r Loop WCBench runs, restart ODL between runs
     -t <time> Run WCBench for a given number of minutes
@@ -95,7 +98,7 @@ Host cbench
 As you likely know, `ssh-copy-id` can help you setup your system to connect with the remote box via public key crypto. If you don't have keys setup for public key crypto, google for guides (very out of scope). Finally, note that the `SSH_HOSTNAME` var in `wcbench.sh` must be set to the exact same value given on the `Host` line above.
 * Trivially installing/configuring OpenDaylight Helium 0.2.1 (downloaded from the OpenDaylight Nexus repository).
 * Pinning the OpenDaylight process to a given number of CPU cores. This is useful for ensuring that ODL is properly pegged, working as hard as it can with given resource limits. It can also expose bad ODL behavior that comes about when the process is pegged.
-* Running OpenDaylight and issuing all of the required configurations. Note that the `ODL_STARTUP_DELAY` variable in `wcbench.sh` might need some attention when running on a new system. If ODL takes longer than this value (in seconds) to start, `wcbench.sh` will attempt to issue the required configuration via telnet to the OSGi console before ODL can accept the configuration changes. This will result in fairly obvious error messages dumped to stdout. If you see these, increase the `ODL_STARTUP_DELAY` time. Alternatively, you can manually issue the required configuration after ODL starts by connecting to the OSGi console via telnet and issuing `dropAllPacketsRpc on`. See the `issue_odl_config` function in `wcbench.sh` for more info. Note that there's an open issue to make this config process more robust ([Issue #6](issue_odl_config)). Community contributions solicited!
+* Running OpenDaylight and issuing all of the required configurations.
 * Stopping the OpenDaylight process. This is done cleanly via the `run.sh` script, not `kill` or `pkill`.
 * Cleaning up everything changed by the `wcbench.sh` script, including deleting ODL and CBench sources and binaries.
 
@@ -131,7 +134,49 @@ Examples are useful:
 ```
 
 ```
-# Command for graphs of flows/sec and used RAM stats
+# All stats
+./stats.py -S
+{'fifteen_load': {'max': 0,
+                  'mean': 0.62,
+                  'min': 0,
+                  'relstddev': 0.0,
+                  'stddev': 0.0},
+ 'five_load': {'max': 0,
+               'mean': 0.96,
+               'min': 0,
+               'relstddev': 0.0,
+               'stddev': 0.0},
+ 'flows': {'max': 22384,
+           'mean': 22384.52,
+           'min': 22384,
+           'relstddev': 0.0,
+           'stddev': 0.0},
+ 'iowait': {'max': 0, 'mean': 0.0, 'min': 0, 'relstddev': 0.0, 'stddev': 0.0},
+ 'one_load': {'max': 0,
+              'mean': 0.85,
+              'min': 0,
+              'relstddev': 0.0,
+              'stddev': 0.0},
+ 'runtime': {'max': 120,
+             'mean': 120.0,
+             'min': 120,
+             'relstddev': 0.0,
+             'stddev': 0.0},
+ 'sample_size': 1,
+ 'steal_time': {'max': 0,
+                'mean': 0.0,
+                'min': 0,
+                'relstddev': 0.0,
+                'stddev': 0.0},
+ 'used_ram': {'max': 3657,
+              'mean': 3657.0,
+              'min': 3657,
+              'relstddev': 0.0,
+              'stddev': 0.0}}
+```
+
+```
+# Create graphs of flows/sec and used RAM stats
 ./stats.py -g flows ram
 ```
 
@@ -170,7 +215,88 @@ The data collected by WCBench and stored in the results file for each run includ
 * The iowait value at the start of the test on the system running ODL
 * The iowait value at the end of the test on the system running ODL
 
-### Detailed Walkthrough
+### Detailed Walkthrough: Vagrant
+
+A Vagrantfile is provided for WCBench, which makes it trivially easy to get an OpenDaylight+WCBench environment up and running. Vagrant also allows folks on otherwise unsupported operating systems (Ubuntu, Debian, Windows) to use WCBench.
+
+If you don't have Vagrant installed already, head over to [their docs](https://docs.vagrantup.com/v2/installation/) and get that knocked out.
+
+If you haven't already, you'll need to clone the WCBench repo:
+
+```
+[~]$ git clone https://github.com/dfarrell07/wcbench.git
+```
+
+You can now trivially stand up a VM with OpenDaylight+CBench+WCBench properly configured:
+
+```
+[~/wcbench]$ vagrant up
+```
+
+If this is your first time using the `chef/fedora-20` Vagrant box, it will need to be downloaded first. Future `vagrant up`s will use a locally cached version. Once the box is provisioned, you can connect to it like this:
+
+```
+[~/wcbench]$ vagrant ssh
+Last login: Mon Nov 17 14:29:33 2014 from 10.0.2.2
+[vagrant@localhost ~]$
+```
+
+WCBench, OpenDaylight and CBench are already installed and configured. You can start OpenDaylight like this:
+
+```
+[vagrant@localhost ~]$ cd wcbench/
+[vagrant@localhost wcbench]$ ./wcbench.sh -o
+Starting OpenDaylight
+Will repeatedly attempt connecting to Karaf shell until it's ready
+Issued `dropAllPacketsRpc on` command via Karaf shell to localhost:8101
+Issued `log:set ERROR` command via Karaf shell to localhost:8101
+```
+
+Run CBench against OpenDaylight like this:
+
+```
+[vagrant@localhost wcbench]$ ./wcbench.sh -r
+Collecting pre-test stats
+Running CBench against ODL on localhost:6633
+Collecting post-test stats
+Collecting time-irrelevant stats
+Average responses/second: 29486.95
+```
+
+Since the WCBench Vagrant box is headless, you'll want to move the `results.csv` file to a system with a GUI for graphing.
+
+Vagrant syncs `/home/vagrant/wcbench/` with the directory on your local system that contains WCBench's Vagrantfile. Dropping `results.csv` in `/home/vagrant/wcbench/` therefore makes it available on your local system for analysis. You can also modify the `RESULTS_FILE` variable in `wcbench.sh` to point at `/home/vagrant/wcbench/`, if you'd like to put it there by default.
+
+```
+# Move results.csv to the synced dir
+[vagrant@localhost wcbench]$ mv ../results.csv .
+```
+
+```
+# Configure wcbench to create results.csv in the synced dir
+RESULTS_FILE=$BASE_DIR/wcbench/"results.csv"
+```
+
+You can now generate graphs and stats, as described in the [Usage Details: stats.py](#user-content-usage-details-statspy) section.
+
+To run long batches of tests, use `loop_wcbench.sh`, as described in [Usage Details: loop_wcbench.sh](#user-content-usage-details-loop_wcbenchsh).
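+
+For example, here is a sketch of a looped run (the flag values are arbitrary). Because `loop_wcbench.sh` acts on each flag as it parses it, pass `-v` and `-t` before `-r`:
+
+```
+# Loop two-minute WCBench runs, restarting ODL between runs
+[vagrant@localhost wcbench]$ ./loop_wcbench.sh -v -t 2 -r
+```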
+
+Once you're done, you can kill OpenDaylight like this:
+
+```
+[vagrant@localhost wcbench]$ ./wcbench.sh -k
+Stopping OpenDaylight
+```
+
+Unless you want a fresh WCBench Vagrant box, you can save yourself some time at your next `vagrant up` by suspending (instead of destroying) the box:
+
+```
+# On my local system
+[~/wcbench]$ vagrant suspend
+==> default: Saving VM state and suspending execution...
+```
+
+### Detailed Walkthrough: Manual
 
 This walkthrough describes how to set up a system for WCBench testing, starting with a totally fresh [Fedora 20 Cloud](http://fedoraproject.org/get-fedora#clouds) install. I'm going to leave out the VM creation details for the sake of space. As long as you can SSH into the machine and it has access to the Internet, all of the following should work as-is. Note that this process has also been tested on CentOS 6.5 (so it should also work on RHEL).
 
@@ -195,7 +321,7 @@ You can now SSH into your fresh VM:
 ```
 [~]$ ssh wcbench
 Warning: Permanently added '10.3.9.110' (RSA) to the list of known hosts.
-[fedora@dfarrell-wcbench ~]$ 
+[fedora@dfarrell-wcbench ~]$
 ```
 
 You'll need a utility like screen or tmux, so you can start long-running tests, log out of the system and leave them running. My Linux configurations are very scripted, so here's how I install tmux and its configuration file. You're welcome to copy this.
@@ -239,18 +365,17 @@ Huzzah! You now have WCBench "installed" on your VM. Now, to install CBench and
 [fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -ci
 CBench is not installed
 Installing CBench dependencies
-Cloning CBench repo
-Cloning openflow source code
+Cloning CBench repo into /home/fedora/oflops
+Cloning openflow source code into /home/fedora/openflow
 Building oflops/configure file
 Building CBench
 CBench is installed
 Successfully installed CBench
 Installing OpenDaylight dependencies
-Downloading last successful ODL build
-Unzipping last successful ODL build
-Downloading openflowplugin
-Removing simpleforwarding plugin
-Removing arphandler plugin
+Downloading OpenDaylight Helium 0.2.1
+Unzipping OpenDaylight Helium 0.2.1
+odl-openflowplugin-flow-services added to features installed at boot
+odl-openflowplugin-drop-test added to features installed at boot
 ```
 
 Huzzah! You now have CBench and OpenDaylight installed/configured.
@@ -260,24 +385,9 @@ You're ready to get started using WCBench. You can start ODL like this:
 ```
 [fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -o
 Starting OpenDaylight
-Giving ODL 90 seconds to get up and running
-80 seconds remaining
-70 seconds remaining
-60 seconds remaining
-50 seconds remaining
-40 seconds remaining
-30 seconds remaining
-20 seconds remaining
-10 seconds remaining
-0 seconds remaining
-Installing telnet, as it's required for issuing ODL config.
-Issuing `dropAllPacketsRpc on` command via telnet to localhost:2400
-Trying ::1...
-Connected to localhost.
-Escape character is '^]'.
-osgi> dropAllPacketsRpc on
-DropAllFlows transitions to on
-osgi> Connection closed by foreign host.
+Will repeatedly attempt connecting to Karaf shell until it's ready
+Issued `dropAllPacketsRpc on` command via Karaf shell to localhost:8101
+Issued `log:set ERROR` command via Karaf shell to localhost:8101
 ```
 
 Here's an example of running a two minute CBench test against OpenDaylight:
@@ -299,48 +409,7 @@ I suggest copying your results.csv file back to your local system for analysis,
 [~/wcbench]$ rsync wcbench:/home/fedora/results.csv .
 ```
 
-You can now run `stats.py` against it:
-
-```
-[~/wcbench]$ ./stats.py -S
-{'fifteen_load': {'max': 0,
-                  'mean': 0.62,
-                  'min': 0,
-                  'relstddev': 0.0,
-                  'stddev': 0.0},
- 'five_load': {'max': 0,
-               'mean': 0.96,
-               'min': 0,
-               'relstddev': 0.0,
-               'stddev': 0.0},
- 'flows': {'max': 22384,
-           'mean': 22384.52,
-           'min': 22384,
-           'relstddev': 0.0,
-           'stddev': 0.0},
- 'iowait': {'max': 0, 'mean': 0.0, 'min': 0, 'relstddev': 0.0, 'stddev': 0.0},
- 'one_load': {'max': 0,
-              'mean': 0.85,
-              'min': 0,
-              'relstddev': 0.0,
-              'stddev': 0.0},
- 'runtime': {'max': 120,
-             'mean': 120.0,
-             'min': 120,
-             'relstddev': 0.0,
-             'stddev': 0.0},
- 'sample_size': 1,
- 'steal_time': {'max': 0,
-                'mean': 0.0,
-                'min': 0,
-                'relstddev': 0.0,
-                'stddev': 0.0},
- 'used_ram': {'max': 3657,
-              'mean': 3657.0,
-              'min': 3657,
-              'relstddev': 0.0,
-              'stddev': 0.0}}
-```
+You can now generate graphs and stats, as described in the [Usage Details: stats.py](#user-content-usage-details-statspy) section.
 
 If you'd like to collect some serious long-term data, use the `loop_wcbench.sh` script (of course, back on the VM).
 
@@ -357,8 +426,8 @@ Once you're done, you can stop ODL and clean up the CBench and ODL source/binari
 [fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -k
 Stopping OpenDaylight
 [fedora@dfarrell-wcbench wcbench]$ ./wcbench.sh -d
-Removing /home/fedora/opendaylight
-Removing /home/fedora/distributions-base-0.2.0-SNAPSHOT-osgipackage.zip
+Removing /home/fedora/distribution-karaf-0.2.1-Helium-SR1
+Removing /home/fedora/distribution-karaf-0.2.1-Helium-SR1.zip
 Removing /home/fedora/openflow
 Removing /home/fedora/oflops
 Removing /usr/local/bin/cbench
@@ -376,6 +445,4 @@ Note that the code is Open Source under a BSD 2-clause license.
 
 ### Contact
 
-As mentioned in the [Contributing section](https://github.com/dfarrell07/wcbench/blob/master/README.md#contributing), for bugs/features, please raise an [Issue](https://github.com/dfarrell07/wcbench/issues) on the WCBench GitHub page.
-
-Daniel Farrell is the main developer of WCBench. You can contact him directly at dfarrell@redhat.com or dfarrell07@gmail.com. He also hangs out on IRC at Freenode/#opendaylight most of his waking hours.
+For feature requests, bug reports and questions, please raise an [Issue](https://github.com/dfarrell07/wcbench/issues). Daniel Farrell is the primary developer of this tool. He can be contacted directly at dfarrell@redhat.com or on IRC (dfarrell07 on Freenode). **Prefer public, documented communication like Issues over direct 1-1 communication. This is an Open Source project. Keep the community in the loop.**
diff --git a/test/tools/wcbench/Vagrantfile b/test/tools/wcbench/Vagrantfile
new file mode 100644 (file)
index 0000000..fb8c5c9
--- /dev/null
@@ -0,0 +1,30 @@
+VAGRANTFILE_API_VERSION = "2"
+
+# The WCBench README describes how to use Vagrant for WCBench work
+# See: https://github.com/dfarrell07/wcbench#user-content-detailed-walkthrough-vagrant
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+    # Build Vagrant box based on Fedora 20
+    config.vm.box = "chef/fedora-20"
+
+    # Configure VM RAM and CPU
+    config.vm.provider "virtualbox" do |v|
+      v.memory = 2048
+      v.cpus = 4
+    end
+
+    # This allows sudo commands in wcbench.sh to work
+    config.ssh.pty = true
+
+    # Unexpectedly, /usr/local/bin isn't in the default path
+    # The cbench and oflops binaries install there, so we need to add it
+    config.vm.provision "shell", inline: "echo export PATH=$PATH:/usr/local/bin >> /home/vagrant/.bashrc"
+    config.vm.provision "shell", inline: "echo export PATH=$PATH:/usr/local/bin >> /root/.bashrc"
+
+    # Drop code in /home/vagrant/wcbench, not /vagrant
+    config.vm.synced_folder ".", "/vagrant", disabled: true
+    config.vm.synced_folder ".", "/home/vagrant/wcbench"
+
+    # Install OpenDaylight and CBench with verbose output
+    config.vm.provision "shell", inline: 'su -c "/home/vagrant/wcbench/wcbench.sh -vci" vagrant'
+end
index 832f57b3fe33a373c2993a7dc9d9e8d6cf9a81a7..e6244be5dae0d79fc9c542714cfe76d13efa4855 100755 (executable)
@@ -6,6 +6,9 @@
 EX_USAGE=64
 EX_OK=0
 
+# Output verbose debug info (true) or not (anything else)
+VERBOSE=false
+
 ###############################################################################
 # Prints usage message
 # Globals:
@@ -24,6 +27,7 @@ Run WCBench against OpenDaylight in a loop.
 
 OPTIONS:
     -h Show this help message
+    -v Output verbose debug info
     -l Loop WCBench runs without restarting ODL
     -r Loop WCBench runs, restart ODL between runs
     -t <time> Run WCBench for a given number of minutes
@@ -35,6 +39,7 @@ EOF
 # Starts ODL, optionally pinning it to a given number of processors
 # Globals:
 #   processors
+#   VERBOSE
 # Arguments:
 #   None
 # Returns:
@@ -42,14 +47,26 @@ EOF
 ###############################################################################
 start_odl()
 {
-    if [ -z $processors ]; then
-        # Start ODL, don't pass processor info
-        echo "Starting ODL, not passing processor info"
-        ./wcbench.sh -o
+    if "$VERBOSE" = true; then
+        if [ -z $processors ]; then
+            # Start ODL, don't pass processor info
+            echo "Starting ODL, not passing processor info"
+            ./wcbench.sh -vo
+        else
+            # Start ODL, pinning it to given number of processors
+            echo "Pinning ODL to $processors processor(s)"
+            ./wcbench.sh -vp $processors -o
+        fi
     else
-        # Start ODL, pinning it to given number of processors
-        echo "Pinning ODL to $processors processor(s)"
-        ./wcbench.sh -p $processors -o
+        if [ -z $processors ]; then
+            # Start ODL, don't pass processor info
+            echo "Starting ODL, not passing processor info"
+            ./wcbench.sh -o
+        else
+            # Start ODL, pinning it to given number of processors
+            echo "Pinning ODL to $processors processor(s)"
+            ./wcbench.sh -p $processors -o
+        fi
     fi
 }
 
@@ -57,6 +74,7 @@ start_odl()
 # Run WCBench against ODL, optionally passing a WCBench run time
 # Globals:
 #   run_time
+#   VERBOSE
 # Arguments:
 #   None
 # Returns:
@@ -64,14 +82,26 @@ start_odl()
 ###############################################################################
 run_wcbench()
 {
-    if [ -z $run_time ]; then
-        # Flag means run WCBench
-        echo "Running WCBench, not passing run time info"
-        ./wcbench.sh -r
+    if "$VERBOSE" = true; then
+        if [ -z $run_time ]; then
+            # Flag means run WCBench
+            echo "Running WCBench, not passing run time info"
+            ./wcbench.sh -vr
+        else
+            # Flags mean use $run_time WCBench runs, run WCBench
+            echo "Running WCBench with $run_time minute(s) run time"
+            ./wcbench.sh -vt $run_time -r
+        fi
     else
-        # Flags mean use $run_time WCBench runs, run WCBench
-        echo "Running WCBench with $run_time minute(s) run time"
-        ./wcbench.sh -t $run_time -r
+        if [ -z $run_time ]; then
+            # Flag means run WCBench
+            echo "Running WCBench, not passing run time info"
+            ./wcbench.sh -r
+        else
+            # Flags mean use $run_time WCBench runs, run WCBench
+            echo "Running WCBench with $run_time minute(s) run time"
+            ./wcbench.sh -t $run_time -r
+        fi
     fi
 }
 
@@ -96,7 +126,7 @@ loop_no_restart()
 ###############################################################################
 # Repeatedly run WCBench against ODL, restart ODL between runs
 # Globals:
-#   None
+#   VERBOSE
 # Arguments:
 #   None
 # Returns:
@@ -109,7 +139,11 @@ loop_with_restart()
         start_odl
         run_wcbench
         # Stop ODL
-        ./wcbench.sh -k
+        if "$VERBOSE" = true; then
+            ./wcbench.sh -vk
+        else
+            ./wcbench.sh -k
+        fi
     done
 }
 
@@ -119,17 +153,25 @@ if [ $# -eq 0 ]; then
     exit $EX_USAGE
 fi
 
+# Used to output help if no valid action results from arguments
+action_taken=false
+
 # Parse options given from command line
-while getopts ":hlp:rt:" opt; do
+while getopts ":hvlp:rt:" opt; do
     case "$opt" in
         h)
             # Help message
             usage
             exit $EX_OK
             ;;
+        v)
+            # Output debug info verbosely
+            VERBOSE=true
+            ;;
         l)
             # Loop without restarting ODL between WCBench runs
             loop_no_restart
+            action_taken=true
             ;;
         p)
             # Pin a given number of processors
@@ -143,6 +185,7 @@ while getopts ":hlp:rt:" opt; do
         r)
             # Restart ODL between each WCBench run
             loop_with_restart
+            action_taken=true
             ;;
         t)
             # Set length of WCBench run in minutes
@@ -154,3 +197,9 @@ while getopts ":hlp:rt:" opt; do
             exit $EX_USAGE
     esac
 done
+
+# Output help message if no valid action was taken
+if ! "$action_taken" = true; then
+    usage
+    exit $EX_USAGE
+fi
index 810ca441354c01a48834919db8fec76408f3bc7b..deb8c78d58dc9a857041b3145b4e81762c774ebc 100755 (executable)
@@ -25,17 +25,19 @@ class Stats(object):
     log_file = "cbench.log"
     precision = 3
     run_index = 0
-    flow_index = 1
-    start_time_index = 2
-    end_time_index = 3
-    start_steal_time_index = 10
-    end_steal_time_index = 11
-    used_ram_index = 13
-    one_load_index = 16
-    five_load_index = 17
-    fifteen_load_index = 18
-    start_iowait_index = 20
-    end_iowait_index = 21
+    min_flow_index = 1
+    max_flow_index = 2
+    avg_flow_index = 3
+    start_time_index = 4
+    end_time_index = 5
+    start_steal_time_index = 12
+    end_steal_time_index = 13
+    used_ram_index = 15
+    one_load_index = 18
+    five_load_index = 19
+    fifteen_load_index = 20
+    start_iowait_index = 22
+    end_iowait_index = 23
 
     def __init__(self):
         """Setup some flags and data structures, kick off build_cols call."""
@@ -46,7 +48,9 @@ class Stats(object):
     def build_cols(self):
         """Parse results file into lists of values, one per column."""
         self.run_col = []
-        self.flows_col = []
+        self.min_flows_col = []
+        self.max_flows_col = []
+        self.avg_flows_col = []
         self.runtime_col = []
         self.used_ram_col = []
         self.iowait_col = []
@@ -60,7 +64,9 @@ class Stats(object):
             for row in results_reader:
                 try:
                     self.run_col.append(float(row[self.run_index]))
-                    self.flows_col.append(float(row[self.flow_index]))
+                    self.min_flows_col.append(float(row[self.min_flow_index]))
+                    self.max_flows_col.append(float(row[self.max_flow_index]))
+                    self.avg_flows_col.append(float(row[self.avg_flow_index]))
                     self.runtime_col.append(float(row[self.end_time_index]) -
                                             float(row[self.start_time_index]))
                     self.used_ram_col.append(float(row[self.used_ram_index]))
@@ -77,12 +83,12 @@ class Stats(object):
                     # Skips header
                     continue
 
-    def compute_flow_stats(self):
-        """Compute CBench flows/second stats."""
-        self.compute_generic_stats("flows", self.flows_col)
+    def compute_avg_flow_stats(self):
+        """Compute CBench average flows/second stats."""
+        self.compute_generic_stats("flows", self.avg_flows_col)
 
-    def build_flow_graph(self, total_gcount, graph_num):
-        """Plot flows/sec data.
+    def build_avg_flow_graph(self, total_gcount, graph_num):
+        """Plot average flows/sec data.
 
         :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
@@ -91,7 +97,39 @@ class Stats(object):
 
         """
         self.build_generic_graph(total_gcount, graph_num,
-                                 "Flows per Second", self.flows_col)
+                                 "Average Flows per Second", self.avg_flows_col)
+
+    def compute_min_flow_stats(self):
+        """Compute CBench min flows/second stats."""
+        self.compute_generic_stats("min_flows", self.min_flows_col)
+
+    def build_min_flow_graph(self, total_gcount, graph_num):
+        """Plot min flows/sec data.
+
+        :param total_gcount: Total number of graphs to render.
+        :type total_gcount: int
+        :param graph_num: Number for this graph, <= total_gcount.
+        :type graph_num: int
+
+        """
+        self.build_generic_graph(total_gcount, graph_num,
+                                 "Minimum Flows per Second", self.min_flows_col)
+
+    def compute_max_flow_stats(self):
+        """Compute CBench max flows/second stats."""
+        self.compute_generic_stats("max_flows", self.max_flows_col)
+
+    def build_max_flow_graph(self, total_gcount, graph_num):
+        """Plot max flows/sec data.
+
+        :param total_gcount: Total number of graphs to render.
+        :type total_gcount: int
+        :param graph_num: Number for this graph, <= total_gcount.
+        :type graph_num: int
+
+        """
+        self.build_generic_graph(total_gcount, graph_num,
+                                 "Maximum Flows per Second", self.max_flows_col)
 
     def compute_ram_stats(self):
         """Compute used RAM stats."""
@@ -116,9 +154,9 @@ class Stats(object):
     def build_runtime_graph(self, total_gcount, graph_num):
         """Plot CBench runtime length data.
 
-        :paruntime total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :paruntime graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
 
         """
@@ -132,9 +170,9 @@ class Stats(object):
     def build_iowait_graph(self, total_gcount, graph_num):
         """Plot iowait data.
 
-        :paiowait total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :paiowait graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
 
         """
@@ -148,9 +186,9 @@ class Stats(object):
     def build_steal_time_graph(self, total_gcount, graph_num):
         """Plot steal time data.
 
-        :pasteal_time total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :pasteal_time graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
 
         """
@@ -164,9 +202,9 @@ class Stats(object):
     def build_one_load_graph(self, total_gcount, graph_num):
         """Plot one minute load data.
 
-        :paone_load total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :paone_load graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
 
         """
@@ -180,9 +218,9 @@ class Stats(object):
     def build_five_load_graph(self, total_gcount, graph_num):
         """Plot five minute load data.
 
-        :pafive_load total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :pafive_load graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
 
         """
@@ -196,9 +234,9 @@ class Stats(object):
     def build_fifteen_load_graph(self, total_gcount, graph_num):
         """Plot fifteen minute load data.
 
-        :pafifteen_load total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :pafifteen_load graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
 
         """
@@ -223,9 +261,9 @@ class Stats(object):
     def build_generic_graph(self, total_gcount, graph_num, y_label, data_col):
         """Helper for plotting generic data.
 
-        :pageneric total_gcount: Total number of graphs to render.
+        :param total_gcount: Total number of graphs to render.
         :type total_gcount: int
-        :pageneric graph_num: Number for this graph, <= total_gcount.
+        :param graph_num: Number for this graph, <= total_gcount.
         :type graph_num: int
         :param y_label: Label of Y axis.
         :type y_label: string
@@ -245,7 +283,9 @@ class Stats(object):
 stats = Stats()
 
 # Map of graph names to the Stats.fns that build them
-graph_map = {"flows": stats.build_flow_graph,
+graph_map = {"min_flows": stats.build_min_flow_graph,
+             "max_flows": stats.build_max_flow_graph,
+             "flows": stats.build_avg_flow_graph,
              "runtime": stats.build_runtime_graph,
              "iowait": stats.build_iowait_graph,
              "steal_time": stats.build_steal_time_graph,
@@ -253,7 +293,9 @@ graph_map = {"flows": stats.build_flow_graph,
              "five_load": stats.build_five_load_graph,
              "fifteen_load": stats.build_fifteen_load_graph,
              "ram": stats.build_ram_graph}
-stats_map = {"flows": stats.compute_flow_stats,
+stats_map = {"min_flows": stats.compute_min_flow_stats,
+             "max_flows": stats.compute_max_flow_stats,
+             "flows": stats.compute_avg_flow_stats,
              "runtime": stats.compute_runtime_stats,
              "iowait": stats.compute_iowait_stats,
              "steal_time": stats.compute_steal_time_stats,
index 60baf943494fcf1d6dd4344cf1db5d5629ad9252..4702097926841e7a93c7c1676899913de2ab9245 100755 (executable)
@@ -14,17 +14,16 @@ EX_NOT_FOUND=65
 EX_OK=0
 EX_ERR=1
 
+# Output verbose debug info (true) or not (anything else)
+VERBOSE=false
+
 # Params for CBench test and ODL config
-NUM_SWITCHES=64 # Default number of switches for CBench to simulate
+NUM_SWITCHES=32 # Default number of switches for CBench to simulate
 NUM_MACS=100000  # Default number of MACs for CBench to use
 TESTS_PER_SWITCH=10  # Default number of CBench tests to do per CBench run
 MS_PER_TEST=10000  # Default milliseconds to run each CBench test
 CBENCH_WARMUP=1  # Default number of warmup cycles to run CBench
-OSGI_PORT=2400  # Port that the OSGi console listens for telnet on
-ODL_STARTUP_DELAY=90  # Default time in seconds to give ODL to start
-ODL_RUNNING_STATUS=0  # run.sh gives this status when ODL is running
-ODL_STOPPED_STATUS=255  # run.sh gives this status when ODL is stopped
-ODL_BROKEN_STATUS=1  # run.sh gives this status when things are FUBR
+KARAF_SHELL_PORT=8101  # Port that the Karaf shell listens on
 CONTROLLER="OpenDaylight"  # Currently only support ODL
 CONTROLLER_IP="localhost"  # Change this to remote IP if running on two systems
 CONTROLLER_PORT=6633  # Default port for OpenDaylight
@@ -34,22 +33,25 @@ SSH_HOSTNAME="cbenchc"  # You'll need to update this to reflect ~/.ssh/config
 BASE_DIR=$HOME  # Directory that code and such is dropped into
 OF_DIR=$BASE_DIR/openflow  # Directory that contains OpenFlow code
 OFLOPS_DIR=$BASE_DIR/oflops  # Directory that contains oflops repo
-ODL_DIR=$BASE_DIR/opendaylight  # Directory with ODL code
-ODL_ZIP="distributions-base-0.2.0-SNAPSHOT-osgipackage.zip"  # ODL zip name
+ODL_DIR=$BASE_DIR/distribution-karaf-0.2.1-Helium-SR1  # Directory with ODL code
+ODL_ZIP="distribution-karaf-0.2.1-Helium-SR1.zip"  # ODL zip name
 ODL_ZIP_PATH=$BASE_DIR/$ODL_ZIP  # Full path to ODL zip
 PLUGIN_DIR=$ODL_DIR/plugins  # ODL plugin directory
 RESULTS_FILE=$BASE_DIR/"results.csv"  # File that results are stored in
 CBENCH_LOG=$BASE_DIR/"cbench.log"  # Log file used to store strange error msgs
 CBENCH_BIN="/usr/local/bin/cbench"  # Path to CBench binary
+OFLOPS_BIN="/usr/local/bin/oflops"  # Path to oflops binary
+FEATURES_FILE=$ODL_DIR/etc/org.apache.karaf.features.cfg  # Karaf features to install
 
 # Array that stores results in indexes defined by cols array
 declare -a results
 
 # The order of these array values determines column order in RESULTS_FILE
-cols=(run_num cbench_avg start_time end_time controller_ip human_time
-    num_switches num_macs tests_per_switch ms_per_test start_steal_time
-    end_steal_time total_ram used_ram free_ram cpus one_min_load five_min_load
-    fifteen_min_load controller start_iowait end_iowait)
+cols=(run_num cbench_min cbench_max cbench_avg start_time end_time
+    controller_ip human_time num_switches num_macs tests_per_switch
+    ms_per_test start_steal_time end_steal_time total_ram used_ram
+    free_ram cpus one_min_load five_min_load fifteen_min_load controller
+    start_iowait end_iowait)
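+
+# Note: this column order is what the index constants added to stats.py assume,
+# e.g. column 1 = cbench_min, 2 = cbench_max, 3 = cbench_avg, 4 = start_time,
+# 5 = end_time, ..., 22 = start_iowait, 23 = end_iowait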
 
 # This two-stat-array system is needed until I find an answer to this question:
 # http://goo.gl/e0M8Tp
@@ -97,18 +99,20 @@ Setup and/or run CBench and/or OpenDaylight.
 
 OPTIONS:
     -h Show this message
+    -v Output verbose debug info
     -c Install CBench
     -t <time> Run CBench for given number of minutes
     -r Run CBench against OpenDaylight
-    -i Install ODL from last successful build
+    -i Install OpenDaylight Helium 0.2.1
     -p <processors> Pin ODL to given number of processors
-    -o Run ODL from last successful build
+    -o Start and configure OpenDaylight Helium 0.2.1
     -k Kill OpenDaylight
     -d Delete local ODL and CBench code
 EOF
 }
 
 ###############################################################################
+# Checks if CBench is installed
 # Globals:
 #   EX_OK
 #   EX_NOT_FOUND
@@ -136,6 +140,7 @@ cbench_installed()
 # This has been tested on fresh cloud versions of Fedora 20 and CentOS 6.5
 # Not currently building oflops/netfpga-packet-generator-c-library (optional)
 # Globals:
+#   VERBOSE
 #   EX_OK
 #   EX_ERR
 #   OFLOPS_DIR
@@ -154,27 +159,49 @@ install_cbench()
 
     # Install required packages
     echo "Installing CBench dependencies"
-    sudo yum install -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel git &> /dev/null
+    if "$VERBOSE" = true; then
+        sudo yum install -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel git
+    else
+        sudo yum install -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel git &> /dev/null
+    fi
 
     # Clone repo that contains CBench
-    echo "Cloning CBench repo"
-    git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR &> /dev/null
+    echo "Cloning CBench repo into $OFLOPS_DIR"
+    if "$VERBOSE" = true; then
+        git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR
+    else
+        git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR &> /dev/null
+    fi
 
     # CBench requires the OpenFlow source code, clone it
-    echo "Cloning openflow source code"
-    git clone git://gitosis.stanford.edu/openflow.git $OF_DIR &> /dev/null
+    echo "Cloning openflow source code into $OF_DIR"
+    if "$VERBOSE" = true; then
+        git clone git://gitosis.stanford.edu/openflow.git $OF_DIR
+    else
+        git clone git://gitosis.stanford.edu/openflow.git $OF_DIR &> /dev/null
+    fi
 
     # Build the oflops/configure file
     old_cwd=$PWD
     cd $OFLOPS_DIR
     echo "Building oflops/configure file"
-    ./boot.sh &> /dev/null
+    if "$VERBOSE" = true; then
+        ./boot.sh
+    else
+        ./boot.sh &> /dev/null
+    fi
 
     # Build oflops
     echo "Building CBench"
-    ./configure --with-openflow-src-dir=$OF_DIR &> /dev/null
-    make &> /dev/null
-    sudo make install &> /dev/null
+    if "$VERBOSE" = true; then
+        ./configure --with-openflow-src-dir=$OF_DIR
+        make
+        sudo make install
+    else
+        ./configure --with-openflow-src-dir=$OF_DIR &> /dev/null
+        make &> /dev/null
+        sudo make install &> /dev/null
+    fi
     cd $old_cwd
 
     # Validate that the install worked
@@ -388,6 +415,7 @@ write_results()
 # Globals:
 #   CONTROLLER_IP
 #   CONTROLLER_PORT
+#   VERBOSE
 #   MS_PER_TEST
 #   TEST_PER_SWITCH
 #   NUM_SWITCHES
@@ -404,20 +432,32 @@ run_cbench()
 {
     get_pre_test_stats
     echo "Running CBench against ODL on $CONTROLLER_IP:$CONTROLLER_PORT"
-    cbench_output=`cbench -c $CONTROLLER_IP -p $CONTROLLER_PORT -m $MS_PER_TEST -l $TESTS_PER_SWITCH -s $NUM_SWITCHES -M $NUM_MACS -w $CBENCH_WARMUP 2>&1`
+    if "$VERBOSE" = true; then
+        cbench_output=`cbench -c $CONTROLLER_IP -p $CONTROLLER_PORT -m $MS_PER_TEST -l $TESTS_PER_SWITCH -s $NUM_SWITCHES -M $NUM_MACS -w $CBENCH_WARMUP`
+    else
+        cbench_output=`cbench -c $CONTROLLER_IP -p $CONTROLLER_PORT -m $MS_PER_TEST -l $TESTS_PER_SWITCH -s $NUM_SWITCHES -M $NUM_MACS -w $CBENCH_WARMUP 2>&1`
+    fi
     get_post_test_stats
     get_time_irrelevant_stats
 
-    # Parse out average responses/sec, log/handle very rare unexplained errors
-    # This logic can be removed if/when the root cause of this error is discovered and fixed
+    # Parse out min, max and average responses/sec, log/handle errors
+    # See: https://github.com/dfarrell07/wcbench/issues/16
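+    # Illustrative only: the RESULT line parsed below looks roughly like
+    #   RESULT: 32 switches 10 tests min/max/avg/stdev = 17000.00/26000.00/22000.00/1800.00 responses/s
+    # i.e. field 8 holds min/max/avg/stdev, and splitting it on '/' yields the values kept here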
+    cbench_min=`echo "$cbench_output" | grep RESULT | awk '{print $8}' | awk -F'/' '{print $1}'`
+    cbench_max=`echo "$cbench_output" | grep RESULT | awk '{print $8}' | awk -F'/' '{print $2}'`
     cbench_avg=`echo "$cbench_output" | grep RESULT | awk '{print $8}' | awk -F'/' '{print $3}'`
     if [ -z "$cbench_avg" ]; then
-        echo "WARNING: Rare error occurred: failed to parse avg. See $CBENCH_LOG." >&2
+        echo "WARNING: Error occurred: Failed to parse CBench average" >&2
+        echo "This is an issue with CBench or ODL, not WCBench." >&2
+        echo "May need to reduce NUM_SWITCHES or allocate more CPU cores" >&2
+        echo "See: $CBENCH_LOG" >&2
+        echo "See: https://github.com/dfarrell07/wcbench/issues/16" >&2
         echo "Run $(next_run_num) failed to record a CBench average. CBench details:" >> $CBENCH_LOG
         echo "$cbench_output" >> $CBENCH_LOG
         return
     else
         echo "Average responses/second: $cbench_avg"
+        results[$(name_to_index "cbench_min")]=$cbench_min
+        results[$(name_to_index "cbench_max")]=$cbench_max
         results[$(name_to_index "cbench_avg")]=$cbench_avg
     fi
 
@@ -447,48 +487,127 @@ uninstall_odl()
     fi
 }
 
+###############################################################################
+# Checks if the given feature is in the list to be installed at boot
+# Globals:
+#   FEATURES_FILE
+#   EX_OK
+#   EX_NOT_FOUND
+# Arguments:
+#   Feature to search featuresBoot list for
+# Returns:
+#   EX_OK if feature already in featuresBoot list
+#   EX_NOT_FOUND if feature isn't in featuresBoot list
+###############################################################################
+is_in_featuresBoot()
+{
+    feature=$1
+
+    # Check if feature is already set to be installed at boot
+    if $(grep featuresBoot= $FEATURES_FILE | grep -q $feature); then
+        return $EX_OK
+    else
+        return $EX_NOT_FOUND
+    fi
+}
+
+###############################################################################
+# Adds features to be installed by Karaf at ODL boot
+# Globals:
+#   FEATURES_FILE
+#   EX_OK
+#   EX_ERR
+# Arguments:
+#   Feature to append to end of featuresBoot CSV list
+# Returns:
+#   EX_OK if feature already is installed or was successfully added
+#   EX_ERR if failed to add feature to group installed at boot
+###############################################################################
+add_to_featuresBoot()
+{
+    feature=$1
+
+    # Check if feature is already set to be installed at boot
+    if is_in_featuresBoot $feature; then
+        echo "$feature is already set to be installed at boot"
+        return $EX_OK
+    fi
+
+    # Append feature to end of boot-install list
+    sed -i "/^featuresBoot=/ s/$/,$feature/" $FEATURES_FILE
+
+    # Check if feature was added to install list correctly
+    if is_in_featuresBoot $feature; then
+        echo "$feature added to features installed at boot"
+        return $EX_OK
+    else
+        echo "ERROR: Failed to add $feature to features installed at boot"
+        return $EX_ERR
+    fi
+}
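+
+# Illustrative only: with feature=odl-openflowplugin-flow-services, the sed in
+# add_to_featuresBoot turns a line like
+#   featuresBoot=config,standard,ssh
+# into
+#   featuresBoot=config,standard,ssh,odl-openflowplugin-flow-services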
+
 ###############################################################################
-# Installs latest build of the OpenDaylight controller
-# Note that the installed build is via an Integration team Jenkins job
+# Installs the OpenDaylight controller (Helium 0.2.1 SR1, via the OpenDaylight Nexus)
 # Globals:
-#   BASE_DIR
+#   ODL_DIR
+#   VERBOSE
 #   ODL_ZIP_DIR
+#   BASE_DIR
+#   ODL_ZIP_PATH
 #   ODL_ZIP
 #   EX_ERR
 # Arguments:
 #   None
 # Returns:
-#   EX_ERR if ODL download fails, typically because of version bump
+#   EX_ERR if ODL install fails
 ###############################################################################
 install_opendaylight()
 {
-    # Remove old controller code
-    uninstall_odl
+    # Only remove the unzipped code, as the zip is large and unlikely to have changed.
+    if [ -d $ODL_DIR ]; then
+        echo "Removing $ODL_DIR"
+        rm -rf $ODL_DIR
+    fi
 
     # Install required packages
     echo "Installing OpenDaylight dependencies"
-    sudo yum install -y java-1.7.0-openjdk unzip wget &> /dev/null
+    if "$VERBOSE" = true; then
+        sudo yum install -y java-1.7.0-openjdk unzip wget
+    else
+        sudo yum install -y java-1.7.0-openjdk unzip wget &> /dev/null
+    fi
 
-    # Grab last successful build
-    echo "Downloading last successful ODL build"
-    wget -P $BASE_DIR "https://jenkins.opendaylight.org/integration/job/integration-master-project-centralized-integration/lastSuccessfulBuild/artifact/distributions/base/target/$ODL_ZIP" &> /dev/null
+    # If we already have the zip archive, use that.
+    if [ -f $ODL_ZIP_PATH ]; then
+        echo "Using local $ODL_ZIP_PATH. Pass -d flag to remove."
+    else
+        # Grab OpenDaylight Helium 0.2.1
+        echo "Downloading OpenDaylight Helium 0.2.1"
+        if "$VERBOSE" = true; then
+            wget -P $BASE_DIR "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.1-Helium-SR1/$ODL_ZIP"
+        else
+            wget -P $BASE_DIR "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.1-Helium-SR1/$ODL_ZIP" &> /dev/null
+        fi
+    fi
+
+    # Confirm that download was successful
     if [ ! -f $ODL_ZIP_PATH ]; then
         echo "WARNING: Failed to dl ODL. Version bumped? If so, update \$ODL_ZIP" >&2
         return $EX_ERR
     fi
-    echo "Unzipping last successful ODL build"
-    unzip -d $BASE_DIR $ODL_ZIP_PATH &> /dev/null
 
-    # Make some plugin changes that are apparently required for CBench
-    echo "Downloading openflowplugin"
-    wget -P $PLUGIN_DIR 'https://jenkins.opendaylight.org/openflowplugin/job/openflowplugin-merge/lastSuccessfulBuild/org.opendaylight.openflowplugin$drop-test/artifact/org.opendaylight.openflowplugin/drop-test/0.0.3-SNAPSHOT/drop-test-0.0.3-SNAPSHOT.jar' &> /dev/null
-    echo "Removing simpleforwarding plugin"
-    rm $PLUGIN_DIR/org.opendaylight.controller.samples.simpleforwarding-0.4.2-SNAPSHOT.jar
-    echo "Removing arphandler plugin"
-    rm $PLUGIN_DIR/org.opendaylight.controller.arphandler-0.5.2-SNAPSHOT.jar
+    # Unzip ODL archive
+    echo "Unzipping OpenDaylight Helium 0.2.1"
+    if "$VERBOSE" = true; then
+        unzip -d $BASE_DIR $ODL_ZIP_PATH
+    else
+        unzip -d $BASE_DIR $ODL_ZIP_PATH &> /dev/null
+    fi
 
-    # TODO: Change controller log level to ERROR. Confirm this is necessary.
-    # Relevant Issue: https://github.com/dfarrell07/wcbench/issues/3
+    # Add required features to the list Karaf installs at ODL boot
+    add_to_featuresBoot "odl-openflowplugin-flow-services"
+    add_to_featuresBoot "odl-openflowplugin-drop-test"
 }
 
 ###############################################################################
@@ -514,6 +633,9 @@ odl_installed()
 # Assumes you've checked that ODL is installed
 # Globals:
 #   ODL_DIR
+#   VERBOSE
+#   EX_OK
+#   EX_NOT_FOUND
 # Arguments:
 #   None
 # Returns:
@@ -524,7 +646,11 @@ odl_started()
 {
     old_cwd=$PWD
     cd $ODL_DIR
-    ./run.sh -status &> /dev/null
+    if "$VERBOSE" = true; then
+        ./bin/status
+    else
+        ./bin/status &> /dev/null
+    fi
     if [ $? = 0 ]; then
         return $EX_OK
     else
@@ -541,8 +667,7 @@ odl_started()
 #   ODL_DIR
 #   EX_OK
 #   processors
-#   OSGI_PORT
-#   ODL_STARTUP_DELAY
+#   VERBOSE
 # Arguments:
 #   None
 # Returns:
@@ -558,36 +683,30 @@ start_opendaylight()
     else
         echo "Starting OpenDaylight"
         if [ -z $processors ]; then
-            ./run.sh -start $OSGI_PORT -of13 -Xms1g -Xmx4g &> /dev/null
+            if "$VERBOSE" = true; then
+                ./bin/start
+            else
+                ./bin/start &> /dev/null
+            fi
         else
             echo "Pinning ODL to $processors processor(s)"
-            if [ $processors == 1 ]; then
-                echo "Increasing ODL start time, as 1 processor will slow it down"
-                ODL_STARTUP_DELAY=120
-            fi
             # Use taskset to pin ODL to a given number of processors
-            taskset -c 0-$(expr $processors - 1) ./run.sh -start $OSGI_PORT -of13 -Xms1g -Xmx4g &> /dev/null
+            if "$VERBOSE" = true; then
+                taskset -c 0-$(expr $processors - 1) ./bin/start
+            else
+                taskset -c 0-$(expr $processors - 1) ./bin/start  &> /dev/null
+            fi
         fi
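+        # Illustrative: with processors=2, the taskset invocation above expands
+        # to `taskset -c 0-1 ./bin/start`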
     fi
     cd $old_cwd
-    # TODO: Smarter block until ODL is actually up
-    # Relevant Issue: https://github.com/dfarrell07/wcbench/issues/6
-    echo "Giving ODL $ODL_STARTUP_DELAY seconds to get up and running"
-    while [ $ODL_STARTUP_DELAY -gt 0 ]; do
-        sleep 10
-        let ODL_STARTUP_DELAY=ODL_STARTUP_DELAY-10
-        echo "$ODL_STARTUP_DELAY seconds remaining"
-    done
     issue_odl_config
 }
 
 ###############################################################################
-# Give `dropAllPackets on` command via telnet to OSGi
-# See: http://goo.gl/VEJIRc
-# TODO: This can be issued too early. Smarter check needed.
-# Relevant Issue: https://github.com/dfarrell07/wcbench/issues/6
+# Set `dropAllPacketsRpc on` and log level to ERROR via Karaf shell
 # Globals:
-#   OSGI_PORT
+#   VERBOSE
+#   KARAF_SHELL_PORT
 # Arguments:
 #   None
 # Returns:
@@ -595,19 +714,58 @@ start_opendaylight()
 ###############################################################################
 issue_odl_config()
 {
-    if ! command -v telnet &> /dev/null; then
-        echo "Installing telnet, as it's required for issuing ODL config."
-        sudo yum install -y telnet &> /dev/null
+    # This could be done with public key crypto, but sshpass is easier
+    if ! command -v sshpass &> /dev/null; then
+        echo "Installing sshpass. It's used for issuing ODL config."
+        if "$VERBOSE" = true; then
+            sudo yum install -y sshpass
+        else
+            sudo yum install -y sshpass &> /dev/null
+        fi
+    fi
+
+    # Set `dropAllPacketsRpc on`
+    echo "Will repeatedly attempt connecting to Karaf shell until it's ready"
+    # Loop until exit status 0 (success) given by Karaf shell
+    # Exit status 255 means Karaf shell isn't open for SSH connections yet
+    # Exit status 1 means `dropAllPacketsRpc on` isn't runnable yet
+    if "$VERBOSE" = true; then
+        until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost dropallpacketsrpc on
+        do
+            echo "Karaf shell isn't ready yet, sleeping 5 seconds..."
+            sleep 5
+        done
+    else
+        until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost dropallpacketsrpc on &> /dev/null
+        do
+            sleep 5
+        done
     fi
-    echo "Issuing \`dropAllPacketsRpc on\` command via telnet to localhost:$OSGI_PORT"
-    # NB: Not using sleeps results in silent failures (cmd has no effect)
-    (sleep 3; echo dropAllPacketsRpc on; sleep 3) | telnet localhost $OSGI_PORT
+    echo "Issued \`dropAllPacketsRpc on\` command via Karaf shell to localhost:$KARAF_SHELL_PORT"
+
+    # Change log level to ERROR
+    # Loop until exit status 0 (success) given by Karaf shell
+    # Exit status 255 means Karaf shell isn't open for SSH connections yet
+    if "$VERBOSE" = true; then
+        until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost log:set ERROR
+        do
+            echo "Karaf shell isn't ready yet, sleeping 5 seconds..."
+            sleep 5
+        done
+    else
+        until sshpass -p karaf ssh -p $KARAF_SHELL_PORT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@localhost log:set ERROR &> /dev/null
+        do
+            sleep 5
+        done
+    fi
+    echo "Issued \`log:set ERROR\` command via Karaf shell to localhost:$KARAF_SHELL_PORT"
 }
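+
+# For reference, the command the loops in issue_odl_config retry can also be run
+# by hand once ODL is up (8101 is Karaf's default SSH port; this script uses
+# $KARAF_SHELL_PORT), e.g.:
+#   sshpass -p karaf ssh -p 8101 -o StrictHostKeyChecking=no karaf@localhost log:set ERROR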
 
 ###############################################################################
-# Stops OpenDaylight using run.sh
+# Stops OpenDaylight
 # Globals:
 #   ODL_DIR
+#   VERBOSE
 # Arguments:
 #   None
 # Returns:
@@ -618,8 +776,19 @@ stop_opendaylight()
     old_cwd=$PWD
     cd $ODL_DIR
     if odl_started; then
-        echo "Stopping OpenDaylight"
-        ./run.sh -stop &> /dev/null
+        echo "Told ODL to stop. Waiting on it to do so..."
+        echo "This check is useless if you have other Java processes running (ctrl+c it)."
+        if "$VERBOSE" = true; then
+            ./bin/stop
+        else
+            ./bin/stop &> /dev/null
+        fi
+        # Loop until actually stopped
+        until ! pgrep java &> /dev/null
+        do
+            sleep .5
+        done
+        echo "OpenDaylight has stopped."
     else
         echo "OpenDaylight isn't running"
     fi
@@ -651,8 +820,10 @@ uninstall_cbench()
         echo "Removing $CBENCH_BIN"
         sudo rm -f $CBENCH_BIN
     fi
-    # TODO: Remove oflops binary
-    # Relevant issue: https://github.com/dfarrell07/wcbench/issues/25
+    if [ -f $OFLOPS_BIN ]; then
+        echo "Removing $OFLOPS_BIN"
+        sudo rm -f $OFLOPS_BIN
+    fi
 }
 
 # If executed with no options
@@ -661,14 +832,21 @@ if [ $# -eq 0 ]; then
     exit $EX_USAGE
 fi
 
+# Used to output help if no valid action results from arguments
+action_taken=false
+
 # Parse options given from command line
-while getopts ":hrcip:ot:kd" opt; do
+while getopts ":hvrcip:ot:kd" opt; do
     case "$opt" in
         h)
             # Help message
             usage
             exit $EX_OK
             ;;
+        v)
+            # Output debug info verbosely
+            VERBOSE=true
+            ;;
         r)
             # Run CBench against OpenDaylight
             if [ $CONTROLLER_IP = "localhost" ]; then
@@ -682,14 +860,17 @@ while getopts ":hrcip:ot:kd" opt; do
                 fi
             fi
             run_cbench
+            action_taken=true
             ;;
         c)
             # Install CBench
             install_cbench
+            action_taken=true
             ;;
         i)
-            # Install OpenDaylight from last successful build
+            # Install OpenDaylight
             install_opendaylight
+            action_taken=true
             ;;
         p)
             # Pin a given number of processors
@@ -705,12 +886,13 @@ while getopts ":hrcip:ot:kd" opt; do
             fi
             ;;
         o)
-            # Run OpenDaylight from last successful build
+            # Run OpenDaylight
             if ! odl_installed; then
                 echo "OpenDaylight isn't installed, can't start it"
                 exit $EX_ERR
             fi
             start_opendaylight
+            action_taken=true
             ;;
         t)
             # Set CBench run time in minutes
@@ -735,11 +917,13 @@ while getopts ":hrcip:ot:kd" opt; do
                 exit $EX_ERR
             fi
             stop_opendaylight
+            action_taken=true
             ;;
         d)
             # Delete local ODL and CBench code
             uninstall_odl
             uninstall_cbench
+            action_taken=true
             ;;
         *)
             # Print usage message
@@ -747,3 +931,9 @@ while getopts ":hrcip:ot:kd" opt; do
             exit $EX_USAGE
     esac
 done
+
+# Output help message if no valid action was taken
+if ! "$action_taken" = true; then
+    usage
+    exit $EX_USAGE
+fi
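+
+# Example invocation (script name assumed to be wcbench.sh): flags are processed
+# left to right, so install before starting and start before running CBench, e.g.:
+#   ./wcbench.sh -v -c -i -o -r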