Enable check for status after member starts up
[integration/test.git] / csit / libraries / ClusterManagement.robot
index bac88b7dcf2c5c52b3e04b1e9245071e47fb6987..13e87b6cdef1b5d4ac28fbbedadfdb174f388294 100644 (file)
@@ -50,8 +50,10 @@ ${JOLOKIA_READ_URI}    jolokia/read/org.opendaylight.controller
 # Bug 9044 workaround: delete etc/host.key before restart.
 @{ODL_DEFAULT_DATA_PATHS}    tmp/    data/    cache/    snapshots/    journal/    etc/opendaylight/current/    etc/host.key
 ${RESTCONF_MODULES_DIR}    ${CURDIR}/../variables/restconf/modules
-${SINGLETON_NETCONF_DEVICE_ID_PREFIX}    /odl-general-entity:entity[odl-general-entity:name='KeyedInstanceIdentifier{targetType=interface org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node, path=[org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology, org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology[key=TopologyKey [_topologyId=Uri [_value=topology-netconf]]], org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node[key=NodeKey [_nodeId=Uri [_value=
-${SINGLETON_NETCONF_DEVICE_ID_SUFFIX}    ]]]]}']
+${SINGLETON_NETCONF_DEVICE_ID_PREFIX_OLD}    /odl-general-entity:entity[odl-general-entity:name='KeyedInstanceIdentifier{targetType=interface org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node, path=[org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology, org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology[key=TopologyKey [_topologyId=Uri [_value=topology-netconf]]], org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node[key=NodeKey [_nodeId=Uri [_value=
+${SINGLETON_NETCONF_DEVICE_ID_SUFFIX_OLD}    ]]]]}']
+${SINGLETON_NETCONF_DEVICE_ID_PREFIX}    /odl-general-entity:entity[odl-general-entity:name='KeyedInstanceIdentifier{targetType=interface org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node, path=[org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology, org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology[key=TopologyKey{_topologyId=Uri{_value=topology-netconf}}], org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node[key=NodeKey{_nodeId=Uri{_value=
+${SINGLETON_NETCONF_DEVICE_ID_SUFFIX}    }}]]}']
 ${SINGLETON_BGPCEP_DEVICE_ID_PREFIX}    /odl-general-entity:entity[odl-general-entity:name='
 ${SINGLETON_BGPCEP_DEVICE_ID_SUFFIX}    -service-group']
 ${SINGLETON_SXP_DEVICE_ID_PREFIX}    /odl-general-entity:entity[odl-general-entity:name='
@@ -257,11 +259,11 @@ Get_Owner_And_Candidates_For_Device_Singleton_Netconf
     ...    Parsing method is set as netconf (using netconf device id prefix and suffix)
     # Get election entity type results
     ${type} =    BuiltIn.Set_Variable    ${SINGLETON_ELECTION_ENTITY_TYPE}
-    ${id} =    BuiltIn.Set_Variable    ${SINGLETON_NETCONF_DEVICE_ID_PREFIX}${device_name}${SINGLETON_NETCONF_DEVICE_ID_SUFFIX}
+    ${id} =    CompareStream.Set_Variable_If_At_Least_Fluorine    ${SINGLETON_NETCONF_DEVICE_ID_PREFIX}${device_name}${SINGLETON_NETCONF_DEVICE_ID_SUFFIX}    ${SINGLETON_NETCONF_DEVICE_ID_PREFIX_OLD}${device_name}${SINGLETON_NETCONF_DEVICE_ID_SUFFIX_OLD}
     ${owner_1}    ${candidate_list_1} =    Get_Owner_And_Candidates_For_Type_And_Id    ${type}    ${id}    ${member_index}    http_timeout=${http_timeout}
     # Get change ownership entity type results
     ${type} =    BuiltIn.Set_Variable    ${SINGLETON_CHANGE_OWNERSHIP_ENTITY_TYPE}
-    ${id} =    BuiltIn.Set_Variable    ${SINGLETON_NETCONF_DEVICE_ID_PREFIX}${device_name}${SINGLETON_NETCONF_DEVICE_ID_SUFFIX}
+    ${id} =    CompareStream.Set_Variable_If_At_Least_Fluorine    ${SINGLETON_NETCONF_DEVICE_ID_PREFIX}${device_name}${SINGLETON_NETCONF_DEVICE_ID_SUFFIX}    ${SINGLETON_NETCONF_DEVICE_ID_PREFIX_OLD}${device_name}${SINGLETON_NETCONF_DEVICE_ID_SUFFIX_OLD}
     ${owner_2}    ${candidate_list_2} =    Get_Owner_And_Candidates_For_Type_And_Id    ${type}    ${id}    ${member_index}    http_timeout=${http_timeout}
     # Owners must be the same; if not, some election or ownership change is still in progress
     BuiltIn.Should_Be_Equal_As_Integers    ${owner_1}    ${owner_2}    Owners for device ${device_name} are not the same
@@ -419,15 +421,18 @@ Kill_Members_From_List_Or_All
     [Return]    ${updated_index_list}
 
 Stop_Single_Member
-    [Arguments]    ${member}    ${original_index_list}=${EMPTY}    ${confirm}=True
+    [Arguments]    ${member}    ${original_index_list}=${EMPTY}    ${confirm}=True    ${msg}=${EMPTY}
     [Documentation]    Convenience keyword that stops the specified member of the cluster.
     ...    The KW will return a list of available members: \${updated_index_list}=\${original_index_list}-\${member}
     ${index_list} =    ClusterManagement__Build_List    ${member}
+    ${member_ip} =    Return_Member_IP    ${member}
+    ${msg} =    BuiltIn.Set_Variable_If    "${msg}" == "${EMPTY}"    Stopping ODL${member} ${member_ip}    Stopping ODL${member} ${member_ip}, ${msg}
+    KarafKeywords.Log_Message_To_Controller_Karaf    ${msg}
     ${updated_index_list} =    Stop_Members_From_List_Or_All    ${index_list}    ${original_index_list}    ${confirm}
     [Return]    ${updated_index_list}
 
 Stop_Members_From_List_Or_All
-    [Arguments]    ${member_index_list}=${EMPTY}    ${original_index_list}=${EMPTY}    ${confirm}=True    ${timeout}=120s
+    [Arguments]    ${member_index_list}=${EMPTY}    ${original_index_list}=${EMPTY}    ${confirm}=True    ${timeout}=240s
     [Documentation]    If the list is empty, stops all ODL instances. Otherwise, stops the members listed in \${member_index_list}.
     ...    If \${confirm} is True, verify the stopped instances are no longer running.
     ...    The KW will return a list of available members: \${updated_index_list}=\${original_index_list}-\${member_index_list}
@@ -443,14 +448,18 @@ Stop_Members_From_List_Or_All
     [Return]    ${updated_index_list}
 
 Start_Single_Member
-    [Arguments]    ${member}    ${wait_for_sync}=True    ${timeout}=300s    ${check_system_status}=False    ${service_list}=@{EMPTY}
+    [Arguments]    ${member}    ${wait_for_sync}=True    ${timeout}=300s    ${msg}=${EMPTY}    ${check_system_status}=True    ${verify_restconf}=True
+    ...    ${service_list}=${EMPTY_LIST}
     [Documentation]    Convenience keyword that starts the specified member of the cluster.
     ${index_list} =    ClusterManagement__Build_List    ${member}
-    Start_Members_From_List_Or_All    ${index_list}    ${wait_for_sync}    ${timeout}    check_system_status=${check_system_status}    service_list=@{service_list}
+    ${member_ip} =    Return_Member_IP    ${member}
+    ${msg} =    BuiltIn.Set_Variable_If    "${msg}" == "${EMPTY}"    Starting ODL${member} ${member_ip}    Starting ODL${member} ${member_ip}, ${msg}
+    KarafKeywords.Log_Message_To_Controller_Karaf    ${msg}
+    Start_Members_From_List_Or_All    ${index_list}    ${wait_for_sync}    ${timeout}    check_system_status=${check_system_status}    verify_restconf=${verify_restconf}    service_list=${service_list}
 
 Start_Members_From_List_Or_All
     [Arguments]    ${member_index_list}=${EMPTY}    ${wait_for_sync}=True    ${timeout}=300s    ${karaf_home}=${EMPTY}    ${export_java_home}=${EMPTY}    ${gc_log_dir}=${EMPTY}
-    ...    ${check_system_status}=False    ${service_list}=@{EMPTY}
+    ...    ${check_system_status}=True    ${verify_restconf}=True    ${service_list}=${EMPTY_LIST}
     [Documentation]    If the list is empty, start all cluster members. Otherwise, start members based on present indices.
     ...    If ${wait_for_sync}, wait for cluster sync on listed members.
     ...    Optionally, karaf_home can be overridden. Optionally, a specific JAVA_HOME is used for starting.
@@ -461,12 +470,29 @@ Start_Members_From_List_Or_All
     ${gc_filepath} =    BuiltIn.Set_Variable_If    """${karaf_home}""" != ""    ${karaf_home}/data/log/gc_${epoch}.log    ${GC_LOG_PATH}/gc_${epoch}.log
     ${gc_options} =    BuiltIn.Set_Variable_If    "docker" not in """${node_start_command}"""    -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:${gc_filepath}    ${EMPTY}
     Run_Bash_Command_On_List_Or_All    command=${command} ${gc_options}    member_index_list=${member_index_list}
-    BuiltIn.Return_From_Keyword_If    not ${wait_for_sync}
-    BuiltIn.Wait_Until_Keyword_Succeeds    ${timeout}    10s    Check_Cluster_Is_In_Sync    member_index_list=${member_index_list}
-    BuiltIn.Return_From_Keyword_If    not ${check_system_status}
-    CompareStream.Run_Keyword_If_At_Least_Oxygen    Wait Until Keyword Succeeds    60    2    ClusterManagement.Check Status Of Services Is OPERATIONAL    @{service_list}
+    BuiltIn.Wait_Until_Keyword_Succeeds    ${timeout}    10s    Verify_Members_Are_Ready    ${member_index_list}    ${wait_for_sync}    ${verify_restconf}
+    ...    ${check_system_status}    ${service_list}
     [Teardown]    Run_Bash_Command_On_List_Or_All    command=netstat -pnatu | grep 2550
 
+Verify_Members_Are_Ready
+    [Arguments]    ${member_index_list}    ${verify_cluster_sync}    ${verify_restconf}    ${verify_system_status}    ${service_list}
+    [Documentation]    Verifies the specified readiness conditions for the given listed members after startup.
+    ...    If ${verify_cluster_sync}, verifies the datastores have synced with the rest of the cluster.
+    ...    If ${verify_restconf}, verifies RESTCONF is available.
+    ...    If ${verify_system_status}, verifies the system services are OPERATIONAL.
+    BuiltIn.Run_Keyword_If    ${verify_cluster_sync}    Check_Cluster_Is_In_Sync    ${member_index_list}
+    BuiltIn.Run_Keyword_If    ${verify_restconf}    Verify_Restconf_Is_Available    ${member_index_list}
+    # For backward compatibility: some consumers may not pass @{service_list}, and since a list cannot be used as a default
+    # value, check here whether it is empty and skip the service status check, which would otherwise throw an error.
+    BuiltIn.Run_Keyword_If    ${verify_system_status} and ("${service_list}" != "[[]]")    ClusterManagement.Check Status Of Services Is OPERATIONAL    @{service_list}
+
+Verify_Restconf_Is_Available
+    [Arguments]    ${member_index_list}
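+    [Documentation]    Verify that RESTCONF is up on each listed member by issuing a GET of the restconf modules resource.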
+    ${index_list} =    List_Indices_Or_All    given_list=${member_index_list}
+    : FOR    ${index}    IN    @{index_list}
+    \    ${session} =    Resolve_Http_Session_For_Member    member_index=${index}
+    \    TemplatedRequests.Get_As_Json_Templated    session=${session}    folder=${RESTCONF_MODULES_DIR}    verify=False
+
 Freeze_Single_Member
     [Arguments]    ${member}
     [Documentation]    Convenience keyword that stops the specified member of the cluster by freezing the jvm.