From 9192bdd94707b997cf6464b1920a8e9eff44988f Mon Sep 17 00:00:00 2001
From: Vratko Polak
Date: Fri, 21 Apr 2017 11:44:46 +0200
Subject: [PATCH] Bug 8138: Do not clean journal and snapshots

Change-Id: If50aa4ebda02f9155296ff870c50c98b48173701
Signed-off-by: Vratko Polak
---
 csit/libraries/ClusterManagement.robot                      | 2 ++
 .../Clustering_Datastore/car_outage_corners.robot           | 6 +++---
 .../suites/netconf/clusteringscale/topology_leader_ha.robot | 2 --
 csit/suites/netconf/clusteringscale/topology_owner_ha.robot | 2 --
 4 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/csit/libraries/ClusterManagement.robot b/csit/libraries/ClusterManagement.robot
index a3f83ce742..3a72b01fa1 100644
--- a/csit/libraries/ClusterManagement.robot
+++ b/csit/libraries/ClusterManagement.robot
@@ -425,6 +425,8 @@ Freeze_Or_Unfreeze_Members_From_List_Or_All
 Clean_Journals_And_Snapshots_On_List_Or_All
     [Arguments]    ${member_index_list}=${EMPTY}    ${karaf_home}=${KARAF_HOME}
     [Documentation]    Delete journal and snapshots directories on every node listed (or all).
+    ...    BEWARE: If only a subset of members is cleaned, this causes RetiredGenerationException in Carbon after the affected node restarts.
+    ...    See https://bugs.opendaylight.org/show_bug.cgi?id=8138
     ${index_list} =    List_Indices_Or_All    given_list=${member_index_list}
     ${command} =    Set Variable    rm -rf "${karaf_home}/journal" "${karaf_home}/snapshots"
     : FOR    ${index}    IN    @{index_list}    # usually: 1, 2, 3.
diff --git a/csit/suites/controller/Clustering_Datastore/car_outage_corners.robot b/csit/suites/controller/Clustering_Datastore/car_outage_corners.robot
index 507a2c2a26..f896324f4f 100644
--- a/csit/suites/controller/Clustering_Datastore/car_outage_corners.robot
+++ b/csit/suites/controller/Clustering_Datastore/car_outage_corners.robot
@@ -42,6 +42,7 @@ ${CLUSTER_DIR}    ${CURDIR}/../../../variables/clustering
 *** Test Cases ***
 Kill_Majority_Of_The_Followers
     [Documentation]    Kill half plus one car Follower members and set reviving followers down (otherwsise tipping followers cannot join cluster).
+    ...    Mark most of the killed members as explicitly down, to allow the surviving leader to make progress.
     ClusterManagement.Kill_Members_From_List_Or_All    member_index_list=${list_of_killing}    confirm=True
     : FOR    ${index}    IN    @{list_of_reviving}
     \    ${data}    OperatingSystem.Get File    ${CLUSTER_DIR}/member_down.json
@@ -56,9 +57,8 @@ Attempt_To_Add_Cars_To_Leader
     # TODO: Is there a specific status and mesage to require in this scenario?
     BuiltIn.Should_Contain    ${message}    '50
 
-Clean_And_Start_Tipping_Follower
+Start_Tipping_Follower
     [Documentation]    Start one Follower member without persisted data.
-    ClusterManagement.Clean_Journals_And_Snapshots_On_List_Or_All    member_index_list=${list_of_tipping}
     ClusterManagement.Start_Members_From_List_Or_All    member_index_list=${list_of_tipping}    wait_for_sync=True    timeout=${MEMBER_START_TIMEOUT}
     BuiltIn.Wait_Until_Keyword_Succeeds    30s    2s    ClusterManagement.Verify_Leader_Exists_For_Each_Shard    shard_name_list=${SHARD_NAME_LIST}    shard_type=config    member_index_list=${list_of_majority}
@@ -71,7 +71,7 @@ See_Cars_On_Existing_Members
     : FOR    ${session}    IN    @{list_of_majority}
     \    TemplatedRequests.Get_As_Json_Templated    folder=${VAR_DIR}/cars    session=${session}    verify=True    iterations=${CAR_ITEMS}    iter_start=${MAJORITY_START_I}
 
-Clean_And_Start_Other_Followers
+Start_Other_Followers
     [Documentation]    Start other followers without persisted data.
     ClusterManagement.Start_Members_From_List_Or_All    member_index_list=${list_of_reviving}    wait_for_sync=True    timeout=${MEMBER_START_TIMEOUT}
     BuiltIn.Wait_Until_Keyword_Succeeds    30s    2s    ClusterManagement.Verify_Leader_Exists_For_Each_Shard    shard_name_list=${SHARD_NAME_LIST}    shard_type=config
diff --git a/csit/suites/netconf/clusteringscale/topology_leader_ha.robot b/csit/suites/netconf/clusteringscale/topology_leader_ha.robot
index 4dd7a2d5d3..b1062ca3fb 100644
--- a/csit/suites/netconf/clusteringscale/topology_leader_ha.robot
+++ b/csit/suites/netconf/clusteringscale/topology_leader_ha.robot
@@ -91,9 +91,7 @@ Reboot_Topology_Leader
     ...    After cluster sync, sleep additional time to ensure manager processes requests with the rebooted member fully rejoined.
     [Tags]    @{TAGS_NONCRITICAL}    # To avoid long WUKS list expanded in log.html
     ClusterManagement.Kill_Single_Member    ${topology_config_leader_index}
-    # TODO: Introduce ClusterManagement.Clean_Journals_And_Snapshots_On_Single_Member
     ${owner_list} =    BuiltIn.Create_List    ${topology_config_leader_index}
-    ClusterManagement.Clean_Journals_And_Snapshots_On_List_Or_All    ${owner_list}
     ClusterManagement.Start_Single_Member    ${topology_config_leader_index}
     BuiltIn.Comment    FIXME: Replace sleep with WUKS when it becomes clear what to wait for.
     ${sleep_time} =    Get_Typical_Time    coefficient=3.0
diff --git a/csit/suites/netconf/clusteringscale/topology_owner_ha.robot b/csit/suites/netconf/clusteringscale/topology_owner_ha.robot
index 8bedf52f14..db92c400f4 100644
--- a/csit/suites/netconf/clusteringscale/topology_owner_ha.robot
+++ b/csit/suites/netconf/clusteringscale/topology_owner_ha.robot
@@ -114,9 +114,7 @@ Reboot_Manager_Owner
     ...    After cluster sync, sleep additional time to ensure manager processes requests with the rebooted member fully rejoined.
     [Tags]    @{TAGS_NONCRITICAL}    # To avoid long WUKS list expanded in log.html
     ClusterManagement.Kill_Single_Member    ${netconf_manager_owner_index}
-    # TODO: Introduce ClusterManagement.Clean_Journals_And_Snapshots_On_Single_Member
     ${owner_list} =    BuiltIn.Create_List    ${netconf_manager_owner_index}
-    ClusterManagement.Clean_Journals_And_Snapshots_On_List_Or_All    ${owner_list}
     ClusterManagement.Start_Single_Member    ${netconf_manager_owner_index}
     BuiltIn.Comment    FIXME: Replace sleep with WUKS when it becomes clear what to wait for.
     ${sleep_time} =    Get_Typical_Time    coefficient=3.0
-- 
2.36.6