From 6cbb4db4d761fca35e516a294a4236cae1f5da4a Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Thu, 12 Dec 2024 14:32:09 -0500
Subject: [PATCH] Allow for existing cluster and service, reinstate existing service config

Signed-off-by: Webster Mudge
---
 tests/unit/conftest.py                    | 114 +++++++++---------
 .../service_config/test_service_config.py | 114 ++++++++++--------
 2 files changed, 126 insertions(+), 102 deletions(-)

diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 51ab95e7..5a492335 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -127,7 +127,7 @@ def conn():
 
 
 @pytest.fixture(scope="session")
-def cm_api_client(conn):
+def cm_api_client(conn) -> ApiClient:
     """Create a Cloudera Manager API client, resolving HTTP/S and version URL.
 
     Args:
@@ -172,68 +172,74 @@ def cm_api_client(conn):
 def target_cluster(cm_api_client, request):
     """Create a test cluster."""
 
-    if os.getenv("CDH_VERSION", None):
-        cdh_version = os.getenv("CDH_VERSION")
-    else:
-        raise Exception("No CDH_VERSION found. Please set this environment variable.")
+    cluster_api = ClustersResourceApi(cm_api_client)
 
-    name = (
-        Path(request.fixturename).stem
-        + "_"
-        + "".join(random.choices(string.ascii_lowercase, k=6))
-    )
+    if os.getenv("CM_CLUSTER_NAME", None):
+        yield cluster_api.read_cluster(cluster_name=os.getenv("CM_CLUSTER_NAME"))
+    else:
+        if os.getenv("CDH_VERSION", None):
+            cdh_version = os.getenv("CDH_VERSION")
+        else:
+            raise Exception(
+                "No CDH_VERSION found. Please set this environment variable."
+            )
 
-    cluster_api = ClustersResourceApi(cm_api_client)
-    parcels_api = ParcelsResourceApi(cm_api_client)
-    parcel_api = ParcelResourceApi(cm_api_client)
-    host_api = HostsResourceApi(cm_api_client)
-
-    try:
-        # Create the initial cluster
-        config = ApiCluster(
-            name=name,
-            full_version=cdh_version,
+        name = (
+            Path(request.fixturename).stem
+            + "_"
+            + "".join(random.choices(string.ascii_lowercase, k=6))
         )
-        cluster_api.create_clusters(body=ApiClusterList(items=[config]))
-
-        # Get first free host and assign to the cluster
-        all_hosts = host_api.read_hosts()
-        host = next((h for h in all_hosts.items if not h.cluster_ref), None)
+        parcels_api = ParcelsResourceApi(cm_api_client)
+        parcel_api = ParcelResourceApi(cm_api_client)
+        host_api = HostsResourceApi(cm_api_client)
 
-    if host is None:
-            # Roll back the cluster and then raise an error
-            cluster_api.delete_cluster(cluster_name=name)
-            raise Exception("No available hosts to allocate to new cluster")
-        else:
-            cluster_api.add_hosts(
-                cluster_name=name,
-                body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]),
+        try:
+            # Create the initial cluster
+            config = ApiCluster(
+                name=name,
+                full_version=cdh_version,
             )
-        # Find the first CDH parcel version and activate it
-        parcels = parcels_api.read_parcels(cluster_name=name)
-        cdh_parcel = next(
-            (
-                p
-                for p in parcels.items
-                if p.product == "CDH" and p.version.startswith(cdh_version)
+            cluster_api.create_clusters(body=ApiClusterList(items=[config]))
+
+            # Get first free host and assign to the cluster
+            all_hosts = host_api.read_hosts()
+            host = next((h for h in all_hosts.items if not h.cluster_ref), None)
+
+            if host is None:
+                # Roll back the cluster and then raise an error
+                cluster_api.delete_cluster(cluster_name=name)
+                raise Exception("No available hosts to allocate to new cluster")
+            else:
+                cluster_api.add_hosts(
+                    cluster_name=name,
+                    body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]),
+                )
+
+            # Find the first CDH parcel version and activate it
+            parcels = parcels_api.read_parcels(cluster_name=name)
+            cdh_parcel = next(
+                (
+                    p
+                    for p in parcels.items
+                    if p.product == "CDH" and p.version.startswith(cdh_version)
+                )
             )
-        )
-        parcel = Parcel(
-            parcel_api=parcel_api,
-            product=cdh_parcel.product,
-            version=cdh_parcel.version,
-            cluster=name,
-        )
+            parcel = Parcel(
+                parcel_api=parcel_api,
+                product=cdh_parcel.product,
+                version=cdh_parcel.version,
+                cluster=name,
+            )
 
-        parcel.activate()
+            parcel.activate()
 
-        # Reread and return the cluster
-        yield cluster_api.read_cluster(cluster_name=name)
+            # Reread and return the cluster
+            yield cluster_api.read_cluster(cluster_name=name)
 
-        # Deprovision the cluster
-        cluster_api.delete_cluster(cluster_name=name)
-    except ApiException as ae:
-        raise Exception(str(ae))
+            # Deprovision the cluster
+            cluster_api.delete_cluster(cluster_name=name)
+        except ApiException as ae:
+            raise Exception(str(ae))
diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py
index a8ab22f8..ee2070fe 100644
--- a/tests/unit/plugins/modules/service_config/test_service_config.py
+++ b/tests/unit/plugins/modules/service_config/test_service_config.py
@@ -19,6 +19,7 @@
 __metaclass__ = type
 
 import logging
+import os
 import pytest
 
 from pathlib import Path
@@ -47,29 +48,34 @@
 @pytest.fixture(scope="module")
 def target_service(cm_api_client, target_cluster, request):
     api = ServicesResourceApi(cm_api_client)
-    cluster_api = ClustersResourceApi(cm_api_client)
-    name = Path(request.node.name).stem + "_zookeeper"
+    if os.getenv("CM_SERVICE_NAME", None):
+        yield api.read_service(
+            cluster_name=target_cluster.name, service_name=os.getenv("CM_SERVICE_NAME")
+        )
+    else:
+        cluster_api = ClustersResourceApi(cm_api_client)
+        name = Path(request.node.name).stem + "_zookeeper"
 
-    service = ApiService(
-        name=name,
-        type="ZOOKEEPER",
-    )
+        service = ApiService(
+            name=name,
+            type="ZOOKEEPER",
+        )
 
-    api.create_services(
-        cluster_name=target_cluster.name, body=ApiServiceList(items=[service])
-    )
-    cluster_api.auto_assign_roles(cluster_name=target_cluster.name)
+        api.create_services(
+            cluster_name=target_cluster.name, body=ApiServiceList(items=[service])
+        )
+        cluster_api.auto_assign_roles(cluster_name=target_cluster.name)
 
-    # configure = cluster_api.auto_configure(cluster_name=target_cluster.name)
-    wait_for_command(
-        cm_api_client,
-        api.first_run(cluster_name=target_cluster.name, service_name=name),
-    )
+        # configure = cluster_api.auto_configure(cluster_name=target_cluster.name)
+        wait_for_command(
+            cm_api_client,
+            api.first_run(cluster_name=target_cluster.name, service_name=name),
+        )
 
-    yield api.read_service(cluster_name=target_cluster.name, service_name=name)
+        yield api.read_service(cluster_name=target_cluster.name, service_name=name)
 
-    api.delete_service(cluster_name=target_cluster.name, service_name=name)
+        api.delete_service(cluster_name=target_cluster.name, service_name=name)
 
 
 @pytest.fixture
@@ -83,7 +89,13 @@ def target_service_config(cm_api_client, target_service, request):
 
     service_api = ServicesResourceApi(cm_api_client)
 
-    # Set the parameter(s)
+    # Retrieve all of the pre-setup configurations
+    pre = service_api.read_service_config(
+        cluster_name=target_service.cluster_ref.cluster_name,
+        service_name=target_service.name,
+    )
+
+    # Set the test configurations
     # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining
     # configuration entries to not run. Long-term solution is to check-and-set, which is
     # what the Ansible modules do...
@@ -102,18 +114,30 @@ def target_service_config(cm_api_client, target_service, request):
     # Return the targeted service and go run the test
     yield target_service
 
-    # Reset the parameter
-    for k, v in marker.kwargs["service_config"].items():
-        try:
-            service_api.update_service_config(
-                cluster_name=target_service.cluster_ref.cluster_name,
-                service_name=target_service.name,
-                message=f"test_service_config::{request.node.name}::reset",
-                body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]),
-            )
-        except ApiException as ae:
-            if ae.status != 400 or "delete with template" not in str(ae.body):
-                raise Exception(str(ae))
+    # Retrieve all of the post-setup configurations
+    post = service_api.read_service_config(
+        cluster_name=target_service.cluster_ref.cluster_name,
+        service_name=target_service.name,
+    )
+
+    # Reconcile the configurations
+    pre_set = set([c.name for c in pre.items])
+
+    reconciled = pre.items.copy()
+    reconciled.extend(
+        [
+            ApiConfig(name=k.name, value=None)
+            for k in post.items
+            if k.name not in pre_set
+        ]
+    )
+
+    service_api.update_service_config(
+        cluster_name=target_service.cluster_ref.cluster_name,
+        service_name=target_service.name,
+        message=f"test_service_config::{request.node.name}::reset",
+        body=ApiServiceConfig(items=reconciled),
+    )
 
 
 def test_missing_required(conn, module_args):
@@ -202,24 +226,20 @@ def test_set_parameters(conn, module_args, target_service_config):
         }
     )
 
+    expected = dict(autopurgeSnapRetainCount="9", tickTime="1111")
+
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == True
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-    assert len(e.value.config) == 2
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
 
     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == False
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-    assert len(e.value.config) == 2
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
 
 
 @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111))
@@ -234,13 +254,15 @@ def test_unset_parameters(conn, module_args, target_service_config):
         }
     )
 
+    expected = dict(tickTime="1111")
+
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == True
     results = {c["name"]: c["value"] for c in e.value.config}
     assert "autopurgeSnapRetainCount" not in results
-    assert len(e.value.config) == 1
+    assert expected.items() <= results.items()
 
     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
@@ -249,7 +271,7 @@ def test_unset_parameters(conn, module_args, target_service_config):
     assert e.value.changed == False
     results = {c["name"]: c["value"] for c in e.value.config}
     assert "autopurgeSnapRetainCount" not in results
-    assert len(e.value.config) == 1
+    assert expected.items() <= results.items()
 
 
 @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111))
@@ -267,23 +289,19 @@ def test_set_parameters_with_purge(conn, module_args, target_service_config):
         }
     )
 
+    expected = dict(autopurgeSnapRetainCount="9")
+
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == True
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-    assert len(e.value.config) == 1
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
 
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == False
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-    assert len(e.value.config) == 1
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
 
 
 @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222))