From 67239eede40fb58632bc8b8171350545f35b152c Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:45:34 -0500 Subject: [PATCH 01/13] Add pyproject.toml for Hatch and pytest Signed-off-by: Webster Mudge --- pyproject.toml | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++ pytest.ini | 23 ------------------- 2 files changed, 61 insertions(+), 23 deletions(-) create mode 100644 pyproject.toml delete mode 100644 pytest.ini diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..d5ea7976 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,61 @@ +[project] +name = "cluster" +dynamic = ["version"] +description = "cloudera.cluster Ansible collection" +readme = "README.md" +requires-python = ">=3.8" +license = "Apache-2.0" +keywords = [] +authors = [ + { name = "Webster Mudge", email = "wmudge@cloudera.com" }, +] +classifiers = [] +dependencies = [] + +[tool.hatch.version] +path = "galaxy.yml" +pattern = "version:\\s+(?P[\\d\\.]+)" + +[tool.hatch.envs.default] +python = "3.12" +skip-install = true +dependencies = [ + "pre-commit", + "coverage[toml]", + "pytest", + "pytest-mock", + # "pytest-cov", + "molecule", + "molecule-plugins", + "molecule-plugins[ec2]", + "tox-ansible", + "ansible-core<2.17", # For RHEL 8 support + "jmespath", + "cm-client", +] + +[tool.hatch.envs.lint] +python = "3.12" +skip-install = true +extra-dependencies = [ + "ansible-lint", +] + +[tool.hatch.envs.lint.scripts] +run = "pre-commit run -a" + +[tool.pytest.ini_options] +testpaths = [ + "tests", +] +filterwarnings = [ + "ignore:AnsibleCollectionFinder has already been configured", + "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", +] +markers = [ + "prep: Prepare Cloudera Manager and resources for tests", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 03f164ec..00000000 --- a/pytest.ini +++ /dev/null @@ -1,23 +0,0 
@@ -# Copyright 2023 Cloudera, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -[pytest] -filterwarnings = - ignore::DeprecationWarning - ignore:AnsibleCollectionFinder has already been configured:UserWarning - -; log_cli = 1 -; log_cli_level = INFO - -pythonpath = "../../../" From bd2e24e5c62ac4b28490457df2a0ecb25032c0a4 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:46:55 -0500 Subject: [PATCH 02/13] Update parameter reconcilation and value normalization Signed-off-by: Webster Mudge --- plugins/module_utils/cm_utils.py | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index 3fbe3a84..a1667986 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -104,7 +104,9 @@ def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dic def normalize_values(add: dict) -> dict: - """Normalize whitespace of parameter values. + """Normalize parameter values. Strings have whitespace trimmed, integers are + converted to strings, and Boolean values are converted their string representation + and lowercased. 
 Args: add (dict): Parameters to review @@ -112,7 +114,18 @@ Returns: dict: Normalized parameters """ - return {k: (v.strip() if isinstance(v, str) else v) for k, v in add.items()} + + def _normalize(value): + if isinstance(value, str): + return value.strip() + elif isinstance(value, bool): + return str(value).lower() + elif isinstance(value, int): + return str(value) + else: + return value + + return {k: _normalize(v) for k, v in add.items()} def resolve_parameter_updates( @@ -120,7 +133,8 @@ ) -> dict: """Produce a change set between two parameter dictionaries. - The function will normalize parameter values to remove whitespace. + The function will normalize parameter values to remove whitespace from strings, + convert integers and Booleans to their string representations. Args: current (dict): Existing parameters @@ -131,20 +145,23 @@ dict: A change set of the updates """ updates = {} - diff = recursive_diff(current, incoming) + + diff = recursive_diff(current, normalize_values(incoming)) + if diff is not None: updates = { k: v - for k, v in normalize_values(diff[1]).items() + for k, v in diff[1].items() if k in current or (k not in current and v is not None) } if purge: - # Add the other non-defaults + # Add the remaining non-default values for removal updates = { **updates, **{k: None for k in diff[0].keys() if k not in diff[1]}, } + return updates @@ -384,7 +401,7 @@ def initialize_client(self): """Creates the CM API client""" config = Configuration() - # If provided a CML endpoint URL, use it directly + # If provided a CM endpoint URL, use it directly if self.url: config.host = str(self.url).rstrip(" /") # Otherwise, run discovery on missing parts From 8a0c9027bed00894b82dd2c8ac0dd9585da4758e Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:49:11 -0500 Subject: [PATCH 03/13] Remove pytest discovery hack Signed-off-by: Webster Mudge --- 
tests/unit/conftest.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index d86218dd..6829f307 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -26,11 +26,11 @@ from ansible.module_utils import basic from ansible.module_utils.common.text.converters import to_bytes -# Required for pytest discovery in VSCode, reasons unknown... -try: - from ansible.plugins.action import ActionBase -except ModuleNotFoundError: - pass +# # Required for pytest discovery in VSCode, reasons unknown... +# try: +# from ansible.plugins.action import ActionBase +# except ModuleNotFoundError: +# pass from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, @@ -84,3 +84,23 @@ def prep_args(args: str = ""): basic._ANSIBLE_ARGS = to_bytes(output) return prep_args + + +# class AnsibleExitJson(Exception): +# """Exception class to be raised by module.exit_json and caught by the test case""" + +# def __init__(self, kwargs): +# super(AnsibleExitJson, self).__init__( +# kwargs.get("msg", "General module success") +# ) +# self.__dict__.update(kwargs) + + +# class AnsibleFailJson(Exception): +# """Exception class to be raised by module.fail_json and caught by the test case""" + +# def __init__(self, kwargs): +# super(AnsibleFailJson, self).__init__( +# kwargs.get("msg", "General module failure") +# ) +# self.__dict__.update(kwargs) From f8c82d5d5441ec18449512ec99007dd339c726b2 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:49:11 -0500 Subject: [PATCH 04/13] Create ServiceConfigUpdates class for service-wide configuration management Signed-off-by: Webster Mudge --- plugins/module_utils/service_utils.py | 28 +++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index f77f3278..c11a2d79 100644 --- 
a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -13,14 +13,19 @@ # limitations under the License. """ -A common functions for Cloudera Manager service management +A common functions for service management """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( _parse_output, + resolve_parameter_updates, ) -from cm_client import ApiService +from cm_client import ( + ApiConfig, + ApiService, + ApiServiceConfig, +) SERVICE_OUTPUT = [ "client_config_staleness_status", @@ -44,3 +49,22 @@ def parse_service_result(service: ApiService) -> dict: output = dict(cluster_name=service.cluster_ref.cluster_name) output.update(_parse_output(service.to_dict(), SERVICE_OUTPUT)) return output + + +class ServiceConfigUpdates(object): + def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> None: + current = {r.name: r.value for r in existing.items} + changeset = resolve_parameter_updates(current, updates, purge) + + self.diff = dict( + before={k: current[k] if k in current else None for k in changeset.keys()}, + after=changeset, + ) + + self.config = ApiServiceConfig( + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + ) + + @property + def changed(self) -> bool: + return bool(self.config.items) From 4363353e85b11224e650f2bceb655cfff2438728 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:50:29 -0500 Subject: [PATCH 05/13] Update to use ServiceConfigUpdates class to manage service-wide configuration Signed-off-by: Webster Mudge --- plugins/modules/service_config.py | 67 ++++++++++++------------------- 1 file changed, 26 insertions(+), 41 deletions(-) diff --git a/plugins/modules/service_config.py b/plugins/modules/service_config.py index e0a35a19..2f30ca2f 100644 --- a/plugins/modules/service_config.py +++ b/plugins/modules/service_config.py @@ -1,3 +1,4 @@ +#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2024 Cloudera, Inc. All Rights Reserved. 
@@ -14,33 +15,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json - -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerMutableModule, - resolve_parameter_updates, -) - -from cm_client import ( - ApiConfig, - ApiServiceConfig, - ClustersResourceApi, - ServicesResourceApi, -) -from cm_client.rest import ApiException - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: service_config -short_description: Manage a service configuration in cluster +short_description: Manage a cluster service configuration description: - - Manage a service configuration (service-wide) in a cluster. + - Manage a configuration (service-wide) for a cluster service. author: - "Webster Mudge (@wmudge)" requirements: @@ -216,6 +195,22 @@ returned: when supported """ +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + ServiceConfigUpdates, +) + + +from cm_client import ( + ClustersResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException + class ClusterServiceConfig(ClouderaManagerMutableModule): def __init__(self, module): @@ -257,32 +252,22 @@ def process(self): else: raise ex - current = {r.name: r.value for r in existing.items} - incoming = {k: str(v) if v is not None else v for k, v in self.params.items()} - - change_set = resolve_parameter_updates(current, incoming, self.purge) + updates = ServiceConfigUpdates(existing, self.params, self.purge) - if change_set: + if updates.changed: self.changed = True if self.module._diff: - self.diff = dict( - before={ - k: current[k] if k in current else None - for k in change_set.keys() - }, - after=change_set, - ) + self.diff = updates.diff if not 
self.module.check_mode: - body = ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in change_set.items()] - ) - self.config = [ p.to_dict() for p in api_instance.update_service_config( - self.cluster, self.service, message=self.message, body=body + self.cluster, + self.service, + message=self.message, + body=updates.config, ).items ] From d9f0e9ce6d836f0ebc7ccb192edcccd11913fae6 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:51:16 -0500 Subject: [PATCH 06/13] Update documentation Signed-off-by: Webster Mudge --- plugins/modules/service.py | 41 +++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index d80f4300..ff373a7f 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -1,3 +1,4 @@ +#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2024 Cloudera, Inc. All Rights Reserved. @@ -14,29 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerMutableModule, - resolve_tag_updates, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - parse_service_result, -) - -from cm_client import ( - ApiEntityTag, - ApiService, - ApiServiceList, - ClustersResourceApi, - ServicesResourceApi, -) -from cm_client.rest import ApiException - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" module: service short_description: Manage a service in cluster @@ -315,6 +293,23 @@ returned: when supported """ +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + resolve_tag_updates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + parse_service_result, +) + +from cm_client import ( + ApiEntityTag, + ApiService, + ApiServiceList, + ClustersResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException + class ClusterService(ClouderaManagerMutableModule): def __init__(self, module): From caa643d4e1dd7ce49d32ce6966a30ce46c720bcf Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:52:23 -0500 Subject: [PATCH 07/13] Add cm_service_config module for service-wide configuration management Signed-off-by: Webster Mudge --- plugins/modules/cm_service_config.py | 282 ++++++++++++++++++ .../test_cm_service_config.py | 175 +++++++++++ 2 files changed, 457 insertions(+) create mode 100644 plugins/modules/cm_service_config.py create mode 100644 tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py diff --git a/plugins/modules/cm_service_config.py b/plugins/modules/cm_service_config.py new file mode 100644 index 00000000..436eba18 --- /dev/null +++ b/plugins/modules/cm_service_config.py @@ -0,0 +1,282 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, 
Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_config +short_description: Manage the Cloudera Manager service configuration +description: + - Manage a configuration (service-wide) for the Cloudera Manager service. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + parameters: + description: + - The service-wide configuration to set. + - To unset a parameter, use C(None) as the value. + type: dict + required: yes + aliases: + - params + view: + description: + - The view to materialize. 
+ type: str + default: summary + choices: + - summary + - full +extends_documentation_fragment: + - ansible.builtin.action_common_attributes + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.purge + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Update (append) several service-wide parameters + cloudera.cluster.cm_service_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + parameters: + a_configuration: "schema://host:port" + another_configuration: 234 + +- name: Reset a service-wide parameter + cloudera.cluster.cm_service_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + parameters: + some_conf: None + +- name: Update (purge) service-wide parameters + cloudera.cluster.cm_service_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-service + parameters: + config_one: ValueOne + config_two: 4567 + purge: yes + +- name: Reset all service-wide parameters + cloudera.cluster.cm_service_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-service + parameters: {} + purge: yes +""" + +RETURN = r""" +config: + description: Service-wide configuration details for the Cloudera Manager service. + type: list + elements: dict + contains: + name: + description: The canonical name that identifies this configuration parameter. + type: str + returned: always + value: + description: + - The user-defined value. + - When absent, the default value (if any) will be used. + - Can also be absent, when enumerating allowed configs. + type: str + returned: always + required: + description: + - Whether this configuration is required for the service. 
+ - If any required configuration is not set, operations on the service may not work. + - Available using I(view=full). + type: bool + returned: when supported + default: + description: + - The default value. + - Available using I(view=full). + type: str + returned: when supported + display_name: + description: + - A user-friendly name of the parameters, as would have been shown in the web UI. + - Available using I(view=full). + type: str + returned: when supported + description: + description: + - A textual description of the parameter. + - Available using I(view=full). + type: str + returned: when supported + related_name: + description: + - If applicable, contains the related configuration variable used by the source project. + - Available using I(view=full). + type: str + returned: when supported + sensitive: + description: + - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validation_state: + description: + - State of the configuration parameter after validation. + - Available using I(view=full). + type: str + returned: when supported + sample: + - OK + - WARNING + - ERROR + validation_message: + description: + - A message explaining the parameter's validation state. + - Available using I(view=full). + type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Available using I(view=full). 
+ type: bool + returned: when supported +""" + +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + ServiceConfigUpdates, +) + + +from cm_client import ( + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + + +class ClouderaManagerServiceConfig(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceConfig, self).__init__(module) + + # Set the parameters + self.params = self.get_param("parameters") + self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return value + self.changed = False + self.diff = {} + self.config = [] + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + refresh = True + api_instance = MgmtServiceResourceApi(self.api_client) + + try: + existing = api_instance.read_service_config() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg=json.loads(ex.body)["message"]) + else: + raise ex + + updates = ServiceConfigUpdates(existing, self.params, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff = updates.diff + + if not self.module.check_mode: + self.config = [ + p.to_dict() + for p in api_instance.update_service_config( + message=self.message, body=updates.config + ).items + ] + + if self.view == "full": + refresh = False + + if refresh: + self.config = [ + p.to_dict() + for p in api_instance.read_service_config(view=self.view).items + ] + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + parameters=dict(type="dict", required=True, aliases=["params"]), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), + ), + supports_check_mode=True, + ) + + result = 
ClouderaManagerServiceConfig(module) + + output = dict( + changed=result.changed, + config=result.config, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py new file mode 100644 index 00000000..e7f81dbb --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import os +import pytest + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_config +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture +def conn(): + conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) + + if os.getenv("CM_HOST", None): + conn.update(host=os.getenv("CM_HOST")) + + if os.getenv("CM_PORT", None): + conn.update(port=os.getenv("CM_PORT")) + + if os.getenv("CM_ENDPOINT", None): + conn.update(url=os.getenv("CM_ENDPOINT")) + + if os.getenv("CM_PROXY", None): + conn.update(proxy=os.getenv("CM_PROXY")) + + return { + **conn, + "verify_tls": "no", + "debug": "no", + } + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_config.main() + + +def test_present_invalid_parameter(conn, module_args): + conn.update( + parameters=dict(example="Example"), + ) + module_args(conn) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_config.main() + + +def test_set_parameters(conn, module_args): + conn.update( + parameters=dict(mgmt_emit_sensitive_data_in_stderr=True), + # _ansible_check_mode=True, + # _ansible_diff=True, + message="test_cm_service_config::test_set_parameters", + ) + module_args(conn) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + assert e.value.changed == True + assert {c["name"]: c["value"] for c in e.value.config}[ + "mgmt_emit_sensitive_data_in_stderr" + ] == "true" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + assert e.value.changed == False + assert {c["name"]: c["value"] for c in e.value.config}[ + "mgmt_emit_sensitive_data_in_stderr" + ] == "true" + + 
+def test_unset_parameters(conn, module_args): + conn.update( + parameters=dict(mgmt_emit_sensitive_data_in_stderr=None), + message="test_cm_service_config::test_unset_parameters", + ) + module_args(conn) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + assert e.value.changed == True + results = {c["name"]: c["value"] for c in e.value.config} + assert "mgmt_emit_sensitive_data_in_stderr" not in results + + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + # Idempotency + assert e.value.changed == False + results = {c["name"]: c["value"] for c in e.value.config} + assert "mgmt_emit_sensitive_data_in_stderr" not in results + + +def test_set_parameters_with_purge(conn, module_args): + conn.update( + parameters=dict(mgmt_emit_sensitive_data_in_stderr=True), + purge=True, + message="test_cm_service_config::test_set_parameters_with_purge", + # _ansible_check_mode=True, + # _ansible_diff=True, + ) + module_args(conn) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + assert e.value.changed == True + assert {c["name"]: c["value"] for c in e.value.config}[ + "mgmt_emit_sensitive_data_in_stderr" + ] == "true" + + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + # Idempotency + assert e.value.changed == False + assert {c["name"]: c["value"] for c in e.value.config}[ + "mgmt_emit_sensitive_data_in_stderr" + ] == "true" + + +def test_purge_all_parameters(conn, module_args): + conn.update( + parameters=dict(), + purge=True, + message="test_cm_service_config::test_purge_all_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, + ) + module_args(conn) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From 
6cdd72e780335fe53f1144b54be4b05663f2c3bf Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:54:05 -0500 Subject: [PATCH 08/13] Update service-wide cluster service configuration tests to use auto-generated cluster resources Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 264 +++++++++++++++++- 1 file changed, 252 insertions(+), 12 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 1750722c..4c9a73bc 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -22,6 +22,30 @@ import os import pytest +from pathlib import Path +from time import sleep + +from cm_client import ( + ClustersResourceApi, + Configuration, + ApiClient, + ApiClusterList, + ApiCluster, + ApiCommand, + ApiConfig, + ParcelResourceApi, + ApiHostRefList, + ApiHostRef, + ApiParcel, + ApiParcelList, + ApiServiceList, + ApiService, + ApiServiceConfig, + CommandsResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException, RESTClientObject + from ansible_collections.cloudera.cluster.plugins.modules import service_config from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, @@ -31,7 +55,7 @@ LOG = logging.getLogger(__name__) -@pytest.fixture +@pytest.fixture(scope="session") def conn(): conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) @@ -54,6 +78,190 @@ def conn(): } +@pytest.fixture(scope="session") +def prep_client(conn): + """Create a Cloudera Manager API client, resolving HTTP/S and version URL. 
+ + Args: + conn (dict): Connection details + + Returns: + ApiClient: Cloudera Manager API client + """ + config = Configuration() + + config.username = conn["username"] + config.password = conn["password"] + + if "url" in conn: + config.host = str(conn["url"]).rstrip(" /") + else: + rest = RESTClientObject() + + # Handle redirects + url = rest.GET(conn["host"]).urllib3_response.geturl() + + # Get version + auth = config.auth_settings().get("basic") + version = rest.GET( + f"{url}api/version", headers={auth["key"]: auth["value"]} + ).data + + # Set host + config.host = f"{url}api/{version}" + + client = ApiClient() + client.user_agent = "pytest" + return client + + +@pytest.fixture( + scope="module", +) +def prep_cluster(prep_client, request): + """Create a 7.1.9 test cluster using the module name.""" + + marker = request.node.get_closest_marker("prep") + if marker is None: + raise Exception("Preparation marker not found.") + elif "version" not in marker.kwargs: + raise Exception("Cluster version parameter not found.") + elif "hosts" not in marker.kwargs: + raise Exception("Cluster hosts parameter not found.") + else: + version = marker.kwargs["version"] + hosts = marker.kwargs["hosts"] + + cluster_api = ClustersResourceApi(prep_client) + parcel_api = ParcelResourceApi(prep_client) + + try: + config = ApiCluster( + name=request.node.name, + full_version=version, + ) + # Create the cluster + clusters = cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Activate the parcel(s) + from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( + Parcel, + ) + + parcel = Parcel( + parcel_api=parcel_api, + product="CDH", + version=version, + cluster=request.node.name, + ) + + cluster_api.add_hosts( + cluster_name=request.node.name, + body=ApiHostRefList(items=[ApiHostRef(hostname=h) for h in hosts]), + ) + yield clusters.items[0] + cluster_api.delete_cluster(cluster_name=request.node.name) + except ApiException as ae: + raise 
Exception(str(ae)) + + +@pytest.mark.skip +def test_wip_cluster(prep_cluster): + results = prep_cluster + print(results) + + +def wait_for_command( + api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 5 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("CM command timeout") + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(f"CM command [{command.id}] failed: {command.result_message}") + + +@pytest.fixture(scope="module") +def prep_service(prep_client, request): + api = ServicesResourceApi(prep_client) + cluster_api = ClustersResourceApi(prep_client) + + name = Path(request.node.name).stem + "_zookeeper" + + service = ApiService( + name=name, + type="ZOOKEEPER", + ) + + api.create_services(cluster_name="TestOne", body=ApiServiceList(items=[service])) + cluster_api.auto_assign_roles(cluster_name="TestOne") + + # configure = cluster_api.auto_configure(cluster_name="TestOne") + wait_for_command( + prep_client, api.first_run(cluster_name="TestOne", service_name=name) + ) + + yield api.read_service(cluster_name="TestOne", service_name=name) + + api.delete_service(cluster_name="TestOne", service_name=name) + + +def test_wip_service(prep_service): + results = prep_service + print(results) + + +@pytest.fixture +def prep_service_config(prep_client, request): + marker = request.node.get_closest_marker("prep") + + if marker is None: + raise Exception("Unable to determine parameter to prepare") + elif len(marker.args) != 3: + raise Exception("Invalid number of values for parameter preparation") + else: + cluster = marker.args[0] + service = marker.args[1] + params = marker.args[2] + + api = ServicesResourceApi(prep_client) + + # Set the parameter + try: + api.update_service_config( + cluster_name=cluster, + service_name=service, + message=f"test_service_config::{request.node.name}:set", + body=ApiServiceConfig( + 
items=[ApiConfig(name=k, value=v) for k, v in params.items()] + ), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Go run the test + yield + + # Reset the parameter + try: + api.update_service_config( + cluster_name=cluster, + service_name=service, + message=f"test_service_config::{request.node.name}::reset", + body=ApiServiceConfig( + items=[ApiConfig(name=k, value=v) for k, v in params.items()] + ), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + def test_missing_required(conn, module_args): module_args(conn) @@ -94,7 +302,7 @@ def test_present_invalid_cluster(conn, module_args): module_args(conn) with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): - service_config.main() + prep_service_config.main() def test_present_invalid_service(conn, module_args): @@ -123,13 +331,19 @@ def test_present_invalid_parameter(conn, module_args): service_config.main() -def test_set_parameters(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=None, tickTime=1111), +) +def test_set_parameters(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(autopurgeSnapRetainCount=9), - _ansible_check_mode=True, - _ansible_diff=True, + message="test_service_config::test_set_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, ) module_args(conn) @@ -140,7 +354,9 @@ def test_set_parameters(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ "autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 2 + # Idempotency with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -148,13 +364,20 @@ def test_set_parameters(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ 
"autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 2 -def test_unset_parameters(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=7, tickTime=1111), +) +def test_unset_parameters(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(autopurgeSnapRetainCount=None), + message="test_service_config::test_unset_parameters", ) module_args(conn) @@ -164,23 +387,32 @@ def test_unset_parameters(conn, module_args): assert e.value.changed == True results = {c["name"]: c["value"] for c in e.value.config} assert "autopurgeSnapRetainCount" not in results + assert len(e.value.config) == 1 + # Idempotency with pytest.raises(AnsibleExitJson) as e: service_config.main() assert e.value.changed == False results = {c["name"]: c["value"] for c in e.value.config} assert "autopurgeSnapRetainCount" not in results + assert len(e.value.config) == 1 -def test_set_parameters_with_purge(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=7, tickTime=1111), +) +def test_set_parameters_with_purge(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(autopurgeSnapRetainCount=9), purge=True, - _ansible_check_mode=True, - _ansible_diff=True, + message="test_service_config::test_set_parameters_with_purge", + # _ansible_check_mode=True, + # _ansible_diff=True, ) module_args(conn) @@ -191,6 +423,7 @@ def test_set_parameters_with_purge(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ "autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 1 with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -199,16 +432,23 @@ def test_set_parameters_with_purge(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ 
"autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 1 -def test_purge_all_parameters(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=8, tickTime=2222), +) +def test_purge_all_parameters(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(), purge=True, - _ansible_check_mode=True, - _ansible_diff=True, + message="test_service_config::test_purge_all_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, ) module_args(conn) From c9a7ecce3a47f02012e2ec1ce629dfeed500dfc8 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 12:36:25 -0500 Subject: [PATCH 09/13] Update marker name Signed-off-by: Webster Mudge --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d5ea7976..a36945c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ filterwarnings = [ "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", ] markers = [ - "prep: Prepare Cloudera Manager and resources for tests", + "prepare: Prepare Cloudera Manager and resources for tests", ] [build-system] From d15ea7b9c92a825598c297b24d6cdfbfca5abe18 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 12:37:53 -0500 Subject: [PATCH 10/13] Configure pytest fixtures to create target service for testing Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 201 ++++++++---------- 1 file changed, 89 insertions(+), 112 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 4c9a73bc..c3b5dc47 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -79,7 +79,7 @@ def 
conn(): @pytest.fixture(scope="session") -def prep_client(conn): +def cm_api_client(conn): """Create a Cloudera Manager API client, resolving HTTP/S and version URL. Args: @@ -115,10 +115,8 @@ def prep_client(conn): return client -@pytest.fixture( - scope="module", -) -def prep_cluster(prep_client, request): +@pytest.fixture(scope="module") +def prep_cluster(cm_api_client, request): """Create a 7.1.9 test cluster using the module name.""" marker = request.node.get_closest_marker("prep") @@ -132,8 +130,8 @@ def prep_cluster(prep_client, request): version = marker.kwargs["version"] hosts = marker.kwargs["hosts"] - cluster_api = ClustersResourceApi(prep_client) - parcel_api = ParcelResourceApi(prep_client) + cluster_api = ClustersResourceApi(cm_api_client) + parcel_api = ParcelResourceApi(cm_api_client) try: config = ApiCluster( @@ -186,9 +184,9 @@ def wait_for_command( @pytest.fixture(scope="module") -def prep_service(prep_client, request): - api = ServicesResourceApi(prep_client) - cluster_api = ClustersResourceApi(prep_client) +def target_service(cm_api_client, request): + api = ServicesResourceApi(cm_api_client) + cluster_api = ClustersResourceApi(cm_api_client) name = Path(request.node.name).stem + "_zookeeper" @@ -202,7 +200,7 @@ def prep_service(prep_client, request): # configure = cluster_api.auto_configure(cluster_name="TestOne") wait_for_command( - prep_client, api.first_run(cluster_name="TestOne", service_name=name) + cm_api_client, api.first_run(cluster_name="TestOne", service_name=name) ) yield api.read_service(cluster_name="TestOne", service_name=name) @@ -210,56 +208,48 @@ def prep_service(prep_client, request): api.delete_service(cluster_name="TestOne", service_name=name) -def test_wip_service(prep_service): - results = prep_service - print(results) - - @pytest.fixture -def prep_service_config(prep_client, request): - marker = request.node.get_closest_marker("prep") +def target_service_config(cm_api_client, target_service, request): + marker = 
request.node.get_closest_marker("prepare") if marker is None: - raise Exception("Unable to determine parameter to prepare") - elif len(marker.args) != 3: - raise Exception("Invalid number of values for parameter preparation") - else: - cluster = marker.args[0] - service = marker.args[1] - params = marker.args[2] - - api = ServicesResourceApi(prep_client) - - # Set the parameter - try: - api.update_service_config( - cluster_name=cluster, - service_name=service, - message=f"test_service_config::{request.node.name}:set", - body=ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in params.items()] - ), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) - - # Go run the test - yield + raise Exception("No prepare marker found.") + elif "service_config" not in marker.kwargs: + raise Exception("No 'service_config' parameter found.") + + service_api = ServicesResourceApi(cm_api_client) + + # Set the parameter(s) + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... 
+ for k, v in marker.kwargs["service_config"].items(): + try: + service_api.update_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + message=f"test_service_config::{request.node.name}:set", + body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Return the targeted service and go run the test + yield target_service # Reset the parameter - try: - api.update_service_config( - cluster_name=cluster, - service_name=service, - message=f"test_service_config::{request.node.name}::reset", - body=ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in params.items()] - ), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) + for k, v in marker.kwargs["service_config"].items(): + try: + service_api.update_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + message=f"test_service_config::{request.node.name}::reset", + body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) def test_missing_required(conn, module_args): @@ -270,60 +260,63 @@ def test_missing_required(conn, module_args): def test_missing_service(conn, module_args): - conn.update(service="example") - module_args(conn) + module_args({**conn, "service": "example"}) with pytest.raises(AnsibleFailJson, match="cluster, parameters"): service_config.main() def test_missing_cluster(conn, module_args): - conn.update(cluster="example") - module_args(conn) + module_args({**conn, "cluster": "example"}) with pytest.raises(AnsibleFailJson, match="parameters, service"): service_config.main() def test_missing_parameters(conn, module_args): - 
conn.update(parameters=dict(test="example")) - module_args(conn) + module_args({**conn, "parameters": dict(test="example")}) with pytest.raises(AnsibleFailJson, match="cluster, service"): service_config.main() def test_present_invalid_cluster(conn, module_args): - conn.update( - cluster="example", - service="example", - parameters=dict(example="Example"), + module_args( + { + **conn, + "cluster": "example", + "service": "example", + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): - prep_service_config.main() + service_config.main() -def test_present_invalid_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service="example", - parameters=dict(example="Example"), +def test_present_invalid_service(conn, module_args, target_service): + module_args( + { + **conn, + "cluster": target_service.cluster_ref.cluster_name, + "service": "example", + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises(AnsibleFailJson, match="Service 'example' not found"): service_config.main() -def test_present_invalid_parameter(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - parameters=dict(example="Example"), +def test_present_invalid_parameter(conn, module_args, target_service): + module_args( + { + **conn, + "cluster": target_service.cluster_ref.cluster_name, + "service": target_service.name, + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises( AnsibleFailJson, match="Unknown configuration attribute 'example'" @@ -331,15 +324,11 @@ def test_present_invalid_parameter(conn, module_args): service_config.main() -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=None, tickTime=1111), -) -def test_set_parameters(conn, module_args, prep_service_config): 
+@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=None, tickTime=1111)) +def test_set_parameters(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(autopurgeSnapRetainCount=9), message="test_service_config::test_set_parameters", # _ansible_check_mode=True, @@ -367,15 +356,11 @@ def test_set_parameters(conn, module_args, prep_service_config): assert len(e.value.config) == 2 -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=7, tickTime=1111), -) -def test_unset_parameters(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) +def test_unset_parameters(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(autopurgeSnapRetainCount=None), message="test_service_config::test_unset_parameters", ) @@ -399,15 +384,11 @@ def test_unset_parameters(conn, module_args, prep_service_config): assert len(e.value.config) == 1 -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=7, tickTime=1111), -) -def test_set_parameters_with_purge(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) +def test_set_parameters_with_purge(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(autopurgeSnapRetainCount=9), purge=True, 
message="test_service_config::test_set_parameters_with_purge", @@ -435,15 +416,11 @@ def test_set_parameters_with_purge(conn, module_args, prep_service_config): assert len(e.value.config) == 1 -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=8, tickTime=2222), -) -def test_purge_all_parameters(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222)) +def test_purge_all_parameters(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(), purge=True, message="test_service_config::test_purge_all_parameters", From 018e7947f6ca1c21d014abbae82b588b5bddf58c Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 13:19:07 -0500 Subject: [PATCH 11/13] Update module_args() for mutation tests Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 72 ++++++++++--------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index c3b5dc47..342863f0 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -326,15 +326,17 @@ def test_present_invalid_parameter(conn, module_args, target_service): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=None, tickTime=1111)) def test_set_parameters(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(autopurgeSnapRetainCount=9), - message="test_service_config::test_set_parameters", - # _ansible_check_mode=True, - # _ansible_diff=True, + 
module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(autopurgeSnapRetainCount=9), + "message": "test_service_config::test_set_parameters", + # "_ansible_check_mode": True, + # "_ansible_diff": True, + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -358,13 +360,15 @@ def test_set_parameters(conn, module_args, target_service_config): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) def test_unset_parameters(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(autopurgeSnapRetainCount=None), - message="test_service_config::test_unset_parameters", + module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(autopurgeSnapRetainCount=None), + "message": "test_service_config::test_unset_parameters", + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -386,16 +390,18 @@ def test_unset_parameters(conn, module_args, target_service_config): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) def test_set_parameters_with_purge(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(autopurgeSnapRetainCount=9), - purge=True, - message="test_service_config::test_set_parameters_with_purge", - # _ansible_check_mode=True, - # _ansible_diff=True, + module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(autopurgeSnapRetainCount=9), + "purge": True, + "message": "test_service_config::test_set_parameters_with_purge", + # 
"_ansible_check_mode": True, + # "_ansible_diff": True, + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -418,16 +424,18 @@ def test_set_parameters_with_purge(conn, module_args, target_service_config): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222)) def test_purge_all_parameters(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(), - purge=True, - message="test_service_config::test_purge_all_parameters", - # _ansible_check_mode=True, - # _ansible_diff=True, + module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(), + "purge": True, + "message": "test_service_config::test_purge_all_parameters", + # "_ansible_check_mode": True, + # "_ansible_diff": True, + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() From c0f805a2d33609d26c0bb8b7207b7537e8ca8700 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 16:17:25 -0500 Subject: [PATCH 12/13] Update to create target cluster for all session tests Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 120 ++++++++++-------- 1 file changed, 69 insertions(+), 51 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 342863f0..4505f3cc 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -21,32 +21,38 @@ import logging import os import pytest +import random +import string from pathlib import Path from time import sleep from cm_client import ( - ClustersResourceApi, - Configuration, ApiClient, ApiClusterList, ApiCluster, ApiCommand, ApiConfig, - 
ParcelResourceApi, - ApiHostRefList, ApiHostRef, - ApiParcel, - ApiParcelList, - ApiServiceList, + ApiHostRefList, ApiService, ApiServiceConfig, + ApiServiceList, + ClustersResourceApi, CommandsResourceApi, + Configuration, + HostsResourceApi, + ParcelResourceApi, + ParcelsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException, RESTClientObject from ansible_collections.cloudera.cluster.plugins.modules import service_config +from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( + Parcel, +) + from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, @@ -115,62 +121,71 @@ def cm_api_client(conn): return client -@pytest.fixture(scope="module") -def prep_cluster(cm_api_client, request): - """Create a 7.1.9 test cluster using the module name.""" +@pytest.fixture(scope="session") +def target_cluster(cm_api_client, request): + """Create a 7.1.9 test cluster.""" - marker = request.node.get_closest_marker("prep") - if marker is None: - raise Exception("Preparation marker not found.") - elif "version" not in marker.kwargs: - raise Exception("Cluster version parameter not found.") - elif "hosts" not in marker.kwargs: - raise Exception("Cluster hosts parameter not found.") - else: - version = marker.kwargs["version"] - hosts = marker.kwargs["hosts"] + name = ( + Path(request.fixturename).stem + + "_" + + "".join(random.choices(string.ascii_lowercase, k=6)) + ) + cdh_version = "7.1.9" cluster_api = ClustersResourceApi(cm_api_client) + parcels_api = ParcelsResourceApi(cm_api_client) parcel_api = ParcelResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) try: + # TODO Query for the latest version available - is this possible? 
+ + # Create the initial cluster config = ApiCluster( - name=request.node.name, - full_version=version, + name=name, + full_version=cdh_version, ) - # Create the cluster - clusters = cluster_api.create_clusters(body=ApiClusterList(items=[config])) - # Activate the parcel(s) - from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( - Parcel, - ) + cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Get first free host and assign to the cluster + all_hosts = host_api.read_hosts() + host = next((h for h in all_hosts.items if not h.cluster_ref), None) + + if host is None: + # Roll back the cluster and then raise an error + cluster_api.delete_cluster(cluster_name=name) + raise Exception("No available hosts to allocate to new cluster") + else: + cluster_api.add_hosts( + cluster_name=name, + body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + ) + + # Find the first CDH parcel version and activate it + parcels = parcels_api.read_parcels(cluster_name=name) + cdh_parcel = next((p for p in parcels.items if p.product == "CDH")) parcel = Parcel( parcel_api=parcel_api, - product="CDH", - version=version, - cluster=request.node.name, + product=cdh_parcel.product, + version=cdh_parcel.version, + cluster=name, ) - cluster_api.add_hosts( - cluster_name=request.node.name, - body=ApiHostRefList(items=[ApiHostRef(hostname=h) for h in hosts]), - ) - yield clusters.items[0] - cluster_api.delete_cluster(cluster_name=request.node.name) - except ApiException as ae: - raise Exception(str(ae)) + parcel.activate() + # Reread and return the cluster + yield cluster_api.read_cluster(cluster_name=name) -@pytest.mark.skip -def test_wip_cluster(prep_cluster): - results = prep_cluster - print(results) + # Deprovision the cluster + cluster_api.delete_cluster(cluster_name=name) + except ApiException as ae: + raise Exception(str(ae)) def wait_for_command( - api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 5 + 
api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 ): poll_count = 0 while command.active: @@ -184,7 +199,7 @@ def wait_for_command( @pytest.fixture(scope="module") -def target_service(cm_api_client, request): +def target_service(cm_api_client, target_cluster, request): api = ServicesResourceApi(cm_api_client) cluster_api = ClustersResourceApi(cm_api_client) @@ -195,17 +210,20 @@ def target_service(cm_api_client, request): type="ZOOKEEPER", ) - api.create_services(cluster_name="TestOne", body=ApiServiceList(items=[service])) - cluster_api.auto_assign_roles(cluster_name="TestOne") + api.create_services( + cluster_name=target_cluster.name, body=ApiServiceList(items=[service]) + ) + cluster_api.auto_assign_roles(cluster_name=target_cluster.name) - # configure = cluster_api.auto_configure(cluster_name="TestOne") + # configure = cluster_api.auto_configure(cluster_name=target_cluster.name) wait_for_command( - cm_api_client, api.first_run(cluster_name="TestOne", service_name=name) + cm_api_client, + api.first_run(cluster_name=target_cluster.name, service_name=name), ) - yield api.read_service(cluster_name="TestOne", service_name=name) + yield api.read_service(cluster_name=target_cluster.name, service_name=name) - api.delete_service(cluster_name="TestOne", service_name=name) + api.delete_service(cluster_name=target_cluster.name, service_name=name) @pytest.fixture From 793ea10afdfda858def6378302e1c3da14110fe2 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 17:26:26 -0500 Subject: [PATCH 13/13] Refactor session fixtures to relocate for general test access Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 24 +++ tests/unit/conftest.py | 168 +++++++++++++++--- .../service_config/test_service_config.py | 158 +--------------- 3 files changed, 174 insertions(+), 176 deletions(-) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 6a16e733..0913603e 100644 --- a/tests/unit/__init__.py +++ 
b/tests/unit/__init__.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2024 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from time import sleep + +from cm_client import ( + ApiClient, + ApiCommand, + CommandsResourceApi, +) + class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" @@ -31,3 +41,17 @@ def __init__(self, kwargs): kwargs.get("msg", "General module failure") ) self.__dict__.update(kwargs) + + +def wait_for_command( + api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("CM command timeout") + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(f"CM command [{command.id}] failed: {command.result_message}") diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 6829f307..e7b48475 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -19,18 +19,35 @@ __metaclass__ = type import json -import sys +import os import pytest +import random +import string +import sys import yaml +from pathlib import Path + +from cm_client import ( + ApiClient, + ApiClusterList, + ApiCluster, + ApiHostRef, + ApiHostRefList, + ClustersResourceApi, + Configuration, + HostsResourceApi, + ParcelResourceApi, + ParcelsResourceApi, +) +from cm_client.rest import ApiException, RESTClientObject + from ansible.module_utils import basic from ansible.module_utils.common.text.converters import to_bytes -# # Required for pytest discovery in VSCode, reasons unknown... 
-# try: -# from ansible.plugins.action import ActionBase -# except ModuleNotFoundError: -# pass +from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( + Parcel, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, @@ -86,21 +103,132 @@ def prep_args(args: str = ""): return prep_args -# class AnsibleExitJson(Exception): -# """Exception class to be raised by module.exit_json and caught by the test case""" +@pytest.fixture(scope="session") +def conn(): + conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) + + if os.getenv("CM_HOST", None): + conn.update(host=os.getenv("CM_HOST")) + + if os.getenv("CM_PORT", None): + conn.update(port=os.getenv("CM_PORT")) + + if os.getenv("CM_ENDPOINT", None): + conn.update(url=os.getenv("CM_ENDPOINT")) + + if os.getenv("CM_PROXY", None): + conn.update(proxy=os.getenv("CM_PROXY")) + + return { + **conn, + "verify_tls": "no", + "debug": "no", + } + + +@pytest.fixture(scope="session") +def cm_api_client(conn): + """Create a Cloudera Manager API client, resolving HTTP/S and version URL. 
+ + Args: + conn (dict): Connection details + + Returns: + ApiClient: Cloudera Manager API client + """ + config = Configuration() + + config.username = conn["username"] + config.password = conn["password"] -# def __init__(self, kwargs): -# super(AnsibleExitJson, self).__init__( -# kwargs.get("msg", "General module success") -# ) -# self.__dict__.update(kwargs) + if "url" in conn: + config.host = str(conn["url"]).rstrip(" /") + else: + rest = RESTClientObject() + + # Handle redirects + url = rest.GET(conn["host"]).urllib3_response.geturl() + + # Get version + auth = config.auth_settings().get("basic") + version = rest.GET( + f"{url}api/version", headers={auth["key"]: auth["value"]} + ).data + + # Set host + config.host = f"{url}api/{version}" + + client = ApiClient() + client.user_agent = "pytest" + return client + + +@pytest.fixture(scope="session") +def target_cluster(cm_api_client, request): + """Create a test cluster.""" + + if os.getenv("CDH_VERSION", None): + cdh_version = os.getenv("CDH_VERSION") + else: + raise Exception("No CDH_VERSION found. 
Please set this environment variable.") + + name = ( + Path(request.fixturename).stem + + "_" + + "".join(random.choices(string.ascii_lowercase, k=6)) + ) + + cluster_api = ClustersResourceApi(cm_api_client) + parcels_api = ParcelsResourceApi(cm_api_client) + parcel_api = ParcelResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) + + try: + # Create the initial cluster + config = ApiCluster( + name=name, + full_version=cdh_version, + ) + + cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Get first free host and assign to the cluster + all_hosts = host_api.read_hosts() + host = next((h for h in all_hosts.items if not h.cluster_ref), None) + + if host is None: + # Roll back the cluster and then raise an error + cluster_api.delete_cluster(cluster_name=name) + raise Exception("No available hosts to allocate to new cluster") + else: + cluster_api.add_hosts( + cluster_name=name, + body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + ) + + # Find the first CDH parcel version and activate it + parcels = parcels_api.read_parcels(cluster_name=name) + cdh_parcel = next( + ( + p + for p in parcels.items + if p.product == "CDH" and p.version.startswith(cdh_version) + ) + ) + + parcel = Parcel( + parcel_api=parcel_api, + product=cdh_parcel.product, + version=cdh_parcel.version, + cluster=name, + ) + parcel.activate() -# class AnsibleFailJson(Exception): -# """Exception class to be raised by module.fail_json and caught by the test case""" + # Reread and return the cluster + yield cluster_api.read_cluster(cluster_name=name) -# def __init__(self, kwargs): -# super(AnsibleFailJson, self).__init__( -# kwargs.get("msg", "General module failure") -# ) -# self.__dict__.update(kwargs) + # Deprovision the cluster + cluster_api.delete_cluster(cluster_name=name) + except ApiException as ae: + raise Exception(str(ae)) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py 
b/tests/unit/plugins/modules/service_config/test_service_config.py index 4505f3cc..a8ab22f8 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -19,185 +19,31 @@ __metaclass__ = type import logging -import os import pytest -import random -import string from pathlib import Path -from time import sleep from cm_client import ( - ApiClient, - ApiClusterList, - ApiCluster, - ApiCommand, ApiConfig, - ApiHostRef, - ApiHostRefList, ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, - CommandsResourceApi, - Configuration, - HostsResourceApi, - ParcelResourceApi, - ParcelsResourceApi, ServicesResourceApi, ) -from cm_client.rest import ApiException, RESTClientObject +from cm_client.rest import ApiException from ansible_collections.cloudera.cluster.plugins.modules import service_config -from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( - Parcel, -) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + wait_for_command, ) LOG = logging.getLogger(__name__) -@pytest.fixture(scope="session") -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) - - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) - - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) - - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) - - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) - - return { - **conn, - "verify_tls": "no", - "debug": "no", - } - - -@pytest.fixture(scope="session") -def cm_api_client(conn): - """Create a Cloudera Manager API client, resolving HTTP/S and version URL. 
- - Args: - conn (dict): Connection details - - Returns: - ApiClient: Cloudera Manager API client - """ - config = Configuration() - - config.username = conn["username"] - config.password = conn["password"] - - if "url" in conn: - config.host = str(conn["url"]).rstrip(" /") - else: - rest = RESTClientObject() - - # Handle redirects - url = rest.GET(conn["host"]).urllib3_response.geturl() - - # Get version - auth = config.auth_settings().get("basic") - version = rest.GET( - f"{url}api/version", headers={auth["key"]: auth["value"]} - ).data - - # Set host - config.host = f"{url}api/{version}" - - client = ApiClient() - client.user_agent = "pytest" - return client - - -@pytest.fixture(scope="session") -def target_cluster(cm_api_client, request): - """Create a 7.1.9 test cluster.""" - - name = ( - Path(request.fixturename).stem - + "_" - + "".join(random.choices(string.ascii_lowercase, k=6)) - ) - cdh_version = "7.1.9" - - cluster_api = ClustersResourceApi(cm_api_client) - parcels_api = ParcelsResourceApi(cm_api_client) - parcel_api = ParcelResourceApi(cm_api_client) - host_api = HostsResourceApi(cm_api_client) - - try: - # TODO Query for the latest version available - is this possible? 
- - # Create the initial cluster - config = ApiCluster( - name=name, - full_version=cdh_version, - ) - - cluster_api.create_clusters(body=ApiClusterList(items=[config])) - - # Get first free host and assign to the cluster - all_hosts = host_api.read_hosts() - host = next((h for h in all_hosts.items if not h.cluster_ref), None) - - if host is None: - # Roll back the cluster and then raise an error - cluster_api.delete_cluster(cluster_name=name) - raise Exception("No available hosts to allocate to new cluster") - else: - cluster_api.add_hosts( - cluster_name=name, - body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), - ) - - # Find the first CDH parcel version and activate it - parcels = parcels_api.read_parcels(cluster_name=name) - cdh_parcel = next((p for p in parcels.items if p.product == "CDH")) - - parcel = Parcel( - parcel_api=parcel_api, - product=cdh_parcel.product, - version=cdh_parcel.version, - cluster=name, - ) - - parcel.activate() - - # Reread and return the cluster - yield cluster_api.read_cluster(cluster_name=name) - - # Deprovision the cluster - cluster_api.delete_cluster(cluster_name=name) - except ApiException as ae: - raise Exception(str(ae)) - - -def wait_for_command( - api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 -): - poll_count = 0 - while command.active: - if poll_count > polling: - raise Exception("CM command timeout") - sleep(delay) - poll_count += 1 - command = CommandsResourceApi(api_client).read_command(command.id) - if not command.success: - raise Exception(f"CM command [{command.id}] failed: {command.result_message}") - - @pytest.fixture(scope="module") def target_service(cm_api_client, target_cluster, request): api = ServicesResourceApi(cm_api_client)