diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py
index db5db3cd..fe8688fe 100644
--- a/plugins/modules/cm_service_role.py
+++ b/plugins/modules/cm_service_role.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,30 +25,25 @@
 requirements:
   - cm-client
 options:
-  cms_hostname:
+  cluster_hostname:
     description:
       - The hostname of a cluster instance for the role.
+      - If the hostname is different from the existing host for the I(type), the role will be destroyed and rebuilt on the declared host.
       - Mutually exclusive with I(cluster_host_id).
     type: str
     aliases:
       - cluster_host
-  cms_host_id:
+  cluster_host_id:
     description:
       - The host ID of a cluster instance for the role.
+      - If the host ID is different from the existing host for the I(type), the role will be destroyed and rebuilt on the declared host.
       - Mutually exclusive with I(cluster_hostname).
     type: str
   type:
     description:
       - A role type for the role.
-      - Required if the I(state) creates a new role.
-    type: str
-    aliases:
-      - role_type
-  role_config_group:
-    description:
-      - A role type for the role.
-      - Required if the I(state) creates a new role.
     type: str
+    required: True
     aliases:
       - role_type
   config:
@@ -65,21 +60,17 @@
     type: bool
     aliases:
       - maintenance_mode
-  tags:
-    description:
-      - A set of tags applied to the role.
-      - To unset a tag, use C(None) as its value.
-    type: dict
   purge:
     description:
-      - Flag for whether the declared role tags should append or overwrite any existing tags.
-      - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True).
+      - Flag for whether the declared role configurations should append to or overwrite any existing configurations.
+      - To clear all role configurations, set I(config={}), i.e. an empty dictionary, or omit it entirely, and set I(purge=True).
     type: bool
     default: False
   state:
     description:
       - The state of the role.
-      - Note, if the declared state is invalid for the role, for example, the role is a C(HDFS GATEWAY), the module will return an error.
+      - Note, if the declared state is invalid for the role, the module will return an error.
+      - Note, I(restarted) will always force a change of state of the role.
type: str default: present choices: @@ -101,139 +92,101 @@ """ EXAMPLES = r""" -- name: Establish a service role (auto-generated name) - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - cluster_hostname: worker-01.cloudera.internal - -- name: Establish a service role (defined name) - cloudera.cluster.service_role: +- name: Establish a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - name: example-gateway + type: HOSTMONITOR cluster_hostname: worker-01.cloudera.internal -- name: Set a service role to maintenance mode - cloudera.cluster.service_role: +- name: Set a Cloudera Manager Service role to maintenance mode + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR maintenance: yes -- name: Update (append) tags to a service role - cloudera.cluster.service_role: +- name: Update (append) role configurations to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: - tag_one: value_one - tag_two: value_two + type: HOSTMONITOR + config: + some_config: value_one + another_config: value_two -- name: Set (purge) tags to a service role - cloudera.cluster.service_role: +- name: Set (purge) role configurations to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: - tag_three: value_three + type: HOSTMONITOR + config: + yet_another_config: value_three purge: yes -- name: Remove all tags on a service role - cloudera.cluster.service_role: +- name: Remove all role configurations on a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: {} + type: HOSTMONITOR purge: yes -- name: Start a service role - cloudera.cluster.service_role: +- name: Start a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: started -- name: Force a restart to a service role - cloudera.cluster.service_role: +- name: Force a restart to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: restarted -- name: Start a service role - cloudera.cluster.service_role: +- name: Remove a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - state: started - -- name: Remove a service role - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: 
"S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: absent """ RETURN = r""" role: - description: Details about the service role. + description: Details about the Cloudera Manager Service role. type: dict contains: name: - description: The cluster service role name. + description: + - The Cloudera Manager Service role name. + - Note, this is an auto-generated name and cannot be changed. type: str returned: always type: - description: The cluster service role type. + description: The Cloudera Manager Service role type. type: str returned: always sample: - - NAMENODE - - DATANODE - - TASKTRACKER + - HOSTMONITOR host_id: description: The unique ID of the cluster host. type: str returned: always service_name: - description: The name of the cluster service, which uniquely identifies it in a cluster. + description: The name of the Cloudera Manager Service, which uniquely identifies it in a deployment. type: str - returned: always + returned: when supported role_state: - description: State of the cluster service role. + description: State of the Cloudera Manager Service role. type: str returned: always sample: @@ -245,11 +198,11 @@ - STOPPED - NA commission_state: - description: Commission state of the cluster service role. + description: Commission state of the Cloudera Manager Service role. type: str returned: always health_summary: - description: The high-level health status of the cluster service role. + description: The high-level health status of the Cloudera Manager Service role. type: str returned: always sample: @@ -260,7 +213,7 @@ - CONCERNING - BAD config_staleness_status: - description: Status of configuration staleness for the cluster service role. + description: Status of configuration staleness for the Cloudera Manager Service role. type: str returned: always sample: @@ -268,7 +221,7 @@ - STALE_REFRESHABLE - STALE health_checks: - description: Lists all available health checks for cluster service role. + description: Lists all available health checks for Cloudera Manager Service role. type: list elements: dict returned: when supported @@ -299,7 +252,7 @@ type: bool returned: when supported maintenance_mode: - description: Whether the cluster service role is in maintenance mode. + description: Whether the Cloudera Manager Service role is in maintenance mode. type: bool returned: when supported maintenance_owners: @@ -314,16 +267,16 @@ - HOST - CONTROL_PLANE role_config_group_name: - description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. type: str returned: when supported tags: - description: The dictionary of tags for the cluster service role. + description: The dictionary of tags for the Cloudera Manager Service role. type: dict returned: when supported zoo_keeper_server_mode: description: - - The Zookeeper server mode for this cluster service role. + - The Zookeeper server mode for this Cloudera Manager Service role. - Note that for non-Zookeeper Server roles, this will be C(null). 
type: str returned: when supported @@ -397,7 +350,8 @@ def process(self): ), None, ) - current.config = self.role_api.read_role_config(current.name) + if current is not None: + current.config = self.role_api.read_role_config(current.name) except ApiException as ex: if ex.status != 404: raise ex @@ -419,16 +373,27 @@ def process(self): config=self.config, ) current = self.provision_role(new_role) - # # If it exists, but the type has changed, destroy and rebuild completely - # elif self.type and self.type != current.type: - # new_role = create_role( - # api_client=self.api_client, - # role_type=self.type, - # hostname=current.host_ref.hostname, - # host_id=current.host_ref.host_id, - # config=self.config - # ) - # current = self.reprovision_role(current, new_role) + # If it exists, but the host has changed, destroy and rebuild completely + elif ( + self.cluster_hostname is not None + and self.cluster_hostname != current.host_ref.hostname + ) or ( + self.cluster_host_id is not None + and self.cluster_host_id != current.host_ref.host_id + ): + if self.config: + new_config = self.config + else: + new_config = {c.name: c.value for c in current.config.items} + + new_role = create_role( + api_client=self.api_client, + role_type=current.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=new_config, + ) + current = self.reprovision_role(current, new_role) # Else it exists, so address any changes else: # Handle role override configurations @@ -560,6 +525,7 @@ def provision_role(self, role: ApiRole) -> ApiRole: self.module.fail_json( msg="Unable to create new role", role=to_native(role.to_dict()) ) + return created_role def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole: self.changed = True @@ -588,8 +554,11 @@ def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole msg="Unable to recreate role, " + existing_role.name, role=to_native(rebuilt_role.to_dict()), ) + return rebuilt_role + else: + return existing_role - def deprovision_role(self, role: ApiRole): + def deprovision_role(self, role: ApiRole) -> None: self.changed = True if self.module._diff: @@ -598,97 +567,6 @@ def deprovision_role(self, role: ApiRole): if not self.module.check_mode: self.role_api.delete_role(role.name) - # def xxxcreate_role(self) -> ApiRole: - # # Check for required creation parameters - # missing_params = [] - - # if self.type is None: - # missing_params.append("type") - - # if self.cluster_hostname is None and self.cluster_host_id is None: - # missing_params += ["cluster_hostname", "cluster_host_id"] - - # if missing_params: - # self.module.fail_json( - # msg=f"Unable to create new role, missing required arguments: {', '.join(sorted(missing_params)) }" - # ) - - # # Set up the role - # payload = ApiRole(type=str(self.type).upper()) - - # # Name - # if self.name: - # payload.name = self.name # No name allows auto-generation - - # # Host assignment - # host_ref = get_host_ref(self.api_client, self.cluster_hostname, self.cluster_host_id) - - # if host_ref is None: - # self.module.fail_json(msg="Invalid host reference") - # else: - # payload.host_ref = host_ref - - # # Role override configurations - # if self.config: - # payload.config = ApiConfigList(items=[ApiConfig(name=k, value=v) for k, v in self.config.items()]) - - # # Execute the creation - # self.changed = True - - # if self.module._diff: - # self.diff = dict( - # before={}, - # after=payload.to_dict(), - # ) - - # if not self.module.check_mode: - # created_role = next( - # ( - # 
iter( - # self.role_api.create_roles( - # body=ApiRoleList(items=[payload]), - # ).items - # ) - # ), - # {}, - # ) - - # # Maintenance - # if self.maintenance: - # if self.module._diff: - # self.diff["after"].update(maintenance_mode=True) - - # maintenance_cmd = self.role_api.enter_maintenance_mode( - # created_role.name - # ) - - # if maintenance_cmd.success is False: - # self.module.fail_json( - # msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" - # ) - - # if self.state in ["started", "restarted"]: - # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).start_command( - # body=ApiRoleNameList(items=[created_role.name]), - # )) - - # elif self.state == "stopped": - # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).stop_command( - # body=ApiRoleNameList(items=[created_role.name]), - # )) - - # if refresh: - # self.output = parse_role_result( - # self.role_api.read_role( - # self.cluster, - # created_role.name, - # self.service, - # view="full", - # ) - # ) - # else: - # self.output = parse_role_result(created_role) - def handle_commands(self, commands: ApiBulkCommandList): if commands.errors: error_msg = "\n".join(commands.errors) @@ -707,7 +585,7 @@ def main(): maintenance=dict(type="bool", aliases=["maintenance_mode"]), config=dict(type="dict", aliases=["params", "parameters"]), purge=dict(type="bool", default=False), - type=dict(required=True), + type=dict(required=True, aliases=["role_type"]), state=dict( default="present", choices=["present", "absent", "restarted", "started", "stopped"], diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index c68bf4ba..1a2f8423 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2024 Cloudera, Inc. +# Copyright 2025 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,17 +26,27 @@ ApiRole, ApiRoleConfigGroup, ApiRoleList, + ApiRoleNameList, + ApiRoleState, ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, CommandsResourceApi, MgmtRolesResourceApi, + MgmtRoleCommandsResourceApi, MgmtRoleConfigGroupsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" @@ -215,6 +225,86 @@ def provision_cm_role( api.delete_role(role_name=role_name) +def set_cm_role( + api_client: ApiClient, cluster: ApiCluster, role: ApiRole +) -> Generator[ApiRole]: + """Set a net-new Cloudera Manager Service role. Yields the new role, + resetting to any existing role upon completion. Use with 'yield from' + within a pytest fixture. 
+ """ + role_api = MgmtRolesResourceApi(api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(api_client, role.type).items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + if not role.host_ref: + cluster_api = ClustersResourceApi(api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + + role.host_ref = get_host_ref(api_client, host_id=hosts.items[0].host_id) + + # Create the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + if role.maintenance_mode: + role_api.enter_maintenance_mode(role_name=current_role.name) + + if role.role_state in [ApiRoleState.STARTING, ApiRoleState.STARTED]: + start_cmds = role_cmd_api.start_command( + body=ApiRoleNameList(items=[current_role.name]) + ) + if start_cmds.errors: + error_msg = "\n".join(start_cmds.errors) + raise Exception(error_msg) + + for cmd in start_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + # Yield the role under test + yield current_role + + # Remove the role under test + current_role = role_api.delete_role(role_name=current_role.name) + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + if restart_cmds.errors: + error_msg = "\n".join(restart_cmds.errors) + raise Exception(error_msg) + + for cmd in restart_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + def set_cm_role_config( api_client: ApiClient, role: ApiRole, params: dict, message: str ) -> Generator[ApiRole]: diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index a19c2b8d..cda5e8d9 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -42,6 +42,7 @@ ApiHostRefList, ApiRole, ApiRoleConfigGroup, + ApiRoleList, ApiRoleNameList, ApiRoleState, ApiService, @@ -66,6 +67,10 @@ Parcel, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, AnsibleExitJson, @@ -194,7 +199,23 @@ def cm_api_client(conn) -> ApiClient: @pytest.fixture(scope="session") def base_cluster(cm_api_client, request): - """Provision a CDH Base cluster.""" + """Provision a CDH Base cluster. If the variable 'CM_CLUSTER' is present, + will attempt to read and yield a reference to this cluster. Otherwise, + will yield a new base cluster with a single host, deleting the cluster + once completed. 
+ + Args: + cm_api_client (_type_): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + Exception: _description_ + Exception: _description_ + + Yields: + _type_: _description_ + """ cluster_api = ClustersResourceApi(cm_api_client) @@ -270,14 +291,32 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request) -> Generator[ApiService]: - """Provisions Cloudera Manager Service.""" +def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]: + """Provisions Cloudera Manager Service. If the Cloudera Manager Service + is present, will read and yield this reference. Otherwise, will + yield a new Cloudera Manager Service, deleting it after use. - api = MgmtServiceResourceApi(cm_api_client) + NOTE! A new Cloudera Manager Service will _not_ be provisioned if + there are any existing clusters within the deployment! Therefore, + you must only run this fixture to provision a net-new Cloudera Manager + Service on a bare deployment, i.e. Cloudera Manager and hosts only. + + Args: + cm_api_client (ApiClient): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + + Yields: + Generator[ApiService]: _description_ + """ + + cms_api = MgmtServiceResourceApi(cm_api_client) # Return if the Cloudera Manager Service is already present try: - yield api.read_service() + yield cms_api.read_service() return except ApiException as ae: if ae.status != 404 or "Cannot find management service." not in str(ae.body): @@ -289,9 +328,12 @@ def cms(cm_api_client, request) -> Generator[ApiService]: type="MGMT", ) - yield api.setup_cms(body=service) + cm_service = cms_api.setup_cms(body=service) + cms_api.auto_assign_roles() - api.delete_cms() + yield cm_service + + cms_api.delete_cms() @pytest.fixture(scope="function") @@ -419,6 +461,7 @@ def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: def host_monitor_role_group_config( cm_api_client, host_monitor_role, request ) -> Generator[ApiRoleConfigGroup]: + """Configures the base Role Config Group for the Host Monitor role of a Cloudera Manager Service.""" marker = request.node.get_closest_marker("role_config_group") if marker is None: @@ -532,6 +575,38 @@ def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[A ) +@pytest.fixture(scope="function") +def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: + role_api = MgmtRolesResourceApi(cm_api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + # Yield now that the role has been removed + yield + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + handle_commands(api_client=cm_api_client, commands=restart_cmds) + + def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): if commands.errors: error_msg = "\n".join(commands.errors) diff --git 
a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py index 2ec761d9..b1a9c98a 100644 --- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,6 +28,8 @@ ApiConfig, ApiConfigList, ApiRole, + ApiRoleList, + ApiRoleState, ClustersResourceApi, MgmtRolesResourceApi, ) @@ -36,46 +38,142 @@ from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, - provision_cm_role, - cm_role_config, + set_cm_role, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, ) LOG = logging.getLogger(__name__) +@pytest.fixture(scope="function") +def target_cm_role(cm_api_client, cms, base_cluster, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + role = ApiRole( + type="HOSTMONITOR", + ) + else: + role = marker.args[0] + role.type = "HOSTMONITOR" + + yield from set_cm_role(cm_api_client, base_cluster, role) + + +@pytest.fixture(scope="function") +def target_cm_role_cleared( + cm_api_client, base_cluster, host_monitor_cleared, request +) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + role = ApiRole( + type="HOSTMONITOR", + ) + else: + role = marker.args[0] + role.type = "HOSTMONITOR" + + role_api = MgmtRolesResourceApi(cm_api_client) + + if not role.host_ref: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=base_cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + + role.host_ref = get_host_ref(cm_api_client, host_id=hosts.items[0].host_id) + + # Create and yield the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + yield current_role + + # Clear out any remaining roles + remaining_roles = get_mgmt_roles(cm_api_client, "HOSTMONITOR") + + for r in remaining_roles.items: + role_api.delete_role(role_name=r.name) + + def test_missing_required(conn, module_args): module_args(conn) - with pytest.raises(AnsibleFailJson, match="parameters"): + with pytest.raises(AnsibleFailJson, match="type"): cm_service_role.main() -def test_missing_required_if(conn, module_args): - module_args( - { - **conn, - "parameters": dict(), - } - ) +def test_mutually_exclusive(conn, module_args): + module_args({**conn, "cluster_hostname": "hostname", "cluster_host_id": "host_id"}) - with pytest.raises(AnsibleFailJson, match="name, type"): + with pytest.raises( + AnsibleFailJson, + match="parameters are mutually exclusive: cluster_hostname|cluster_host_id", + ): cm_service_role.main() -def test_present_invalid_parameter(conn, module_args, host_monitor): +@pytest.mark.role(ApiRole()) +def test_relocate_host( + conn, module_args, cm_api_client, base_cluster, target_cm_role_cleared, request +): + cluster_api = ClustersResourceApi(cm_api_client) + + # Get second host of the cluster + hosts = cluster_api.list_hosts(cluster_name=base_cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + filtered_hosts = [ + h for h in hosts.items if h.host_id != target_cm_role_cleared.host_ref.host_id + ] + + if len(filtered_hosts) < 1: + raise Exception( + "Not enough hosts to reassign the Cloudera Manager Service role." 
+ ) + module_args( { **conn, - "role": host_monitor.name, - "parameters": dict(example="Example"), + "type": target_cm_role_cleared.type, + "cluster_hostname": filtered_hosts[0].hostname, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - with pytest.raises( - AnsibleFailJson, match="Unknown configuration attribute 'example'" - ): + expected = filtered_hosts[0].host_id + + with pytest.raises(AnsibleExitJson) as e: cm_service_role.main() + assert e.value.changed == True + assert expected == e.value.role["host_id"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected == e.value.role["host_id"] + @pytest.mark.role( ApiRole( @@ -87,19 +185,19 @@ def test_present_invalid_parameter(conn, module_args, host_monitor): ) ) ) -def test_set_parameters(conn, module_args, host_monitor_state, request): +def test_set_config(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_state.type, - "config": dict(mgmt_num_descriptor_fetch_tries=32), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=55), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + expected = dict(mgmt_num_descriptor_fetch_tries="55", process_start_secs="21") with pytest.raises(AnsibleExitJson) as e: cm_service_role.main() @@ -115,171 +213,206 @@ def test_set_parameters(conn, module_args, host_monitor_state, request): assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 12), + ApiConfig("process_start_secs", 22), + ] + ) + ) ) -def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): +def test_unset_config(conn, module_args, target_cm_role, request): module_args( { **conn, - "role_type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=None), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", - # _ansible_check_mode=True, - # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + expected = dict(process_start_secs="22") with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 13), + ApiConfig("process_start_secs", 23), + ] + ) + ) ) -def test_unset_parameters(conn, module_args, host_monitor_config, request): +def 
test_set_config_purge(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=33), + "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - expected = dict(process_start_secs="21") + expected = dict(mgmt_num_descriptor_fetch_tries="33") with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 14), + ApiConfig("process_start_secs", 24), + ] + ) + ) ) -def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): +def test_set_config_purge_all(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "type": target_cm_role.type, + "config": dict(), + "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - expected = dict(process_start_secs="21") - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert len(e.value.role["config"]) == 0 # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert len(e.value.role["config"]) == 0 -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): +@pytest.mark.role(ApiRole(maintenance_mode=False)) +def test_maintenance_mode_enabled(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), - "purge": True, + "type": target_cm_role.type, + "maintenance": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32") - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert e.value.role["maintenance_mode"] == True # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] 
for c in e.value.config}.items() + assert e.value.role["maintenance_mode"] == True -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_set_parameters_with_purge_role_type( - conn, module_args, host_monitor_config, request -): +@pytest.mark.role(ApiRole(maintenance_mode=True)) +def test_maintenance_mode_disabled(conn, module_args, target_cm_role, request): module_args( { **conn, - "role_type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), - "purge": True, + "type": target_cm_role.type, + "maintenance": False, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32") + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["maintenance_mode"] == False + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["maintenance_mode"] == False + + +@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED)) +def test_state_started(conn, module_args, target_cm_role, request): + module_args( + { + **conn, + "type": target_cm_role.type, + "state": "started", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert e.value.role["role_state"] == "STARTED" # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert e.value.role["role_state"] == "STARTED" -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_purge_all_parameters(conn, module_args, host_monitor_config, request): +@pytest.mark.role(ApiRole(role_state=ApiRoleState.STARTED)) +def test_state_started(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(), - "purge": True, + "type": target_cm_role.type, + "state": "stopped", "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, @@ -287,31 +420,26 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request): ) with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert len(e.value.config) == 0 + assert e.value.role["role_state"] == "STOPPED" # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert len(e.value.config) == 0 + assert e.value.role["role_state"] == "STOPPED" -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_purge_all_parameters_role_type( - conn, module_args, host_monitor_config, request -): +@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED)) +def test_state_restarted(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_config.type, - "parameters": dict(), - 
"purge": True, + "type": target_cm_role.type, + "state": "restarted", "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, @@ -319,14 +447,40 @@ def test_purge_all_parameters_role_type( ) with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert len(e.value.config) == 0 + assert e.value.role["role_state"] == "STARTED" + + # Idempotency is not possible due to this state + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == "STARTED" + + +def test_state_absent(conn, module_args, target_cm_role_cleared, request): + module_args( + { + **conn, + "type": target_cm_role_cleared.type, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert not e.value.role # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert len(e.value.config) == 0 + assert not e.value.role