From c7e26f381348eb96435ea36f4e5dd28d9b560397 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 9 Sep 2024 15:18:46 +0200 Subject: [PATCH 1/8] autoscaling_instance and related code --- meta/runtime.yml | 2 + plugins/module_utils/_autoscaling/__init__.py | 0 plugins/module_utils/_autoscaling/common.py | 25 + plugins/module_utils/_autoscaling/groups.py | 22 + .../module_utils/_autoscaling/instances.py | 20 + .../_autoscaling/transformations.py | 71 ++ plugins/module_utils/_autoscaling/waiters.py | 84 +++ plugins/module_utils/autoscaling.py | 69 +- plugins/modules/autoscaling_instance.py | 638 ++++++++++++++++++ plugins/modules/autoscaling_instance_info.py | 145 ++++ .../targets/autoscaling_instance/aliases | 4 + .../autoscaling_instance/defaults/main.yml | 11 + .../autoscaling_instance/meta/main.yml | 4 + .../tasks/env_cleanup.yml | 57 ++ .../autoscaling_instance/tasks/env_setup.yml | 94 +++ .../autoscaling_instance/tasks/main.yml | 19 + .../autoscaling_instance/tasks/tests.yml | 3 + .../autoscaling_instance/tmp/inventory | 8 + .../targets/autoscaling_instance/tmp/main.yml | 34 + .../targets/autoscaling_instance/tmp/runme.sh | 12 + .../targets/setup_ec2_vpc/tasks/cleanup.yml | 22 +- .../test_autoscaling_error_handler.py | 131 ++++ .../test_autoscaling_resource_transforms.py | 109 +++ tox.ini | 6 + 24 files changed, 1572 insertions(+), 18 deletions(-) create mode 100644 plugins/module_utils/_autoscaling/__init__.py create mode 100644 plugins/module_utils/_autoscaling/common.py create mode 100644 plugins/module_utils/_autoscaling/groups.py create mode 100644 plugins/module_utils/_autoscaling/instances.py create mode 100644 plugins/module_utils/_autoscaling/transformations.py create mode 100644 plugins/module_utils/_autoscaling/waiters.py create mode 100644 plugins/modules/autoscaling_instance.py create mode 100644 plugins/modules/autoscaling_instance_info.py create mode 100644 tests/integration/targets/autoscaling_instance/aliases create mode 100644 
tests/integration/targets/autoscaling_instance/defaults/main.yml create mode 100644 tests/integration/targets/autoscaling_instance/meta/main.yml create mode 100644 tests/integration/targets/autoscaling_instance/tasks/env_cleanup.yml create mode 100644 tests/integration/targets/autoscaling_instance/tasks/env_setup.yml create mode 100644 tests/integration/targets/autoscaling_instance/tasks/main.yml create mode 100644 tests/integration/targets/autoscaling_instance/tasks/tests.yml create mode 100644 tests/integration/targets/autoscaling_instance/tmp/inventory create mode 100644 tests/integration/targets/autoscaling_instance/tmp/main.yml create mode 100755 tests/integration/targets/autoscaling_instance/tmp/runme.sh create mode 100644 tests/unit/module_utils/autoscaling/test_autoscaling_error_handler.py create mode 100644 tests/unit/module_utils/autoscaling/test_autoscaling_resource_transforms.py diff --git a/meta/runtime.yml b/meta/runtime.yml index a17723da068..640d817bbb1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -2,6 +2,8 @@ requires_ansible: ">=2.15.0" action_groups: aws: + - autoscaling_instance + - autoscaling_instance_info - autoscaling_group - autoscaling_group_info - autoscaling_instance_refresh diff --git a/plugins/module_utils/_autoscaling/__init__.py b/plugins/module_utils/_autoscaling/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/module_utils/_autoscaling/common.py b/plugins/module_utils/_autoscaling/common.py new file mode 100644 index 00000000000..279d316938d --- /dev/null +++ b/plugins/module_utils/_autoscaling/common.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# try: +# import botocore +# except ImportError: +# pass # Modules are responsible for handling this. 
+ +from ..botocore import is_boto3_error_code +from ..errors import AWSErrorHandler +from ..exceptions import AnsibleAWSError + + +class AnsibleAutoScalingError(AnsibleAWSError): + pass + + +class AutoScalingErrorHandler(AWSErrorHandler): + _CUSTOM_EXCEPTION = AnsibleAutoScalingError + + @classmethod + def _is_missing(cls): + return is_boto3_error_code("NoSuchEntity") diff --git a/plugins/module_utils/_autoscaling/groups.py b/plugins/module_utils/_autoscaling/groups.py new file mode 100644 index 00000000000..8b4f220ed98 --- /dev/null +++ b/plugins/module_utils/_autoscaling/groups.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ..retries import AWSRetry + +# from .common import AnsibleAutoScalingError +from .common import AutoScalingErrorHandler + + +@AutoScalingErrorHandler.list_error_handler("list auto scaling groups", default_value=[]) +@AWSRetry.jittered_backoff() +def describe_auto_scaling_groups(client, group_names=None, filters=None): + args = {} + if group_names: + args["AutoScalingGroupNames"] = group_names + if filters: + args["Filters"] = filters + + paginator = client.get_paginator("describe_auto_scaling_groups") + return paginator.paginate(**args).build_full_result()["AutoScalingGroups"] diff --git a/plugins/module_utils/_autoscaling/instances.py b/plugins/module_utils/_autoscaling/instances.py new file mode 100644 index 00000000000..e23271dac6a --- /dev/null +++ b/plugins/module_utils/_autoscaling/instances.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ..retries import AWSRetry + +# from .common import AnsibleAutoScalingError +from .common import AutoScalingErrorHandler + + +@AutoScalingErrorHandler.list_error_handler("list auto scaling instances", 
default_value=[]) +@AWSRetry.jittered_backoff() +def describe_auto_scaling_instances(client, instance_ids=None): + args = {} + if instance_ids: + args["InstanceIds"] = instance_ids + + paginator = client.get_paginator("describe_auto_scaling_instances") + return paginator.paginate(**args).build_full_result()["AutoScalingInstances"] diff --git a/plugins/module_utils/_autoscaling/transformations.py b/plugins/module_utils/_autoscaling/transformations.py new file mode 100644 index 00000000000..e17da21db7b --- /dev/null +++ b/plugins/module_utils/_autoscaling/transformations.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from typing import Optional + + from ..transformation import AnsibleAWSResource + from ..transformation import AnsibleAWSResourceList + from ..transformation import BotoResource + from ..transformation import BotoResourceList + +from ..transformation import boto3_resource_list_to_ansible_dict +from ..transformation import boto3_resource_to_ansible_dict + + +def _inject_asg_name( + instance: BotoResource, + group_name: Optional[str] = None, +) -> BotoResource: + if not group_name: + return instance + if "AutoScalingGroupName" in instance: + return instance + instance["AutoScalingGroupName"] = group_name + return instance + + +def normalize_autoscaling_instance( + instance: BotoResource, + group_name: Optional[str] = None, +) -> AnsibleAWSResource: + """Converts an AutoScaling Instance from the CamelCase boto3 format to the snake_case Ansible format. + + Also handles inconsistencies in the output between describe_autoscaling_group() and describe_autoscaling_instances(). 
+ """ + if not instance: + return instance + + # describe_autoscaling_group doesn't add AutoScalingGroupName + instance = _inject_asg_name(instance, group_name) + + try: + # describe_autoscaling_group and describe_autoscaling_instances aren't consistent + instance["HealthStatus"] = instance["HealthStatus"].upper() + except KeyError: + pass + + return boto3_resource_to_ansible_dict(instance, force_tags=False) + + +def normalize_autoscaling_instances( + autoscaling_instances: BotoResourceList, + group_name: Optional[str] = None, +) -> AnsibleAWSResourceList: + """Converts a list of AutoScaling Instances from the CamelCase boto3 format to the snake_case Ansible format""" + if not autoscaling_instances: + return autoscaling_instances + autoscaling_instances = [normalize_autoscaling_instance(i, group_name) for i in autoscaling_instances] + return sorted(autoscaling_instances, key=lambda d: d.get("instance_id", None)) + + +def normalize_autoscaling_groups(autoscaling_groups: BotoResourceList) -> AnsibleAWSResourceList: + """Converts a list of AutoScaling Groups from the CamelCase boto3 format to the snake_case Ansible format""" + autoscaling_groups = boto3_resource_list_to_ansible_dict(autoscaling_groups) + return sorted(autoscaling_groups, key=lambda d: d.get("auto_scaling_group_name", None)) diff --git a/plugins/module_utils/_autoscaling/waiters.py b/plugins/module_utils/_autoscaling/waiters.py new file mode 100644 index 00000000000..e5feabfbdd0 --- /dev/null +++ b/plugins/module_utils/_autoscaling/waiters.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ..waiter import BaseWaiterFactory + + +def _fail_on_instance_lifecycle_states(state): + return dict(state="failure", matcher="pathAny", expected=state, argument="AutoScalingInstances[].LifecycleState") + + +def _retry_on_instance_lifecycle_states(state): + return 
dict(state="retry", matcher="pathAny", expected=state, argument="AutoScalingInstances[].LifecycleState") + + +def _success_on_instance_lifecycle_states(state): + return dict(state="success", matcher="pathAll", expected=state, argument="AutoScalingInstances[].LifecycleState") + + +def _no_instances(result): + return dict(state=result, matcher="path", expected=True, argument="length(AutoScalingInstances[]) == `0`") + + +class AutoscalingWaiterFactory(BaseWaiterFactory): + @property + def _waiter_model_data(self): + data = dict( + instances_in_service=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _fail_on_instance_lifecycle_states("Terminating"), + _fail_on_instance_lifecycle_states("Terminated"), + _fail_on_instance_lifecycle_states("Terminating:Wait"), + _fail_on_instance_lifecycle_states("Terminating:Proceed"), + _fail_on_instance_lifecycle_states("Detaching"), + _fail_on_instance_lifecycle_states("Detached"), + _success_on_instance_lifecycle_states("InService"), + ], + ), + instances_in_standby=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _fail_on_instance_lifecycle_states("Terminating"), + _fail_on_instance_lifecycle_states("Terminated"), + _fail_on_instance_lifecycle_states("Terminating:Wait"), + _fail_on_instance_lifecycle_states("Terminating:Proceed"), + _fail_on_instance_lifecycle_states("Detaching"), + _fail_on_instance_lifecycle_states("Detached"), + _success_on_instance_lifecycle_states("Standby"), + ], + ), + instances_detached=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _fail_on_instance_lifecycle_states("Terminating"), + _fail_on_instance_lifecycle_states("Terminated"), + _fail_on_instance_lifecycle_states("Terminating:Wait"), + _fail_on_instance_lifecycle_states("Terminating:Proceed"), + _success_on_instance_lifecycle_states("Detached"), + _no_instances("success"), + ], + ), + 
instances_terminated=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _success_on_instance_lifecycle_states("Terminated"), + _no_instances("success"), + ], + ), + ) + + return data + + +waiter_factory = AutoscalingWaiterFactory() diff --git a/plugins/module_utils/autoscaling.py b/plugins/module_utils/autoscaling.py index 95430aa833a..d3a86f526bb 100644 --- a/plugins/module_utils/autoscaling.py +++ b/plugins/module_utils/autoscaling.py @@ -1,29 +1,68 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2024 Ansible Project +# Copyright: Contributors to the Ansible project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from typing import Any -from typing import Dict -from typing import List -from typing import Optional +# It would be nice to be able to use autoscaling.XYZ, but we're bound by Ansible's "empty-init" +# policy: https://docs.ansible.com/ansible-core/devel/dev_guide/testing/sanity/empty-init.html -from .botocore import is_boto3_error_code -from .errors import AWSErrorHandler -from .exceptions import AnsibleAWSError + +from __future__ import annotations + +import typing + +from ._autoscaling import groups as _groups +from ._autoscaling import instances as _instances +from ._autoscaling import transformations as _transformations +from ._autoscaling import waiters as _waiters +from ._autoscaling.common import AnsibleAutoScalingError # pylint: disable=unused-import +from ._autoscaling.common import AutoScalingErrorHandler # pylint: disable=unused-import from .retries import AWSRetry +if typing.TYPE_CHECKING: + from typing import Any + from typing import Dict + from typing import List + from typing import Optional + + from .retries import RetryingBotoClientWrapper + from .transformation import AnsibleAWSResourceList + from .transformation import BotoResourceList + + +def get_autoscaling_groups( + client: RetryingBotoClientWrapper, group_names: Optional[List[str]] = None +) -> 
AnsibleAWSResourceList: + groups = _groups.describe_auto_scaling_groups(client, group_names) + return _transformations.normalize_autoscaling_groups(groups) + + +def _get_autoscaling_instances( + client: RetryingBotoClientWrapper, instance_ids: Optional[List[str]] = None, group_name: Optional[str] = None +) -> BotoResourceList: + if group_name: + try: + groups = _groups.describe_auto_scaling_groups(client, [group_name]) + return groups[0]["Instances"] + except (KeyError, IndexError): + return None + return _instances.describe_auto_scaling_instances(client, instance_ids) + + +def get_autoscaling_instances( + client: RetryingBotoClientWrapper, instance_ids: Optional[List[str]] = None, group_name: Optional[str] = None +) -> AnsibleAWSResourceList: + instances = _get_autoscaling_instances(client, instance_ids=instance_ids, group_name=group_name) + return _transformations.normalize_autoscaling_instances(instances, group_name=group_name) -class AnsibleAutoScalingError(AnsibleAWSError): - pass +def get_autoscaling_waiter(client: RetryingBotoClientWrapper, waiter_name: str) -> Any: + return _waiters.waiter_factory.get_waiter(client, waiter_name) -class AutoScalingErrorHandler(AWSErrorHandler): - _CUSTOM_EXCEPTION = AnsibleAutoScalingError - @classmethod - def _is_missing(cls): - return is_boto3_error_code("") +# ==================================== +# TODO Move these about and refactor +# ==================================== @AutoScalingErrorHandler.list_error_handler("describe InstanceRefreshes", {}) diff --git a/plugins/modules/autoscaling_instance.py b/plugins/modules/autoscaling_instance.py new file mode 100644 index 00000000000..02c4fb37644 --- /dev/null +++ b/plugins/modules/autoscaling_instance.py @@ -0,0 +1,638 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations + +DOCUMENTATION = r""" +--- 
+module: autoscaling_instance +version_added: 8.4.0 +short_description: manage instances associated with AWS AutoScaling Groups (ASGs) +description: + - Manage instances associated with AWS AutoScaling Groups (ASGs). +author: + - "Mark Chappell (@tremble)" +options: + group_name: + description: + - Name of the AutoScaling Group to manage. + type: str + required: True + state: + description: + - The expected state of the instances. + - V(present) - The instance(s) should be attached to the AutoScaling Group and in service. + - V(attached) - The instance(s) should be attached to the AutoScaling Group. + Instances in Standby will remain in standby. + - V(standby) - The instance(s) should be placed into standby. + Instances must already be part of the AutoScaling Group. + - V(detached) - The instance(s) will be detached from the AutoScaling Group. + - V(terminated) - The instance(s) will be terminated. + - B(Note:) When adding instances to an AutoScaling Group or returning instances to service + from standby, the desired capacity is B(always) incremented. If the total number of + instances would exceed the maximum size of the group then the operation will fail. + choices: ['present', 'attached', 'terminated', 'detached', 'standby'] + default: present + type: str + instance_ids: + description: + - The IDs of the EC2 instances. + - Required if O(state) is one of V(standby), V(detached), V(terminated). + type: list + elements: str + purge_instances: + description: + - Ignored unless O(state=present) or O(state=attached). + - If O(purge_instances=True), any instances not in O(instance_ids) will be scheduled for B(termination). + - B(Note:) Instances will be scheduled for termination B(after) any new instances are added to + the AutoScaling Group and, if O(wait=True), they will be terminated B(after) the new instances + have reached the expected state. 
+ default: false + type: bool + decrement_desired_capacity: + description: + - When O(decrement_desired_capacity=True), detaching instances, terminating instances, or + placing instances in standby mode will decrement the desired capacity of the AutoScaling Group + default: false + type: bool + health: + description: + - Sets the health of an instance to a specific state. + type: str + choices: ["Healthy", "Unhealthy"] + respect_grace_period: + description: + - Set O(respect_grace_period=False) to ignore the grace period associated with the AutoScaling + group when modifying the O(health). + - Ignored unless O(health) is set. + - AWS defaults to respecting the grace period when modifying the health state of an instance. + type: bool + protection: + description: + - Sets the scale-in protection attribute. + type: bool + wait: + description: + - When O(wait=True) will wait for instances to reach the requested state before returning. + type: bool + default: True + wait_timeout: + description: + - Maximum time to wait for instances to reach the desired state. + type: int + default: 120 +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +""" + +RETURN = r""" +auto_scaling_instances: + description: A description of the EC2 instances attached to an Auto Scaling group. + returned: always + type: list + contains: + availability_zone: + description: The availability zone that the instance is in. + returned: always + type: str + sample: "us-east-1a" + health_status: + description: The last reported health status of the instance. + returned: always + type: str + sample: "Healthy" + instance_id: + description: The ID of the instance. + returned: always + type: str + sample: "i-123456789abcdef01" + instance_type: + description: The instance type of the instance. 
+ returned: always + type: str + sample: "t3.micro" + launch_configuration_name: + description: The name of the launch configuration used when launching the instance. + returned: When the instance was launched using an Auto Scaling launch configuration. + type: str + sample: "ansible-test-49630214-mchappel-thinkpadt14gen3-asg-instance-1" + launch_template: + description: A description of the launch template used when launching the instance. + returned: When the instance was launched using an Auto Scaling launch template. + type: dict + contains: + launch_template_id: + description: The ID of the launch template used when launching the instance. + returned: always + type: str + sample: "12345678-abcd-ef12-2345-6789abcdef01" + launch_template_name: + description: The name of the launch template used when launching the instance. + returned: always + type: str + sample: "example-launch-configuration" + version: + description: The version of the launch template used when launching the instance. + returned: always + type: str + sample: "$Default" + lifecycle_state: + description: The lifecycle state of the instance. + returned: always + type: str + sample: "InService" + protected_from_scale_in: + description: Whether the instance is protected from termination when the Auto Scaling group is scaled in. 
+ returned: always + type: bool + sample: false +""" + +import typing +from copy import deepcopy + +from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import AnsibleAutoScalingError +from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import AutoScalingErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import get_autoscaling_instances +from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import get_autoscaling_waiter +from ansible_collections.amazon.aws.plugins.module_utils.errors import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.waiter import custom_waiter_config + +if typing.TYPE_CHECKING: + from typing import Any + from typing import Dict + from typing import List + from typing import Set + from typing import Tuple + + from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper + from ansible_collections.amazon.aws.plugins.module_utils.transformations import AnsibleAWSResourceList + + +def _instance_ids_in_states(instances: List, states: List[str]) -> Set[str]: + states = [s.lower() for s in states] + return {i.get("instance_id") for i in instances if i.get("lifecycle_state", "").lower() in states} + + +@AutoScalingErrorHandler.common_error_handler("detach auto scaling instances from group") +@AWSRetry.jittered_backoff() +def _detach_instances( + client: RetryingBotoClientWrapper, instance_ids: Set[str], group_name: str, decrement_capacity: bool +): + return client.detach_instances( + InstanceIds=list(instance_ids), + AutoScalingGroupName=group_name, + ShouldDecrementDesiredCapacity=decrement_capacity, + ) + + +@AutoScalingErrorHandler.common_error_handler("attach auto scaling instances to group") +@AWSRetry.jittered_backoff() +def 
_attach_instances(client: RetryingBotoClientWrapper, instance_ids: Set[str], group_name: str): + return client.attach_instances( + InstanceIds=list(instance_ids), + AutoScalingGroupName=group_name, + ) + + +@AutoScalingErrorHandler.common_error_handler("terminate auto scaling instances") +@AWSRetry.jittered_backoff() +def _terminate_instances( + client: RetryingBotoClientWrapper, instance_ids: Set[str], group_name: str, decrement_capacity: bool +): + return client.terminate_instance_in_auto_scaling_group( + InstanceIds=list(instance_ids), + AutoScalingGroupName=group_name, + ShouldDecrementDesiredCapacity=decrement_capacity, + ) + + +@AutoScalingErrorHandler.common_error_handler("place auto scaling instances into standby") +@AWSRetry.jittered_backoff() +def _enter_standby( + client: RetryingBotoClientWrapper, instance_ids: Set[str], group_name: str, decrement_capacity: bool +): + return client.enter_standby( + InstanceIds=list(instance_ids), + AutoScalingGroupName=group_name, + ShouldDecrementDesiredCapacity=decrement_capacity, + ) + + +@AutoScalingErrorHandler.common_error_handler("return auto scaling instances to group from standby") +@AWSRetry.jittered_backoff() +def _leave_standby(client: RetryingBotoClientWrapper, instance_ids: Set[str], group_name: str): + return client.exit_standby( + InstanceIds=list(instance_ids), + AutoScalingGroupName=group_name, + ) + + +def _token_instance(instance_id, group_name): + # Returns the minimum information we need for a new instance when adding a new node in check mode + return dict( + instance_id=instance_id, + auto_scaling_group_name=group_name, + health_status="Healthy", + ) + + +def wait_instance_state( + client: RetryingBotoClientWrapper, + state: str, + check_mode: bool, + group_name: str, + instance_ids: Set[str], + wait: bool, + wait_timeout: int, +) -> None: + if not wait: + return + if check_mode: + return + if not instance_ids: + return + + waiter_map = { + "Standby": "instances_in_standby", + "Terminated": 
"instances_terminated", + "Detached": "instances_detached", + "InService": "instances_in_service", + } + + waiter_config = custom_waiter_config(timeout=wait_timeout, default_pause=10) + + waiter = get_autoscaling_waiter(client, waiter_map[state]) + AutoScalingErrorHandler.common_error_handler(f"wait for instances to reach {state}")(waiter.wait)( + InstanceIds=list(instance_ids), + WaiterConfig=waiter_config, + ) + + return + + +def ensure_instance_absent( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + instance_ids: List[str], + decrement_desired_capacity: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + instance_ids = set(instance_ids) + + # We don't need to change these instances, we may need to wait for them + detached_ids = _instance_ids_in_states(instances_start, ["Detached", "Detaching"]) & instance_ids + # On the basis of "be conservative in what you do, be liberal in what you accept from others" + # We'll treat instances that someone else has terminated, as "detached" from the ASG, since + # they won't be attached to the ASG. 
+ terminating_ids = ( + _instance_ids_in_states( + instances_start, ["Terminating", "Terminating:Wait", "Terminating:Proceed", "Terminated"] + ) + & instance_ids + ) + + # We need to wait for these instances to enter "InService" before we can do anything with them + pending_ids = ( + _instance_ids_in_states(instances_start, ["Pending", "Pending:Proceed", "Pending:Wait", "EnteringStandby"]) + & instance_ids + ) + # These instances are ready to detach + ready_ids = _instance_ids_in_states(instances_start, ["InService", "Standby"]) & instance_ids + + # We don't normally return so we wait for Detaching instances if wait=True + changed_ids = pending_ids | ready_ids + changed = bool(changed_ids) + if check_mode: + instances_changed = deepcopy(instances_start) + for instance in instances_changed: + if instance.get("instance_id") in changed_ids: + instance["lifecycle_state"] = "Detached" + return changed, instances_changed + + if pending_ids: + # We have to wait for instances to transition to InService + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + + if changed: + _detach_instances(client, changed_ids, group_name, decrement_desired_capacity) + + if terminating_ids: + wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) + + detaching_ids = changed_ids + if detaching_ids: + wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return changed, instances_complete + + +def ensure_instance_present( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + instance_ids: List[str], + decrement_desired_capacity: bool, + purge: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + instance_ids = set(instance_ids) + all_ids = {i.get("instance_id") for i in 
instances_start} + + # We just need to wait for these + pending_ids = ( + _instance_ids_in_states(instances_start, ["Pending", "Pending:Proceed", "Pending:Wait"]) & instance_ids + ) + # We need to wait for these before we can attach/re-activate them + detaching_ids = _instance_ids_in_states(instances_start, ["Detaching"]) & instance_ids + entering_ids = _instance_ids_in_states(instances_start, ["EnteringStandby"]) & instance_ids + # These instances need to be brought out of standby + standby_ids = _instance_ids_in_states(instances_start, ["Standby"]) & instance_ids + # These instances need to be attached + missing_ids = instance_ids - all_ids + missing_ids |= _instance_ids_in_states(instances_start, ["Detached"]) & instance_ids + + # Ids that need to be removed + purge_ids = (all_ids - instance_ids) if purge else set() + + changed_ids = (detaching_ids | entering_ids | standby_ids | missing_ids) & instance_ids + changed = bool(changed_ids | purge_ids) + if check_mode: + instances_changed = deepcopy(instances_start) + if missing_ids: + for instance in list(missing_ids): + instances_changed.append(_token_instance(instance, group_name)) + instances_changed = sorted(instances_changed, key=lambda d: d.get("instance_id", None)) + for instance in instances_changed: + if instance.get("instance_id") in purge_ids: + instance["lifecycle_state"] = "Terminated" + if instance.get("instance_id") in changed_ids: + instance["lifecycle_state"] = "InService" + return changed, instances_changed + + if not (changed or pending_ids or purge_ids): + return False, instances_start + + if detaching_ids: + # We have to wait for instances to transition to Detached before we can re-attach them + wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + if bool(detaching_ids | missing_ids): + _attach_instances(client, detaching_ids | missing_ids, group_name) + + if entering_ids: + # We have to wait for instances to transition to Standby before we can 
tell them to leave standby + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + if bool(entering_ids | standby_ids): + _leave_standby(client, entering_ids | standby_ids, group_name) + + # This includes potentially waiting for instances which were Pending when we started + wait_instance_state(client, "InService", check_mode, group_name, instance_ids, True, wait_timeout) + + # While, in theory, we could make the ordering of Add/Remove configurable, the logic becomes + # difficult to test. As such we're going to hard code the order of operations. + # Add/Wait/Terminate is the order least likely to result in 0 available + # instances, so we do any termination after ensuring instances are InService. + if purge_ids: + _terminate_instances(client, purge_ids, group_name, decrement_desired_capacity) + wait_instance_state(client, "Terminated", check_mode, group_name, purge_ids, True, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return changed, instances_complete + + +def ensure_instance_standby( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + instance_ids: List[str], + decrement_desired_capacity: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + instance_ids = set(instance_ids) + + # We need to wait for these instances to enter "InService" before we can do anything with them + pending_ids = ( + _instance_ids_in_states(instances_start, ["Pending", "Pending:Proceed", "Pending:Wait"]) & instance_ids + ) + # These instances are ready to move to Standby + ready_ids = _instance_ids_in_states(instances_start, ["InService"]) & instance_ids + # These instances are moving into Standby + entering_ids = _instance_ids_in_states(instances_start, ["EnteringStandby"]) & instance_ids + + changed_ids = pending_ids | ready_ids + changed = bool(changed_ids) + if check_mode: + 
instances_changed = deepcopy(instances_start) + for instance in instances_changed: + if instance.get("instance_id") in changed_ids: + instance["lifecycle_state"] = "Standby" + return changed, instances_changed + + if not (changed or entering_ids): + return False, instances_start + + if pending_ids: + # We have to wait for instances to transition to InService + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + if changed: + _enter_standby(client, changed_ids, group_name, decrement_desired_capacity) + + # This includes potentially waiting for instances which were "Entering" Standby when we started + wait_instance_state(client, "Standby", check_mode, group_name, instance_ids, True, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return changed, instances_complete + + +def ensure_instance_pool( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + state: str, + instance_ids: List[str], + purge_instances: bool, + decrement_desired_capacity: bool, + respect_grace_period: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + if state == "standby": + return ensure_instance_standby( + client, + check_mode, + instances_start, + group_name, + instance_ids or [], + decrement_desired_capacity, + wait, + wait_timeout, + ) + if state == "detached": + return ensure_instance_absent( + client, + check_mode, + instances_start, + group_name, + instance_ids or [], + decrement_desired_capacity, + wait, + wait_timeout, + ) + + # Not valid for standby/terminated/detached + if instance_ids is None: + instance_ids = [i.get("instance_id") for i in instances_start] + + if state == "present": + return ensure_instance_present( + client, + check_mode, + instances_start, + group_name, + instance_ids, + decrement_desired_capacity, + purge_instances, + wait, + wait_timeout, + ) + + return False, 
instances_start
+
+
+def _validate_standby_conditions(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None:
+    instance_ids = set(params.get("instance_ids"))
+    all_ids = {i.get("instance_id") for i in instances}
+
+    missing_ids = instance_ids - all_ids
+    if missing_ids:
+        raise AnsibleAutoScalingError(
+            message=f"Unable to place instance(s) ({missing_ids}) into Standby - instances not attached to AutoScaling Group ({params['group_name']})",
+        )
+
+    # We don't need to change these instances, we may need to wait for them
+    standby_ids = _instance_ids_in_states(instances, ["Standby", "EnteringStandby"])
+    # We need to wait for these instances to enter "InService" before we can do anything with them
+    pending_ids = _instance_ids_in_states(instances, ["Pending", "Pending:Proceed", "Pending:Wait"])
+    # These instances are ready to move to Standby
+    ready_ids = _instance_ids_in_states(instances, ["InService"])
+
+    bad_ids = all_ids - standby_ids - pending_ids - ready_ids
+    if bad_ids:
+        raise AnsibleAutoScalingError(
+            message=f"Unable to place instance(s) ({bad_ids}) into Standby - instances not in a state that can transition to Standby or InService",
+        )
+
+    if pending_ids and not params.get("wait"):
+        raise AnsibleAutoScalingError(
+            message=f"Unable to place instance(s) ({pending_ids}) into Standby - currently in a pending state and wait is disabled",
+        )
+
+    return
+
+
+def validate_params(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None:
+    if params["state"] in ["absent", "terminate"] and params["health"] is not None:
+        raise AnsibleAutoScalingError(message=f"Unable to set instance health when state is {params['state']}")
+
+    if params["state"] == "standby":
+        _validate_standby_conditions(params, instances)
+
+    return
+
+
+def do(module):
+    client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff())
+
+    instances_start = get_autoscaling_instances(client, group_name=module.params["group_name"])
+    
validate_params(module.params, instances_start) + + changed, instances = ensure_instance_pool( + client, + check_mode=module.check_mode, + instances_start=deepcopy(instances_start), + group_name=module.params["group_name"], + state=module.params["state"], + instance_ids=module.params["instance_ids"], + purge_instances=module.params["purge_instances"], + decrement_desired_capacity=module.params["decrement_desired_capacity"], + respect_grace_period=module.params["respect_grace_period"], + wait=module.params["wait"], + wait_timeout=module.params["wait_timeout"], + ) + + # changed, instances = ensure_instance_health( + # client, + # check_mode=module.check_mode, + # instances_start=deepcopy(instances), + # group_name=module.params["group_name"], + # instance_ids=module.params["instance_ids"], + # decrement_desired_capacity=module.params["decrement_desired_capacity"], + # respect_grace_period=module.params["respect_grace_period"], + # wait=module.params["wait"], + # wait_timeout=module.params["wait_timeout"], + # ) + + result = {"changed": changed, "auto_scaling_instances": instances} + + if module._diff: # pylint: disable=protected-access + result["diff"] = dict( + before=dict(auto_scaling_instances=instances_start), + after=dict(auto_scaling_instances=instances), + ) + + module.exit_json(**result) + + +def main(): + argument_spec = dict( + group_name=dict(type="str", required=True), + state=dict(choices=["present", "attached", "terminated", "detached", "standby"], default="present", type="str"), + instance_ids=dict(type="list", elements="str"), + purge_instances=dict(default=False, type="bool"), + decrement_desired_capacity=dict(default=False, type="bool"), + health=dict(type="str", choices=["Healthy", "Unhealthy"]), + respect_grace_period=dict(type="bool"), + protection=dict(type="bool"), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=120), + ) + + required_if = [ + ["state", "terminated", ["instance_ids"]], + ["state", "detached", 
["instance_ids"]], + ["state", "standby", ["instance_ids"]], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + ) + + try: + do(module) + except AnsibleAWSError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/autoscaling_instance_info.py b/plugins/modules/autoscaling_instance_info.py new file mode 100644 index 00000000000..ef15e91fa02 --- /dev/null +++ b/plugins/modules/autoscaling_instance_info.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations + +DOCUMENTATION = r""" +--- +module: autoscaling_instance_info +version_added: 8.3.0 +short_description: describe instances associated with AWS AutoScaling Groups (ASGs) +description: + - Describe instances associated with AWS AutoScaling Groups (ASGs). +author: + - "Mark Chappell (@tremble)" +options: + group_name: + description: + - Name of the AutoScaling Group to manage. + - O(group_name) and O(instance_ids) are mutually exclusive. + type: str + instance_ids: + description: + - The IDs of the EC2 instances. + - O(group_name) and O(instance_ids) are mutually exclusive. 
+ type: list + elements: str +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Describe all instances in a region + amazon.aws.autoscaling_instance_info: + register: instances + +- name: Describe a specific instance + amazon.aws.autoscaling_instance_info: + instance_ids: + - "i-123456789abcdef01" + register: instances + +- name: Describe the instances attached to a specific Auto Scaling Group + amazon.aws.autoscaling_instance_info: + group_name: example-asg + register: instances +""" + +RETURN = r""" +auto_scaling_instances: + description: A description of the EC2 instances attached to an Auto Scaling group. + returned: always + type: list + contains: + availability_zone: + description: The availability zone that the instance is in. + returned: always + type: str + sample: "us-east-1a" + health_status: + description: The last reported health status of the instance. + returned: always + type: str + sample: "Healthy" + instance_id: + description: The ID of the instance. + returned: always + type: str + sample: "i-123456789abcdef01" + instance_type: + description: The instance type of the instance. + returned: always + type: str + sample: "t3.micro" + launch_configuration_name: + description: The name of the launch configuration used when launching the instance. + returned: When the instance was launched using an Auto Scaling launch configuration. + type: str + sample: "ansible-test-49630214-mchappel-thinkpadt14gen3-asg-instance-1" + launch_template: + description: A description of the launch template used when launching the instance. + returned: When the instance was launched using an Auto Scaling launch template. + type: dict + contains: + launch_template_id: + description: The ID of the launch template used when launching the instance. 
+ returned: always + type: str + sample: "12345678-abcd-ef12-2345-6789abcdef01" + launch_template_name: + description: The name of the launch template used when launching the instance. + returned: always + type: str + sample: "example-launch-configuration" + version: + description: The version of the launch template used when launching the instance. + returned: always + type: str + sample: "$Default" + lifecycle_state: + description: The lifecycle state of the instance. + returned: always + type: str + sample: "InService" + protected_from_scale_in: + description: Whether the instance is protected from termination when the Auto Scaling group is scaled in. + returned: always + type: bool + sample: false +""" + +from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import get_autoscaling_instances +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def main(): + argument_spec = dict( + group_name=dict(type="str"), + instance_ids=dict(type="list", elements="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["instance_ids", "group_name"]], + ) + + client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff()) + + instances = get_autoscaling_instances( + client, + instance_ids=module.params["instance_ids"], + group_name=module.params["group_name"], + ) + + module.exit_json(changed=False, auto_scaling_instances=instances) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/autoscaling_instance/aliases b/tests/integration/targets/autoscaling_instance/aliases new file mode 100644 index 00000000000..cc06f7fdbfb --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/aliases @@ -0,0 +1,4 @@ +time=30m +cloud/aws + +autoscaling_instance_info diff --git 
a/tests/integration/targets/autoscaling_instance/defaults/main.yml b/tests/integration/targets/autoscaling_instance/defaults/main.yml new file mode 100644 index 00000000000..a35f284be06 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/defaults/main.yml @@ -0,0 +1,11 @@ +--- +ec2_asg_setup_run_once: true +default_resource_name: "{{ resource_prefix }}-asg-instance" +default_tiny_name: "{{ tiny_prefix }}-asg-i" + +vpc_seed: "{{ default_resource_name }}" +vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 +subnet_a_az: "{{ ec2_availability_zone_names[0] }}" +subnet_a_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.32.0/24 +subnet_b_az: "{{ ec2_availability_zone_names[1] }}" +subnet_b_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.33.0/24 diff --git a/tests/integration/targets/autoscaling_instance/meta/main.yml b/tests/integration/targets/autoscaling_instance/meta/main.yml new file mode 100644 index 00000000000..33bfa8e0612 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - setup_ec2_facts + - setup_ec2_vpc diff --git a/tests/integration/targets/autoscaling_instance/tasks/env_cleanup.yml b/tests/integration/targets/autoscaling_instance/tasks/env_cleanup.yml new file mode 100644 index 00000000000..963e505a69a --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/env_cleanup.yml @@ -0,0 +1,57 @@ +--- +- name: kill asg + amazon.aws.autoscaling_group: + name: "{{ default_resource_name }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + +# If we failed while an instance was detached we need to manually clean up +- name: Trigger termination of all instances + amazon.aws.ec2_instance: + state: absent + instance_ids: "{{ all_instances }}" + wait: True + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + +- name: remove target group + community.aws.elb_target_group: + name: "{{ item }}" + 
state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + loop: + - "{{ default_tiny_name }}-1" + - "{{ default_tiny_name }}-2" + +- name: remove launch templates + community.aws.ec2_launch_template: + name: "{{ item }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + loop: + - "{{ default_resource_name }}-1" + - "{{ default_resource_name }}-2" + +- name: delete launch template + community.aws.ec2_launch_template: + name: "{{ default_resource_name }}" + state: absent + register: del_lt + retries: 10 + until: del_lt is not failed + ignore_errors: true + +- ansible.builtin.include_role: + name: setup_ec2_vpc + tasks_from: cleanup.yml diff --git a/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml b/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml new file mode 100644 index 00000000000..bfc77868a39 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml @@ -0,0 +1,94 @@ +--- +# Set up the testing dependencies: VPC, subnet, security group, and two launch configurations +- name: Create VPC for use in testing + amazon.aws.ec2_vpc_net: + name: "{{ default_resource_name }}" + cidr_block: "{{ vpc_cidr }}" + tenancy: default + register: testing_vpc + +- ansible.builtin.set_fact: + vpc_id: "{{ testing_vpc.vpc.id }}" + +- name: Create internet gateway for use in testing + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + state: present + tags: + Name: "{{ default_resource_name }}" + register: igw + +- name: Create subnet for use in testing + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: "{{ vpc_id }}" + cidr: "{{ subnet_a_cidr }}" + az: "{{ subnet_a_az }}" + tags: + Name: "{{ default_resource_name }}" + + register: testing_subnet +- name: create routing rules + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc_id }}" + tags: + Name: "{{ default_resource_name }}" + routes: + - dest: "0.0.0.0/0" + 
gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ testing_subnet.subnet.id }}" + +- name: create a security group with the vpc created in the ec2_setup + amazon.aws.ec2_security_group: + name: "{{ default_resource_name }}" + description: a security group for ansible tests + vpc_id: "{{ vpc_id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: "0.0.0.0/0" + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: "0.0.0.0/0" + register: sg + +- name: ensure launch templates exist + community.aws.ec2_launch_template: + name: "{{ item }}" + network_interfaces: + - device_index: 0 + # XXX confusing - ec2_instance (incorrectly) calls this assign_public_ip + associate_public_ip_address: true + delete_on_termination: true + # XXX confsuing - at the top level this would be security_groups, here it's groups + groups: "{{ sg.group_id }}" + image_id: "{{ ec2_ami_id }}" + user_data: "{{ lookup('ansible.builtin.file', 'user_data') | ansible.builtin.b64encode }}" + instance_type: t3.micro + loop: + - "{{ default_resource_name }}-1" + - "{{ default_resource_name }}-2" + +- name: create asg and wait for instances to be deemed healthy (no ELB) + amazon.aws.autoscaling_group: + name: "{{ default_resource_name }}" + launch_template: + launch_template_name: "{{ default_resource_name }}-1" + desired_capacity: 2 + min_size: 0 + max_size: 4 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + state: present + wait_for_instances: true + register: create_asg +- ansible.builtin.assert: + that: + - create_asg.viable_instances == 2 + - create_asg.instances | length == 2 + +- ansible.builtin.set_fact: + initial_instances: "{{ create_asg.instances }}" + all_instances: "{{ create_asg.instances }}" diff --git a/tests/integration/targets/autoscaling_instance/tasks/main.yml b/tests/integration/targets/autoscaling_instance/tasks/main.yml new file mode 100644 index 00000000000..fbd45db5eda --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/main.yml @@ 
-0,0 +1,19 @@ +--- +# Beware: most of our tests here are run in parallel. +# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/ + +- name: Wrap up all tests and setup AWS credentials + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - community.aws + block: + - ansible.builtin.include_tasks: env_setup.yml + - ansible.builtin.include_tasks: tests.yml + always: + - ansible.builtin.include_tasks: env_cleanup.yml diff --git a/tests/integration/targets/autoscaling_instance/tasks/tests.yml b/tests/integration/targets/autoscaling_instance/tasks/tests.yml new file mode 100644 index 00000000000..a7742b40c6c --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/tests.yml @@ -0,0 +1,3 @@ +--- +- ansible.builtin.include_tasks: describe.yml +- ansible.builtin.include_tasks: attach_detach.yml diff --git a/tests/integration/targets/autoscaling_instance/tmp/inventory b/tests/integration/targets/autoscaling_instance/tmp/inventory new file mode 100644 index 00000000000..edc19ef5f3c --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tmp/inventory @@ -0,0 +1,8 @@ +[tests] +create_update_delete +tag_operations +instance_detach + +[all:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/tests/integration/targets/autoscaling_instance/tmp/main.yml b/tests/integration/targets/autoscaling_instance/tmp/main.yml new file mode 100644 index 00000000000..709499c4470 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tmp/main.yml @@ -0,0 +1,34 @@ +--- +# Beware: most of our tests here are run in parallel. 
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/ +# Prepare the VPC and figure out which AMI to use +- hosts: all + gather_facts: false + tasks: + - module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - ansible.builtin.include_role: + name: setup_ec2_facts + - ansible.builtin.include_role: + name: ec2_asg + tasks_from: env_setup.yml + rescue: + - ansible.builtin.include_role: + name: ec2_asg + tasks_from: env_cleanup.yml + run_once: true + - ansible.builtin.fail: + msg: Environment preparation failed + run_once: true +- hosts: all + gather_facts: false + strategy: free + serial: 6 + roles: + - ec2_asg diff --git a/tests/integration/targets/autoscaling_instance/tmp/runme.sh b/tests/integration/targets/autoscaling_instance/tmp/runme.sh new file mode 100755 index 00000000000..aa324772bbe --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tmp/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# +# Beware: most of our tests here are run in parallel. 
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/ + + +set -eux + +export ANSIBLE_ROLES_PATH=../ + +ansible-playbook main.yml -i inventory "$@" diff --git a/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml b/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml index 4efd66d30b5..32a6259a1ba 100644 --- a/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml +++ b/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml @@ -1,6 +1,6 @@ --- # ============================================================ -- name: Run all tests +- name: Cleanup after all tests module_defaults: group/aws: access_key: "{{ aws_access_key }}" @@ -88,7 +88,7 @@ loop: "{{ remaining_subnets.subnets }}" until: subnets_removed is not failed when: - - item.name != 'default' + - (item.name | default("")) != 'default' ignore_errors: true retries: 10 @@ -106,7 +106,7 @@ # ============================================================ - - name: (VPC Cleanup) Delete remaining route tables + - name: (VPC Cleanup) Delete route tables (excluding main table) amazon.aws.ec2_vpc_route_table: state: absent vpc_id: "{{ vpc_id }}" @@ -114,7 +114,11 @@ lookup: id register: rtbs_removed loop: "{{ remaining_rtbs.route_tables }}" + when: + - True not in main_associations ignore_errors: true + vars: + main_associations: "{{ item.associations | default([]) | map(attribute='main') | list}}" # ============================================================ @@ -126,3 +130,15 @@ until: vpc_removed is not failed ignore_errors: true retries: 10 + + # ============================================================ + + - name: (VPC Cleanup) (retry) Delete remaining route tables (including main table) + amazon.aws.ec2_vpc_route_table: + state: absent + vpc_id: "{{ vpc_id }}" + route_table_id: "{{ item.id }}" + lookup: id + register: rtbs_removed + loop: "{{ remaining_rtbs.route_tables }}" + ignore_errors: true diff 
--git a/tests/unit/module_utils/autoscaling/test_autoscaling_error_handler.py b/tests/unit/module_utils/autoscaling/test_autoscaling_error_handler.py
new file mode 100644
index 00000000000..fc4424e6bd9
--- /dev/null
+++ b/tests/unit/module_utils/autoscaling/test_autoscaling_error_handler.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import pytest
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import AnsibleAutoScalingError
+from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import AutoScalingErrorHandler
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_autoscaling_error_handler.py requires the python modules 'boto3' and 'botocore'")
+
+
+class TestAutoScalingDeletionHandler:
+    def test_no_failures(self):
+        self.counter = 0
+
+        @AutoScalingErrorHandler.deletion_error_handler("no error")
+        def no_failures():
+            self.counter += 1
+
+        no_failures()
+        assert self.counter == 1
+
+    def test_client_error(self):
+        self.counter = 0
+        err_response = {"Error": {"Code": "MalformedPolicyDocument"}}
+
+        @AutoScalingErrorHandler.deletion_error_handler("do something")
+        def raise_client_error():
+            self.counter += 1
+            raise botocore.exceptions.ClientError(err_response, "Something bad")
+
+        with pytest.raises(AnsibleAutoScalingError) as e_info:
+            raise_client_error()
+        assert self.counter == 1
+        raised = e_info.value
+        assert isinstance(raised.exception, botocore.exceptions.ClientError)
+        assert "do something" in raised.message
+        assert "Something bad" in str(raised.exception)
+
+    def test_ignore_error(self):
+        self.counter = 0
+        err_response = {"Error": {"Code": "NoSuchEntity"}}
+
+        @AutoScalingErrorHandler.deletion_error_handler("do something")
+        def 
raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + ret_val = raise_client_error() + assert self.counter == 1 + assert ret_val is False + + +class TestIamListHandler: + def test_no_failures(self): + self.counter = 0 + + @AutoScalingErrorHandler.list_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_client_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AutoScalingErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAutoScalingError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) + + def test_list_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @AutoScalingErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + ret_val = raise_client_error() + assert self.counter == 1 + assert ret_val is None + + +class TestIamCommonHandler: + def test_no_failures(self): + self.counter = 0 + + @AutoScalingErrorHandler.common_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_client_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AutoScalingErrorHandler.common_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAutoScalingError) as e_info: + 
raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) diff --git a/tests/unit/module_utils/autoscaling/test_autoscaling_resource_transforms.py b/tests/unit/module_utils/autoscaling/test_autoscaling_resource_transforms.py new file mode 100644 index 00000000000..a8f0511cb01 --- /dev/null +++ b/tests/unit/module_utils/autoscaling/test_autoscaling_resource_transforms.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible_collections.amazon.aws.plugins.module_utils._autoscaling.transformations import ( + normalize_autoscaling_instances, +) + +# The various normalize_ functions are based upon ..transformation.boto3_resource_to_ansible_dict +# As such these tests will be relatively light touch. 
+ + +class TestAutoScalingResourceToAnsibleDict: + def setup_method(self): + pass + + def test_normalize_autoscaling_instances(self): + INPUT = [ + { + "AvailabilityZone": "us-east-1a", + "HealthStatus": "UNHEALTHY", + "InstanceId": "i-123456789abcdef12", + "InstanceType": "t3.small", + "LaunchConfigurationName": "ansible-test-lc-2", + "LifecycleState": "Standby", + "ProtectedFromScaleIn": True, + }, + { + "AutoScalingGroupName": "ansible-test-asg", + "AvailabilityZone": "us-east-1a", + "HealthStatus": "Healthy", + "InstanceId": "i-0123456789abcdef0", + "InstanceType": "t3.micro", + "LaunchConfigurationName": "ansible-test-lc", + "LifecycleState": "InService", + "ProtectedFromScaleIn": False, + }, + ] + OUTPUT = [ + { + "auto_scaling_group_name": "ansible-test-asg", + "availability_zone": "us-east-1a", + "health_status": "HEALTHY", + "instance_id": "i-0123456789abcdef0", + "instance_type": "t3.micro", + "launch_configuration_name": "ansible-test-lc", + "lifecycle_state": "InService", + "protected_from_scale_in": False, + }, + { + "availability_zone": "us-east-1a", + "health_status": "UNHEALTHY", + "instance_id": "i-123456789abcdef12", + "instance_type": "t3.small", + "launch_configuration_name": "ansible-test-lc-2", + "lifecycle_state": "Standby", + "protected_from_scale_in": True, + }, + ] + + assert OUTPUT == normalize_autoscaling_instances(INPUT) + + def test_normalize_autoscaling_instances_with_group(self): + INPUT = [ + { + "AvailabilityZone": "us-east-1a", + "HealthStatus": "Unhealthy", + "InstanceId": "i-123456789abcdef12", + "InstanceType": "t3.small", + "LaunchConfigurationName": "ansible-test-lc-2", + "LifecycleState": "Standby", + "ProtectedFromScaleIn": True, + }, + { + "AutoScalingGroupName": "ansible-test-asg", + "AvailabilityZone": "us-east-1a", + "HealthStatus": "HEALTHY", + "InstanceId": "i-0123456789abcdef0", + "InstanceType": "t3.micro", + "LaunchConfigurationName": "ansible-test-lc", + "LifecycleState": "InService", + "ProtectedFromScaleIn": 
False, + }, + ] + OUTPUT = [ + { + "auto_scaling_group_name": "ansible-test-asg", + "availability_zone": "us-east-1a", + "health_status": "HEALTHY", + "instance_id": "i-0123456789abcdef0", + "instance_type": "t3.micro", + "launch_configuration_name": "ansible-test-lc", + "lifecycle_state": "InService", + "protected_from_scale_in": False, + }, + { + "auto_scaling_group_name": "ansible-test-asg-2", + "availability_zone": "us-east-1a", + "health_status": "UNHEALTHY", + "instance_id": "i-123456789abcdef12", + "instance_type": "t3.small", + "launch_configuration_name": "ansible-test-lc-2", + "lifecycle_state": "Standby", + "protected_from_scale_in": True, + }, + ] + + assert OUTPUT == normalize_autoscaling_instances(INPUT, "ansible-test-asg-2") diff --git a/tox.ini b/tox.ini index 75e7bef8b9f..16529573efd 100644 --- a/tox.ini +++ b/tox.ini @@ -106,6 +106,12 @@ commands = isort --check-only --diff {toxinidir}/plugins {toxinidir}/tests flake8 {posargs} {toxinidir}/plugins {toxinidir}/tests +[testenv:ansible-sanity] +deps = + git+https://github.com/ansible/ansible.git@milestone +commands = + ansible-test sanity + [flake8] # E123, E125 skipped as they are invalid PEP-8. 
show-source = True From 81a6dbbd2db7866a05307157f746b502eb0106ed Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 4 Oct 2024 22:04:36 +0200 Subject: [PATCH 2/8] next --- plugins/modules/autoscaling_instance.py | 305 +++++++++++++----- .../autoscaling_instance/files/user_data | 7 + .../tasks/attach_detach.yml | 298 +++++++++++++++++ .../autoscaling_instance/tasks/describe.yml | 134 ++++++++ 4 files changed, 671 insertions(+), 73 deletions(-) create mode 100644 tests/integration/targets/autoscaling_instance/files/user_data create mode 100644 tests/integration/targets/autoscaling_instance/tasks/attach_detach.yml create mode 100644 tests/integration/targets/autoscaling_instance/tasks/describe.yml diff --git a/plugins/modules/autoscaling_instance.py b/plugins/modules/autoscaling_instance.py index 02c4fb37644..b11a00e10a0 100644 --- a/plugins/modules/autoscaling_instance.py +++ b/plugins/modules/autoscaling_instance.py @@ -179,6 +179,18 @@ from ansible_collections.amazon.aws.plugins.module_utils.transformations import AnsibleAWSResourceList +STATE_MAP = { + "terminating": ["Terminating", "Terminating:Wait", "Terminating:Proceed"], + "terminating+": ["Terminating", "Terminating:Wait", "Terminating:Proceed", "Terminated"], + "detaching": ["Detaching"], + "detaching+": ["Detaching", "Detached"], + "pending": ["Pending", "Pending:Proceed", "Pending:Wait"], + "entering": ["EnteringStandby"], + "entering+": ["EnteringStandby", "Standby"], + "stable": ["InService", "Standby"], +} + + def _instance_ids_in_states(instances: List, states: List[str]) -> Set[str]: states = [s.lower() for s in states] return {i.get("instance_id") for i in instances if i.get("lifecycle_state", "").lower() in states} @@ -281,6 +293,66 @@ def wait_instance_state( return +def _inject_instances(instances, group_name, missing_ids): + if not missing_ids: + return instances + for instance in list(missing_ids): + instances.append(_token_instance(instance, group_name)) + instances = sorted(instances, 
key=lambda d: d.get("instance_id", None)) + return instances + + +def _changed_instances(instances, group_name, final_state, changed_ids): + for instance in instances: + if instance.get("instance_id") in changed_ids: + instance["lifecycle_state"] = final_state + return instances + + +def ensure_instance_terminated( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + instance_ids: List[str], + decrement_desired_capacity: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + instance_ids = set(instance_ids) + + # We don't need to change these instances, we may need to wait for them + terminating_ids = _instance_ids_in_states(instances_start, STATE_MAP["terminating+"]) & instance_ids + + # We'll need to wait for the in-progress changes to complete + detaching_ids = _instance_ids_in_states(instances_start, STATE_MAP["detaching"]) & instance_ids + pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids + entering_ids = _instance_ids_in_states(instances_start, STATE_MAP["entering"]) & instance_ids + # These instances are ready to terminate + ready_ids = _instance_ids_in_states(instances_start, STATE_MAP["stable"]) & instance_ids + + if check_mode: + change_ids = detaching_ids | pending_ids | entering_ids | ready_ids | terminating_ids + instances_changed = _changed_instances(deepcopy(instances_start), group_name, "Terminated", change_ids) + return bool(change_ids - terminating_ids), instances_changed + + # We have to wait for instances to transition to their stable states + if entering_ids: + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + if pending_ids: + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + ready_ids |= entering_ids | pending_ids + + if ready_ids: + _terminate_instances(client, ready_ids, group_name, 
decrement_desired_capacity) + + terminating_ids |= ready_ids + wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return bool(ready_ids), instances_complete + + def ensure_instance_absent( client: RetryingBotoClientWrapper, check_mode: bool, @@ -294,51 +366,96 @@ def ensure_instance_absent( instance_ids = set(instance_ids) # We don't need to change these instances, we may need to wait for them - detached_ids = _instance_ids_in_states(instances_start, ["Detached", "Detaching"]) & instance_ids + detaching_ids = _instance_ids_in_states(instances_start, STATE_MAP["detaching+"]) & instance_ids # On the basis of "be conservative in what you do, be liberal in what you accept from others" # We'll treat instances that someone else has terminated, as "detached" from the ASG, since # they won't be attached to the ASG. - terminating_ids = ( - _instance_ids_in_states( - instances_start, ["Terminating", "Terminating:Wait", "Terminating:Proceed", "Terminated"] - ) - & instance_ids - ) + terminating_ids = _instance_ids_in_states(instances_start, STATE_MAP["terminating+"]) & instance_ids - # We need to wait for these instances to enter "InService" before we can do anything with them - pending_ids = ( - _instance_ids_in_states(instances_start, ["Pending", "Pending:Proceed", "Pending:Wait", "EnteringStandby"]) - & instance_ids - ) + # We'll need to wait for the in-progress changes to complete + pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids + entering_ids = _instance_ids_in_states(instances_start, STATE_MAP["entering"]) & instance_ids # These instances are ready to detach - ready_ids = _instance_ids_in_states(instances_start, ["InService", "Standby"]) & instance_ids + ready_ids = _instance_ids_in_states(instances_start, STATE_MAP["stable"]) & instance_ids - # We don't normally return so we wait for 
Detaching instances if wait=True - changed_ids = pending_ids | ready_ids - changed = bool(changed_ids) if check_mode: - instances_changed = deepcopy(instances_start) - for instance in instances_changed: - if instance.get("instance_id") in changed_ids: - instance["lifecycle_state"] = "Detached" - return changed, instances_changed + change_ids = pending_ids | entering_ids | ready_ids | detaching_ids + instances_changed = _changed_instances(deepcopy(instances_start), group_name, "Detached", change_ids) + return bool(change_ids - detaching_ids), instances_changed + # We have to wait for instances to transition to their stable state + if entering_ids: + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) if pending_ids: - # We have to wait for instances to transition to InService wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) - if changed: - _detach_instances(client, changed_ids, group_name, decrement_desired_capacity) + ready_ids |= entering_ids | pending_ids + if ready_ids: + _detach_instances(client, ready_ids, group_name, decrement_desired_capacity) + detaching_ids |= ready_ids if terminating_ids: wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) + if detaching_ids: + wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return bool(ready_ids), instances_complete + + +def ensure_instance_attached( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + instance_ids: List[str], + decrement_desired_capacity: bool, + purge: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + instance_ids = set(instance_ids) + all_ids = {i.get("instance_id") for i in instances_start} + + # These instances need to be 
attached + missing_ids = instance_ids - all_ids + missing_ids |= _instance_ids_in_states(instances_start, ["Detached"]) & instance_ids + detaching_ids = _instance_ids_in_states(instances_start, ["Detaching"]) & instance_ids + pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids + terminating_ids = _instance_ids_in_states(instances_start, STATE_MAP["terminating+"]) & instance_ids + # Ids that need to be removed + purge_ids = (all_ids - instance_ids) if purge else set() + + if check_mode: + instances_changed = _inject_instances(deepcopy(instances_start), group_name, missing_ids) + missing_ids |= detaching_ids + instances_changed = _changed_instances(instances_changed, group_name, "InService", missing_ids) + instances_changed = _changed_instances(instances_changed, group_name, "Terminated", purge_ids) + return bool(missing_ids | purge_ids), instances_changed - detaching_ids = changed_ids if detaching_ids: + # We have to wait for instances to transition to Detached before we can re-attach them wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + missing_ids |= detaching_ids + + if missing_ids: + _attach_instances(client, missing_ids, group_name) + pending_ids |= missing_ids + + # This includes potentially waiting for instances which were Pending when we started + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + + # While, in theory, we could make the ordering of Add/Remove configurable, the logic becomes + # difficult to test. As such we're going to hard code the order of operations. + # Add/Wait/Terminate is the order least likely to result in 0 available + # instances, so we do any termination after ensuring instances are InService. 
+ if purge_ids: + _terminate_instances(client, purge_ids, group_name, decrement_desired_capacity) + terminating_ids |= purge_ids + wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) - return changed, instances_complete + return bool(purge_ids | missing_ids), instances_complete def ensure_instance_present( @@ -356,50 +473,42 @@ def ensure_instance_present( all_ids = {i.get("instance_id") for i in instances_start} # We just need to wait for these - pending_ids = ( - _instance_ids_in_states(instances_start, ["Pending", "Pending:Proceed", "Pending:Wait"]) & instance_ids - ) + pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids # We need to wait for these before we can attach/re-activate them - detaching_ids = _instance_ids_in_states(instances_start, ["Detaching"]) & instance_ids + detaching_ids = _instance_ids_in_states(instances_start, STATE_MAP["detaching+"]) & instance_ids entering_ids = _instance_ids_in_states(instances_start, ["EnteringStandby"]) & instance_ids # These instances need to be brought out of standby standby_ids = _instance_ids_in_states(instances_start, ["Standby"]) & instance_ids # These instances need to be attached missing_ids = instance_ids - all_ids - missing_ids |= _instance_ids_in_states(instances_start, ["Detached"]) & instance_ids # Ids that need to be removed purge_ids = (all_ids - instance_ids) if purge else set() - changed_ids = (detaching_ids | entering_ids | standby_ids | missing_ids) & instance_ids - changed = bool(changed_ids | purge_ids) if check_mode: - instances_changed = deepcopy(instances_start) - if missing_ids: - for instance in list(missing_ids): - instances_changed.append(_token_instance(instance, group_name)) - instances_changed = sorted(instances_changed, key=lambda d: d.get("instance_id", None)) - for instance in instances_changed: - if 
instance.get("instance_id") in purge_ids: - instance["lifecycle_state"] = "Terminated" - if instance.get("instance_id") in changed_ids: - instance["lifecycle_state"] = "InService" - return changed, instances_changed - - if not (changed or pending_ids or purge_ids): - return False, instances_start + change_ids = detaching_ids | entering_ids | standby_ids | missing_ids + instances_changed = _inject_instances(deepcopy(instances_start), group_name, missing_ids) + instances_changed = _changed_instances(instances_changed, group_name, "InService", change_ids | pending_ids) + instances_changed = _changed_instances(instances_changed, group_name, "Terminated", purge_ids) + return bool(change_ids | purge_ids), instances_changed if detaching_ids: # We have to wait for instances to transition to Detached before we can re-attach them wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) - if bool(detaching_ids | missing_ids): - _attach_instances(client, detaching_ids | missing_ids, group_name) + missing_ids |= detaching_ids - purge_ids + # They've left the ASG of their own accord, we'll leave them be... 
+ purge_ids = purge_ids - detaching_ids + + if missing_ids: + _attach_instances(client, missing_ids, group_name) if entering_ids: # We have to wait for instances to transition to Standby before we can tell them to leave standby - wait_instance_state(client, "Standby", check_mode, group_name, detaching_ids, True, wait_timeout) - if bool(entering_ids | standby_ids): - _leave_standby(client, entering_ids | standby_ids, group_name) + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + standby_ids |= entering_ids - purge_ids + + if standby_ids: + _leave_standby(client, standby_ids, group_name) # This includes potentially waiting for instances which were Pending when we started wait_instance_state(client, "InService", check_mode, group_name, instance_ids, True, wait_timeout) @@ -413,7 +522,7 @@ def ensure_instance_present( wait_instance_state(client, "Terminated", check_mode, group_name, detaching_ids, True, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) - return changed, instances_complete + return bool(purge_ids | standby_ids | missing_ids), instances_complete def ensure_instance_standby( @@ -429,37 +538,31 @@ def ensure_instance_standby( instance_ids = set(instance_ids) # We need to wait for these instances to enter "InService" before we can do anything with them - pending_ids = ( - _instance_ids_in_states(instances_start, ["Pending", "Pending:Proceed", "Pending:Wait"]) & instance_ids - ) + pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids # These instances are ready to move to Standby ready_ids = _instance_ids_in_states(instances_start, ["InService"]) & instance_ids # These instances are moving into Standby entering_ids = _instance_ids_in_states(instances_start, ["EnteringStandby"]) & instance_ids - changed_ids = pending_ids | ready_ids - changed = bool(changed_ids) if check_mode: - instances_changed = deepcopy(instances_start) - for 
instance in instances_changed: - if instance.get("instance_id") in changed_ids: - instance["lifecycle_state"] = "Standby" - return changed, instances_changed - - if not (changed or entering_ids): - return False, instances_start + change_ids = pending_ids | ready_ids + instances_changed = _changed_instances(deepcopy(instances_start), group_name, "Standby", change_ids) + return bool(ready_ids), instances_changed if pending_ids: # We have to wait for instances to transition to InService wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) - if changed: - _enter_standby(client, changed_ids, group_name, decrement_desired_capacity) + ready_ids |= pending_ids + + if ready_ids: + _enter_standby(client, ready_ids, group_name, decrement_desired_capacity) + entering_ids |= ready_ids # This includes potentially waiting for instances which were "Entering" Standby when we started - wait_instance_state(client, "Standby", check_mode, group_name, instance_ids, True, wait_timeout) + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) - return changed, instances_complete + return bool(ready_ids), instances_complete def ensure_instance_pool( @@ -502,6 +605,18 @@ def ensure_instance_pool( if instance_ids is None: instance_ids = [i.get("instance_id") for i in instances_start] + if state == "attached": + return ensure_instance_attached( + client, + check_mode, + instances_start, + group_name, + instance_ids, + decrement_desired_capacity, + purge_instances, + wait, + wait_timeout, + ) if state == "present": return ensure_instance_present( client, @@ -543,18 +658,62 @@ def _validate_standby_conditions(params: Dict[str, Any], instances: AnsibleAWSRe if pending_ids and not params.get("wait"): raise AnsibleAutoScalingError( - message=f"Unable to plance instances ({pending_ids}) into Standby - currently in a pending state and 
wait is dsabled",
+            message=f"Unable to place instances ({pending_ids}) into Standby - currently in a pending state and wait is disabled",
         )
     return
 
 
-def validate_params(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None:
-    if params["state"] in ["absent", "terminate"] and params["health"] is not None:
+def _validate_remove_conditions(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None:
+    target_verb = {"detached": "detach", "terminated": "terminate"}[params["state"]]
+
+    if params["health"] is not None:
         raise AnsibleAutoScalingError(message=f"Unable to set instance health when state is {params['state']}")
 
+    instance_ids = set(params.get("instance_ids"))
+    pending_ids = _instance_ids_in_states(instances, STATE_MAP["pending"] + STATE_MAP["entering"])
+
+    if (pending_ids & instance_ids) and not params.get("wait"):
+        raise AnsibleAutoScalingError(
+            message=f"Unable to {target_verb} instances ({pending_ids & instance_ids}) currently in a pending state and wait is disabled",
+        )
+
+    return
+
+
+def _validate_attach_conditions(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None:
+    instance_ids = set(params.get("instance_ids"))
+    all_ids = {i.get("instance_id") for i in instances}
+
+    # These instances are terminating, we can't do anything with them.
+    terminating_ids = _instance_ids_in_states(instances, ["Terminated", "Terminating"]) & instance_ids
+    # We need to wait for these instances to enter "InService" or "Standby" before we can do anything with them
+    pending_ids = _instance_ids_in_states(instances, ["EnteringStandby"]) & instance_ids
+    detaching_ids = _instance_ids_in_states(instances, ["Detaching"]) & instance_ids
+
+    if terminating_ids:
+        raise AnsibleAutoScalingError(
+            message=f"Unable to attach instances ({terminating_ids}) to AutoScaling group - instances not in a state that can transition to InService",
+        )
+
+    if not params.get("wait"):
+        if pending_ids and params.get("state") == "present":
+            raise AnsibleAutoScalingError(
+                message=f"Unable to place instances ({pending_ids}) into Service - currently entering standby and wait is disabled",
+            )
+        if detaching_ids:
+            raise AnsibleAutoScalingError(
+                message=f"Unable to attach instances ({detaching_ids}) to AutoScaling group - currently detaching and wait is disabled",
+            )
+
+
+def validate_params(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None:
+    if params["state"] in ["terminated", "detached"]:
+        _validate_remove_conditions(params, instances)
     if params["state"] == "standby":
         _validate_standby_conditions(params, instances)
+    if params["state"] in ["attached", "present"]:
+        _validate_attach_conditions(params, instances)
     return
 
 
diff --git a/tests/integration/targets/autoscaling_instance/files/user_data b/tests/integration/targets/autoscaling_instance/files/user_data
new file mode 100644
index 00000000000..f59d1769618
--- /dev/null
+++ b/tests/integration/targets/autoscaling_instance/files/user_data
@@ -0,0 +1,7 @@
+#cloud-config
+package_upgrade: true
+package_update: true
+packages:
+- httpd
+runcmd:
+- "service httpd start"
diff --git a/tests/integration/targets/autoscaling_instance/tasks/attach_detach.yml b/tests/integration/targets/autoscaling_instance/tasks/attach_detach.yml
new file mode 100644
index 00000000000..74de7b9cc94
--- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/attach_detach.yml @@ -0,0 +1,298 @@ +--- +### Simple _info tests +### instance_ids - idempotency + +# All current instances passed, no purge requested +# - no change should happen +- name: instance_ids - idempotency/all - no purge - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + purge_instances: False + diff: True + register: present_no_change + check_mode: True + +- ansible.builtin.assert: + that: + - present_no_change is not changed + - "'auto_scaling_instances' in present_no_change" + - present_no_change.auto_scaling_instances | length == 2 + - initial_instances[0] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ present_no_change.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ present_no_change.auto_scaling_instances[0] }}" + +- name: instance_ids - idempotency/all - no purge + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ 
default_resource_name }}" + state: present + purge_instances: False + diff: True + register: present_no_change + +- ansible.builtin.assert: + that: + - present_no_change is not changed + - "'auto_scaling_instances' in present_no_change" + - present_no_change.auto_scaling_instances | length == 2 + - initial_instances[0] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ present_no_change.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ present_no_change.auto_scaling_instances[0] }}" + +# One of the current instances passed, no purge requested +# - no change should happen +- name: instance_ids - idempotency/partial - no purge - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: present + purge_instances: False + diff: True + register: present_no_change + check_mode: True + +- ansible.builtin.assert: + that: + - present_no_change is not changed + - "'auto_scaling_instances' in present_no_change" + - present_no_change.auto_scaling_instances | length == 2 + - 
initial_instances[0] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ present_no_change.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ present_no_change.auto_scaling_instances[0] }}" + +- name: instance_ids - idempotency/partial - no purge + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: present + purge_instances: False + diff: True + register: present_no_change + +- ansible.builtin.assert: + that: + - present_no_change is not changed + - "'auto_scaling_instances' in present_no_change" + - present_no_change.auto_scaling_instances | length == 2 + - initial_instances[0] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == 
initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ present_no_change.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ present_no_change.auto_scaling_instances[0] }}" + +# All current instances passed, purge requested +# - no change should happen as there are no instances that are attached but not requested +- name: instance_ids - idempotency/all - purge - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + purge_instances: True + state: present + diff: True + register: present_no_change + check_mode: True + +- ansible.builtin.assert: + that: + - present_no_change is not changed + - "'auto_scaling_instances' in present_no_change" + - present_no_change.auto_scaling_instances | length == 2 + - initial_instances[0] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - 
specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ present_no_change.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ present_no_change.auto_scaling_instances[0] }}" + +### instance_ids - attach/detach +# Detach a specific instance +- name: instance_ids - single instance - detach - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: detached + decrement_desired_capacity: True + diff: True + register: absent_one + check_mode: True + +- ansible.builtin.assert: + that: + - absent_one is changed + - "'autoscaling:DetachInstances' not in absent_one.resource_actions" # CHECK_MODE + - "'auto_scaling_instances' in absent_one" + - initial_instances[1] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[1] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + 
listed_instance_ids: "{{ absent_one.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ absent_one.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + +- name: instance_ids - single instance - detach + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: detached + decrement_desired_capacity: True + diff: True + register: absent_one + +- ansible.builtin.assert: + that: + - absent_one is changed + - "'auto_scaling_instances' in absent_one" + - initial_instances[1] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[1] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ absent_one.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ absent_one.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + +# Ensure present state (not using standby - should be the same as attached) +- name: instance_ids - single instance - attach/present - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ 
initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: present + diff: True + register: present_one + check_mode: True + +- name: instance_ids - single instance - attach/present + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: present + diff: True + register: present_one + +# Detach it again so we can reattach +- name: instance_ids - single instance - detach (again - prepare to attach) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: detached + decrement_desired_capacity: True + diff: True + register: detach_one + +# Ensure attached state (not using standby - should be the same as attached) +- name: instance_ids - single instance - attach/attach - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: attached + diff: True + register: attached_one + check_mode: True + +- name: instance_ids - single instance - attach/attach + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[0] }}" + group_name: "{{ default_resource_name }}" + state: attached + diff: True + register: attached_one diff --git a/tests/integration/targets/autoscaling_instance/tasks/describe.yml b/tests/integration/targets/autoscaling_instance/tasks/describe.yml new file mode 100644 index 00000000000..0415290b4f0 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/describe.yml @@ -0,0 +1,134 @@ +--- +### Simple _info tests + +- name: List all instances + amazon.aws.autoscaling_instance_info: + register: instance_info + +- ansible.builtin.assert: + that: + - "'auto_scaling_instances' in instance_info" + - instance_info.auto_scaling_instances | length >= 2 + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'auto_scaling_group_name' in 
specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ instance_info.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ instance_info.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: List all instances attached to a specific ASG + amazon.aws.autoscaling_instance_info: + group_name: "{{ default_resource_name }}" + register: instance_info + +- ansible.builtin.assert: + that: + - "'auto_scaling_instances' in instance_info" + - instance_info.auto_scaling_instances | length == 2 + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'auto_scaling_group_name' in instance_info.auto_scaling_instances[0]" + - "'availability_zone' in instance_info.auto_scaling_instances[0]" + - "'health_status' in instance_info.auto_scaling_instances[0]" + - "'instance_id' in instance_info.auto_scaling_instances[0]" + - "'instance_type' in instance_info.auto_scaling_instances[0]" + - "'launch_template' in instance_info.auto_scaling_instances[0]" + - "'lifecycle_state' in instance_info.auto_scaling_instances[0]" + - "'protected_from_scale_in' 
in instance_info.auto_scaling_instances[0]" + - "'auto_scaling_group_name' in instance_info.auto_scaling_instances[1]" + - "'availability_zone' in instance_info.auto_scaling_instances[1]" + - "'health_status' in instance_info.auto_scaling_instances[1]" + - "'instance_id' in instance_info.auto_scaling_instances[1]" + - "'instance_type' in instance_info.auto_scaling_instances[1]" + - "'launch_template' in instance_info.auto_scaling_instances[1]" + - "'lifecycle_state' in instance_info.auto_scaling_instances[1]" + - "'protected_from_scale_in' in instance_info.auto_scaling_instances[1]" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - specific_instance_info.health_status == "HEALTHY" + - specific_instance_info.instance_id == initial_instances[0] + - specific_instance_info.instance_type == "t3.micro" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - specific_instance_info.lifecycle_state == "InService" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ instance_info.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ instance_info.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- amazon.aws.autoscaling_instance_info: + instance_ids: "{{ instance_info.auto_scaling_instances | map(attribute='instance_id') | list }}" + register: instance_info + +- ansible.builtin.assert: + that: + - "'auto_scaling_instances' in instance_info" + - instance_info.auto_scaling_instances | length == 2 + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'auto_scaling_group_name' in instance_info.auto_scaling_instances[0]" + - "'availability_zone' in instance_info.auto_scaling_instances[0]" + - "'health_status' in instance_info.auto_scaling_instances[0]" + - "'instance_id' in instance_info.auto_scaling_instances[0]" + - 
"'instance_type' in instance_info.auto_scaling_instances[0]" + - "'launch_template' in instance_info.auto_scaling_instances[0]" + - "'lifecycle_state' in instance_info.auto_scaling_instances[0]" + - "'protected_from_scale_in' in instance_info.auto_scaling_instances[0]" + - "'auto_scaling_group_name' in instance_info.auto_scaling_instances[1]" + - "'availability_zone' in instance_info.auto_scaling_instances[1]" + - "'health_status' in instance_info.auto_scaling_instances[1]" + - "'instance_id' in instance_info.auto_scaling_instances[1]" + - "'instance_type' in instance_info.auto_scaling_instances[1]" + - "'launch_template' in instance_info.auto_scaling_instances[1]" + - "'lifecycle_state' in instance_info.auto_scaling_instances[1]" + - "'protected_from_scale_in' in instance_info.auto_scaling_instances[1]" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - specific_instance_info.health_status == "HEALTHY" + - specific_instance_info.instance_id == initial_instances[0] + - specific_instance_info.instance_type == "t3.micro" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - specific_instance_info.lifecycle_state == "InService" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ instance_info.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ instance_info.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- amazon.aws.autoscaling_instance_info: + instance_ids: "{{ initial_instances[0] }}" + register: instance_info + +- ansible.builtin.assert: + that: + - "'auto_scaling_instances' in instance_info" + - instance_info.auto_scaling_instances | length == 1 + - initial_instances[0] in listed_instance_ids + - "'auto_scaling_group_name' in specific_instance_info" + - specific_instance_info.auto_scaling_group_name == default_resource_name + - "'availability_zone' in 
specific_instance_info" + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'instance_id' in specific_instance_info" + - specific_instance_info.instance_id == initial_instances[0] + - "'instance_type' in specific_instance_info" + - specific_instance_info.instance_type == "t3.micro" + - "'launch_template' in specific_instance_info" + - specific_instance_info.launch_template.launch_template_name.startswith(default_resource_name) + - "'lifecycle_state' in specific_instance_info" + - specific_instance_info.lifecycle_state == "InService" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + vars: + listed_instance_ids: "{{ instance_info.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ instance_info.auto_scaling_instances[0] }}" From c3e793bb6d143455d7ad7031830eaa665aa631f7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 6 Oct 2024 11:37:58 +0200 Subject: [PATCH 3/8] refactor --- plugins/modules/autoscaling_instance.py | 204 +++++++++++++++--------- 1 file changed, 129 insertions(+), 75 deletions(-) diff --git a/plugins/modules/autoscaling_instance.py b/plugins/modules/autoscaling_instance.py index b11a00e10a0..6ddfcfe0e23 100644 --- a/plugins/modules/autoscaling_instance.py +++ b/plugins/modules/autoscaling_instance.py @@ -70,6 +70,7 @@ - Ignored unless O(health) is set. - AWS defaults to respecting the grace period when modifying the health state of an instance. type: bool + default: True protection: description: - Sets the scale-in protection attribute. 
@@ -172,6 +173,7 @@ from typing import Any from typing import Dict from typing import List + from typing import Optional from typing import Set from typing import Tuple @@ -179,23 +181,38 @@ from ansible_collections.amazon.aws.plugins.module_utils.transformations import AnsibleAWSResourceList +# There's also a number of "Warmed" states that we could support with relatively minimal effort, but +# we can't test them STATE_MAP = { - "terminating": ["Terminating", "Terminating:Wait", "Terminating:Proceed"], - "terminating+": ["Terminating", "Terminating:Wait", "Terminating:Proceed", "Terminated"], - "detaching": ["Detaching"], - "detaching+": ["Detaching", "Detached"], "pending": ["Pending", "Pending:Proceed", "Pending:Wait"], + "stable": ["InService", "Standby"], "entering": ["EnteringStandby"], "entering+": ["EnteringStandby", "Standby"], - "stable": ["InService", "Standby"], + "detaching": ["Detaching"], + "detaching+": ["Detaching", "Detached"], + "terminating": ["Terminating", "Terminating:Wait", "Terminating:Proceed"], + "terminating+": ["Terminating", "Terminating:Wait", "Terminating:Proceed", "Terminated"], } +def _all_instance_ids(instances: List) -> Set[str]: + return {i.get("instance_id") for i in instances} + + def _instance_ids_in_states(instances: List, states: List[str]) -> Set[str]: states = [s.lower() for s in states] return {i.get("instance_id") for i in instances if i.get("lifecycle_state", "").lower() in states} +def _token_instance(instance_id, group_name): + # Returns the minimum information we need for a new instance when adding a new node in check mode + return dict( + instance_id=instance_id, + auto_scaling_group_name=group_name, + health_status="Healthy", + ) + + @AutoScalingErrorHandler.common_error_handler("detach auto scaling instances from group") @AWSRetry.jittered_backoff() def _detach_instances( @@ -250,15 +267,6 @@ def _leave_standby(client: RetryingBotoClientWrapper, instance_ids: Set[str], gr ) -def _token_instance(instance_id, 
group_name): - # Returns the minimum information we need for a new instance when adding a new node in check mode - return dict( - instance_id=instance_id, - auto_scaling_group_name=group_name, - health_status="Healthy", - ) - - def wait_instance_state( client: RetryingBotoClientWrapper, state: str, @@ -302,10 +310,11 @@ def _inject_instances(instances, group_name, missing_ids): return instances -def _changed_instances(instances, group_name, final_state, changed_ids): +def _change_instances(instances, group_name, change_ids, state=None, health=None, protection=None): for instance in instances: - if instance.get("instance_id") in changed_ids: - instance["lifecycle_state"] = final_state + if instance.get("instance_id") in change_ids: + if state: + instance["lifecycle_state"] = state return instances @@ -333,21 +342,21 @@ def ensure_instance_terminated( if check_mode: change_ids = detaching_ids | pending_ids | entering_ids | ready_ids | terminating_ids - instances_changed = _changed_instances(deepcopy(instances_start), group_name, "Terminated", change_ids) + instances_changed = _change_instances(deepcopy(instances_start), group_name, change_ids, state="Terminated") return bool(change_ids - terminating_ids), instances_changed - # We have to wait for instances to transition to their stable states + # We have to wait for instances to transition to their stable states before changing them if entering_ids: - wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, wait, wait_timeout) if pending_ids: - wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, wait, wait_timeout) ready_ids |= entering_ids | pending_ids if ready_ids: _terminate_instances(client, ready_ids, group_name, decrement_desired_capacity) terminating_ids |= ready_ids - 
wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) + wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, wait, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) return bool(ready_ids), instances_complete @@ -380,14 +389,14 @@ def ensure_instance_absent( if check_mode: change_ids = pending_ids | entering_ids | ready_ids | detaching_ids - instances_changed = _changed_instances(deepcopy(instances_start), group_name, "Detached", change_ids) + instances_changed = _change_instances(deepcopy(instances_start), group_name, change_ids, state="Detached") return bool(change_ids - detaching_ids), instances_changed - # We have to wait for instances to transition to their stable state + # We have to wait for instances to transition to their stable state before changing them if entering_ids: - wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, wait, wait_timeout) if pending_ids: - wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, wait, wait_timeout) ready_ids |= entering_ids | pending_ids if ready_ids: @@ -395,9 +404,9 @@ def ensure_instance_absent( detaching_ids |= ready_ids if terminating_ids: - wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) + wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, wait, wait_timeout) if detaching_ids: - wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, wait, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) return 
bool(ready_ids), instances_complete @@ -415,12 +424,12 @@ def ensure_instance_attached( wait_timeout: int, ) -> Tuple[bool, AnsibleAWSResourceList]: instance_ids = set(instance_ids) - all_ids = {i.get("instance_id") for i in instances_start} + all_ids = _all_instance_ids(instances_start) # These instances need to be attached missing_ids = instance_ids - all_ids missing_ids |= _instance_ids_in_states(instances_start, ["Detached"]) & instance_ids - detaching_ids = _instance_ids_in_states(instances_start, ["Detaching"]) & instance_ids + detaching_ids = _instance_ids_in_states(instances_start, STATE_MAP["detaching"]) & instance_ids pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids terminating_ids = _instance_ids_in_states(instances_start, STATE_MAP["terminating+"]) & instance_ids # Ids that need to be removed @@ -429,13 +438,13 @@ def ensure_instance_attached( if check_mode: instances_changed = _inject_instances(deepcopy(instances_start), group_name, missing_ids) missing_ids |= detaching_ids - instances_changed = _changed_instances(instances_changed, group_name, "InService", missing_ids) - instances_changed = _changed_instances(instances_changed, group_name, "Terminated", purge_ids) + instances_changed = _change_instances(instances_changed, group_name, missing_ids, state="InService") + instances_changed = _change_instances(instances_changed, group_name, purge_ids, state="Terminated") return bool(missing_ids | purge_ids), instances_changed if detaching_ids: # We have to wait for instances to transition to Detached before we can re-attach them - wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, wait, wait_timeout) missing_ids |= detaching_ids if missing_ids: @@ -443,7 +452,7 @@ def ensure_instance_attached( pending_ids |= missing_ids # This includes potentially waiting for instances which were Pending 
when we started - wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, wait, wait_timeout) # While, in theory, we could make the ordering of Add/Remove configurable, the logic becomes # difficult to test. As such we're going to hard code the order of operations. @@ -452,7 +461,7 @@ def ensure_instance_attached( if purge_ids: _terminate_instances(client, purge_ids, group_name, decrement_desired_capacity) terminating_ids |= purge_ids - wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, True, wait_timeout) + wait_instance_state(client, "Terminated", check_mode, group_name, terminating_ids, wait, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) return bool(purge_ids | missing_ids), instances_complete @@ -470,13 +479,13 @@ def ensure_instance_present( wait_timeout: int, ) -> Tuple[bool, AnsibleAWSResourceList]: instance_ids = set(instance_ids) - all_ids = {i.get("instance_id") for i in instances_start} + all_ids = _all_instance_ids(instances_start) # We just need to wait for these pending_ids = _instance_ids_in_states(instances_start, STATE_MAP["pending"]) & instance_ids # We need to wait for these before we can attach/re-activate them detaching_ids = _instance_ids_in_states(instances_start, STATE_MAP["detaching+"]) & instance_ids - entering_ids = _instance_ids_in_states(instances_start, ["EnteringStandby"]) & instance_ids + entering_ids = _instance_ids_in_states(instances_start, STATE_MAP["entering"]) & instance_ids # These instances need to be brought out of standby standby_ids = _instance_ids_in_states(instances_start, ["Standby"]) & instance_ids # These instances need to be attached @@ -488,13 +497,14 @@ def ensure_instance_present( if check_mode: change_ids = detaching_ids | entering_ids | standby_ids | missing_ids instances_changed = 
_inject_instances(deepcopy(instances_start), group_name, missing_ids) - instances_changed = _changed_instances(instances_changed, group_name, "InService", change_ids | pending_ids) - instances_changed = _changed_instances(instances_changed, group_name, "Terminated", purge_ids) + instances_changed = _change_instances( + instances_changed, group_name, change_ids | pending_ids, state="InService" + ) + instances_changed = _change_instances(instances_changed, group_name, purge_ids, state="Terminated") return bool(change_ids | purge_ids), instances_changed if detaching_ids: - # We have to wait for instances to transition to Detached before we can re-attach them - wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, True, wait_timeout) + wait_instance_state(client, "Detached", check_mode, group_name, detaching_ids, wait, wait_timeout) missing_ids |= detaching_ids - purge_ids # They've left the ASG of their own accord, we'll leave them be... purge_ids = purge_ids - detaching_ids @@ -503,15 +513,14 @@ def ensure_instance_present( _attach_instances(client, missing_ids, group_name) if entering_ids: - # We have to wait for instances to transition to Standby before we can tell them to leave standby - wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, wait, wait_timeout) standby_ids |= entering_ids - purge_ids if standby_ids: _leave_standby(client, standby_ids, group_name) # This includes potentially waiting for instances which were Pending when we started - wait_instance_state(client, "InService", check_mode, group_name, instance_ids, True, wait_timeout) + wait_instance_state(client, "InService", check_mode, group_name, instance_ids, wait, wait_timeout) # While, in theory, we could make the ordering of Add/Remove configurable, the logic becomes # difficult to test. As such we're going to hard code the order of operations. 
@@ -519,7 +528,7 @@ def ensure_instance_present( # instances, so we do any termination after ensuring instances are InService. if purge_ids: _terminate_instances(client, purge_ids, group_name, decrement_desired_capacity) - wait_instance_state(client, "Terminated", check_mode, group_name, detaching_ids, True, wait_timeout) + wait_instance_state(client, "Terminated", check_mode, group_name, detaching_ids, wait, wait_timeout) instances_complete = get_autoscaling_instances(client, group_name=group_name) return bool(purge_ids | standby_ids | missing_ids), instances_complete @@ -542,16 +551,16 @@ def ensure_instance_standby( # These instances are ready to move to Standby ready_ids = _instance_ids_in_states(instances_start, ["InService"]) & instance_ids # These instances are moving into Standby - entering_ids = _instance_ids_in_states(instances_start, ["EnteringStandby"]) & instance_ids + entering_ids = _instance_ids_in_states(instances_start, STATE_MAP["entering"]) & instance_ids if check_mode: change_ids = pending_ids | ready_ids - instances_changed = _changed_instances(deepcopy(instances_start), group_name, "Standby", change_ids) + instances_changed = _change_instances(deepcopy(instances_start), group_name, change_ids, state="Standby") return bool(ready_ids), instances_changed if pending_ids: # We have to wait for instances to transition to InService - wait_instance_state(client, "InService", check_mode, group_name, pending_ids, True, wait_timeout) + wait_instance_state(client, "InService", check_mode, group_name, pending_ids, wait, wait_timeout) ready_ids |= pending_ids if ready_ids: @@ -559,12 +568,44 @@ def ensure_instance_standby( entering_ids |= ready_ids # This includes potentially waiting for instances which were "Entering" Standby when we started - wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, True, wait_timeout) + wait_instance_state(client, "Standby", check_mode, group_name, entering_ids, wait, wait_timeout) instances_complete = 
get_autoscaling_instances(client, group_name=group_name) return bool(ready_ids), instances_complete +def ensure_instance_health( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + health: Optional[str], + instance_ids: List[str], + respect_grace_period: bool, + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + return False, instances_start + + +def ensure_instance_protection( + client: RetryingBotoClientWrapper, + check_mode: bool, + instances_start: AnsibleAWSResourceList, + group_name: str, + protection: Optional[bool], + instance_ids: List[str], + wait: bool, + wait_timeout: int, +) -> Tuple[bool, AnsibleAWSResourceList]: + if protection is None: + return False, instances_start + if instance_ids is None: + instance_ids = _all_instance_ids(instances_start) + + return False, instances_start + + def ensure_instance_pool( client: RetryingBotoClientWrapper, check_mode: bool, @@ -574,7 +615,6 @@ def ensure_instance_pool( instance_ids: List[str], purge_instances: bool, decrement_desired_capacity: bool, - respect_grace_period: bool, wait: bool, wait_timeout: int, ) -> Tuple[bool, AnsibleAWSResourceList]: @@ -603,7 +643,7 @@ def ensure_instance_pool( # Not valid for standby/terminated/detached if instance_ids is None: - instance_ids = [i.get("instance_id") for i in instances_start] + instance_ids = _all_instance_ids(instances_start) if state == "attached": return ensure_instance_attached( @@ -635,7 +675,7 @@ def ensure_instance_pool( def _validate_standby_conditions(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None: instance_ids = set(params.get("instance_ids")) - all_ids = {i.get("instance_id") for i in instances} + all_ids = _all_instance_ids(instances) missing_ids = instance_ids - all_ids if missing_ids: @@ -644,9 +684,9 @@ def _validate_standby_conditions(params: Dict[str, Any], instances: AnsibleAWSRe ) # We don't need to change these instances, we may 
need to wait for them - standby_ids = _instance_ids_in_states(instances, ["Standby", "EnteringStandby"]) + standby_ids = _instance_ids_in_states(instances, STATE_MAP["entering+"]) # We need to wait for these instances to enter "InService" before we can do anything with them - pending_ids = _instance_ids_in_states(instances, ["Pending", "Pending:Proceed", "Pending:Wait"]) + pending_ids = _instance_ids_in_states(instances, STATE_MAP["pending"]) # These instances are ready to move to Standby ready_ids = _instance_ids_in_states(instances, ["InService"]) @@ -683,13 +723,13 @@ def _validate_remove_conditions(params: Dict[str, Any], instances: AnsibleAWSRes def _validate_attach_conditions(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None: instance_ids = set(params.get("instance_ids")) - all_ids = {i.get("instance_id") for i in instances} + all_ids = _all_instance_ids(instances) # These instances are terminating, we can't do anything with them. - terminating_ids = _instance_ids_in_states(instances, ["Terminated", "Terminating"]) & instance_ids + terminating_ids = _instance_ids_in_states(instances, STATE_MAP["terminating+"]) & instance_ids # We need to wait for these instances to enter "InService" or "Standby" before we can do anything with them - pending_ids = _instance_ids_in_states(instances, ["EnteringStandby"]) & instance_ids - detaching_ids = _instance_ids_in_states(instances, ["Detaching"]) & instance_ids + pending_ids = _instance_ids_in_states(instances, STATE_MAP["entering"]) & instance_ids + detaching_ids = _instance_ids_in_states(instances, STATE_MAP["detaching"]) & instance_ids if terminating_ids: raise AnsibleAutoScalingError( @@ -724,31 +764,45 @@ def do(module): instances_start = get_autoscaling_instances(client, group_name=module.params["group_name"]) validate_params(module.params, instances_start) - changed, instances = ensure_instance_pool( + instances = deepcopy(instances_start) + + changed_pool, instances = ensure_instance_pool( client, 
check_mode=module.check_mode, - instances_start=deepcopy(instances_start), + instances_start=instances, group_name=module.params["group_name"], state=module.params["state"], instance_ids=module.params["instance_ids"], purge_instances=module.params["purge_instances"], decrement_desired_capacity=module.params["decrement_desired_capacity"], + wait=module.params["wait"], + wait_timeout=module.params["wait_timeout"], + ) + + changed_protection, instances = ensure_instance_protection( + client, + check_mode=module.check_mode, + instances_start=instances, + group_name=module.params["group_name"], + protection=module.params["protection"], + instance_ids=module.params["instance_ids"], + wait=module.params["wait"], + wait_timeout=module.params["wait_timeout"], + ) + + changed_health, instances = ensure_instance_health( + client, + check_mode=module.check_mode, + instances_start=instances, + group_name=module.params["group_name"], + health=module.params["health"], + instance_ids=module.params["instance_ids"], respect_grace_period=module.params["respect_grace_period"], wait=module.params["wait"], wait_timeout=module.params["wait_timeout"], ) - # changed, instances = ensure_instance_health( - # client, - # check_mode=module.check_mode, - # instances_start=deepcopy(instances), - # group_name=module.params["group_name"], - # instance_ids=module.params["instance_ids"], - # decrement_desired_capacity=module.params["decrement_desired_capacity"], - # respect_grace_period=module.params["respect_grace_period"], - # wait=module.params["wait"], - # wait_timeout=module.params["wait_timeout"], - # ) + changed = changed_pool or changed_protection or changed_health result = {"changed": changed, "auto_scaling_instances": instances} @@ -764,12 +818,12 @@ def do(module): def main(): argument_spec = dict( group_name=dict(type="str", required=True), - state=dict(choices=["present", "attached", "terminated", "detached", "standby"], default="present", type="str"), + state=dict(type="str", 
choices=["present", "attached", "terminated", "detached", "standby"], default="present"), instance_ids=dict(type="list", elements="str"), - purge_instances=dict(default=False, type="bool"), - decrement_desired_capacity=dict(default=False, type="bool"), + purge_instances=dict(type="bool", default=False), + decrement_desired_capacity=dict(type="bool", default=False), health=dict(type="str", choices=["Healthy", "Unhealthy"]), - respect_grace_period=dict(type="bool"), + respect_grace_period=dict(type="bool", default=True), protection=dict(type="bool"), wait=dict(type="bool", default=True), wait_timeout=dict(type="int", default=120), From 690d724f1835bf8649488df3e8f406d2b8049b05 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 7 Oct 2024 16:10:14 +0200 Subject: [PATCH 4/8] Health and Protection --- plugins/module_utils/_autoscaling/waiters.py | 78 ++++++++++--- plugins/module_utils/autoscaling.py | 9 +- plugins/modules/autoscaling_instance.py | 106 +++++++++++++++--- .../autoscaling_instance/tasks/env_setup.yml | 12 ++ .../autoscaling_instance/tasks/tests.yml | 2 + 5 files changed, 178 insertions(+), 29 deletions(-) diff --git a/plugins/module_utils/_autoscaling/waiters.py b/plugins/module_utils/_autoscaling/waiters.py index e5feabfbdd0..e611565d18c 100644 --- a/plugins/module_utils/_autoscaling/waiters.py +++ b/plugins/module_utils/_autoscaling/waiters.py @@ -5,19 +5,36 @@ from ..waiter import BaseWaiterFactory +WAITER_MAP = { + "Standby": "instances_in_standby", + "Terminated": "instances_terminated", + "Detached": "instances_detached", + "InService": "instances_in_service", + "HEALTHY": "instances_healthy", + "Healthy": "instances_healthy", + "UNHEALTHY": "instances_unhealthy", + "Unhealthy": "instances_unhealthy", + "Protected": "instances_protected", + "NotProtected": "instances_not_protected", +} + def _fail_on_instance_lifecycle_states(state): return dict(state="failure", matcher="pathAny", expected=state, argument="AutoScalingInstances[].LifecycleState") 
-def _retry_on_instance_lifecycle_states(state): - return dict(state="retry", matcher="pathAny", expected=state, argument="AutoScalingInstances[].LifecycleState") - - def _success_on_instance_lifecycle_states(state): return dict(state="success", matcher="pathAll", expected=state, argument="AutoScalingInstances[].LifecycleState") +def _success_on_instance_health(health): + return dict(state="success", matcher="pathAll", expected=health, argument="AutoScalingInstances[].HealthStatus") + + +def _success_on_instance_protection(state): + return dict(state="success", matcher="pathAll", expected=state, argument="AutoScalingInstances[].ProtectedFromScaleIn") + + def _no_instances(result): return dict(state=result, matcher="path", expected=True, argument="length(AutoScalingInstances[]) == `0`") @@ -26,43 +43,78 @@ class AutoscalingWaiterFactory(BaseWaiterFactory): @property def _waiter_model_data(self): data = dict( - instances_in_service=dict( + instances_healthy=dict( operation="DescribeAutoScalingInstances", delay=5, maxAttempts=120, acceptors=[ + _success_on_instance_health("HEALTHY"), + # Terminated Instances can't reach "Healthy" _fail_on_instance_lifecycle_states("Terminating"), _fail_on_instance_lifecycle_states("Terminated"), _fail_on_instance_lifecycle_states("Terminating:Wait"), _fail_on_instance_lifecycle_states("Terminating:Proceed"), - _fail_on_instance_lifecycle_states("Detaching"), - _fail_on_instance_lifecycle_states("Detached"), - _success_on_instance_lifecycle_states("InService"), ], ), - instances_in_standby=dict( + instances_unhealthy=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _success_on_instance_health("UNHEALTHY"), + # Instances in an unhealthy state can end up being automatically terminated + _no_instances("success"), + ], + ), + instances_protected=dict( operation="DescribeAutoScalingInstances", delay=5, maxAttempts=120, acceptors=[ + _success_on_instance_protection(True), + ], + ), + 
instances_not_protected=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _success_on_instance_protection(False), + # Instances without protection can end up being automatically terminated + _no_instances("success"), + ], + ), + instances_in_service=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ + _success_on_instance_lifecycle_states("InService"), + # Terminated instances can't reach InService _fail_on_instance_lifecycle_states("Terminating"), _fail_on_instance_lifecycle_states("Terminated"), _fail_on_instance_lifecycle_states("Terminating:Wait"), _fail_on_instance_lifecycle_states("Terminating:Proceed"), - _fail_on_instance_lifecycle_states("Detaching"), - _fail_on_instance_lifecycle_states("Detached"), - _success_on_instance_lifecycle_states("Standby"), ], ), - instances_detached=dict( + instances_in_standby=dict( operation="DescribeAutoScalingInstances", delay=5, maxAttempts=120, acceptors=[ + _success_on_instance_lifecycle_states("Standby"), + # Terminated instances can't reach Standby _fail_on_instance_lifecycle_states("Terminating"), _fail_on_instance_lifecycle_states("Terminated"), _fail_on_instance_lifecycle_states("Terminating:Wait"), _fail_on_instance_lifecycle_states("Terminating:Proceed"), + ], + ), + instances_detached=dict( + operation="DescribeAutoScalingInstances", + delay=5, + maxAttempts=120, + acceptors=[ _success_on_instance_lifecycle_states("Detached"), _no_instances("success"), ], diff --git a/plugins/module_utils/autoscaling.py b/plugins/module_utils/autoscaling.py index d3a86f526bb..3c12b444c29 100644 --- a/plugins/module_utils/autoscaling.py +++ b/plugins/module_utils/autoscaling.py @@ -11,12 +11,12 @@ import typing +# Not intended for general re-use / re-import +from ._autoscaling import common as _common from ._autoscaling import groups as _groups from ._autoscaling import instances as _instances from ._autoscaling import transformations as 
_transformations from ._autoscaling import waiters as _waiters -from ._autoscaling.common import AnsibleAutoScalingError # pylint: disable=unused-import -from ._autoscaling.common import AutoScalingErrorHandler # pylint: disable=unused-import from .retries import AWSRetry if typing.TYPE_CHECKING: @@ -29,6 +29,11 @@ from .transformation import AnsibleAWSResourceList from .transformation import BotoResourceList +# Intended for general use / re-import +AnsibleAutoScalingError = _common.AnsibleAutoScalingError +AutoScalingErrorHandler = _common.AutoScalingErrorHandler +WAITER_MAP = _waiters.WAITER_MAP + def get_autoscaling_groups( client: RetryingBotoClientWrapper, group_names: Optional[List[str]] = None diff --git a/plugins/modules/autoscaling_instance.py b/plugins/modules/autoscaling_instance.py index 6ddfcfe0e23..94e72487d96 100644 --- a/plugins/modules/autoscaling_instance.py +++ b/plugins/modules/autoscaling_instance.py @@ -34,7 +34,7 @@ - B(Note:) When adding instances to an AutoScaling Group or returning instances to service from standby, the desired capacity is B(always) incremented. If the total number of instances would exceed the maximum size of the group then the operation will fail. 
- choices: ['present', 'attached', 'terminate', 'detached', 'standby'] + choices: ['present', 'attached', 'terminated', 'detached', 'standby'] default: present type: str instance_ids: @@ -160,6 +160,7 @@ import typing from copy import deepcopy +from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import WAITER_MAP from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import AnsibleAutoScalingError from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import AutoScalingErrorHandler from ansible_collections.amazon.aws.plugins.module_utils.autoscaling import get_autoscaling_instances @@ -182,7 +183,7 @@ # There's also a number of "Warmed" states that we could support with relatively minimal effort, but -# we can't test them +# we can't test them (currently) STATE_MAP = { "pending": ["Pending", "Pending:Proceed", "Pending:Wait"], "stable": ["InService", "Standby"], @@ -199,6 +200,15 @@ def _all_instance_ids(instances: List) -> Set[str]: return {i.get("instance_id") for i in instances} +def _instance_ids_with_health(instances: List, health: str) -> Set[str]: + health = health.lower() + return {i.get("instance_id") for i in instances if i.get("health_status", "").lower() == health} + + +def _instance_ids_with_protection(instances: List, protection: bool) -> Set[str]: + return {i.get("instance_id") for i in instances if i.get("protected_from_scale_in", False) == protection} + + def _instance_ids_in_states(instances: List, states: List[str]) -> Set[str]: states = [s.lower() for s in states] return {i.get("instance_id") for i in instances if i.get("lifecycle_state", "").lower() in states} @@ -213,6 +223,28 @@ def _token_instance(instance_id, group_name): ) +@AutoScalingErrorHandler.common_error_handler("set instance health") +@AWSRetry.jittered_backoff() +def _set_instance_health(client: RetryingBotoClientWrapper, instance_id: str, health: str, respect_grace: bool): + return client.set_instance_health( + InstanceId=instance_id, 
+ HealthStatus=health, + ShouldRespectGracePeriod=respect_grace, + ) + + +@AutoScalingErrorHandler.common_error_handler("set instance protection") +@AWSRetry.jittered_backoff() +def _set_instance_protection( + client: RetryingBotoClientWrapper, instance_ids: Set[str], group_name: str, protected: bool +): + return client.set_instance_protection( + InstanceIds=list(instance_ids), + AutoScalingGroupName=group_name, + ProtectedFromScaleIn=protected, + ) + + @AutoScalingErrorHandler.common_error_handler("detach auto scaling instances from group") @AWSRetry.jittered_backoff() def _detach_instances( @@ -283,16 +315,9 @@ def wait_instance_state( if not instance_ids: return - waiter_map = { - "Standby": "instances_in_standby", - "Terminated": "instances_terminated", - "Detached": "instances_detached", - "InService": "instances_in_service", - } - waiter_config = custom_waiter_config(timeout=wait_timeout, default_pause=10) - waiter = get_autoscaling_waiter(client, waiter_map[state]) + waiter = get_autoscaling_waiter(client, WAITER_MAP[state]) AutoScalingErrorHandler.common_error_handler(f"wait for instances to reach {state}")(waiter.wait)( InstanceIds=list(instance_ids), WaiterConfig=waiter_config, @@ -313,8 +338,12 @@ def _inject_instances(instances, group_name, missing_ids): def _change_instances(instances, group_name, change_ids, state=None, health=None, protection=None): for instance in instances: if instance.get("instance_id") in change_ids: - if state: + if state is not None: instance["lifecycle_state"] = state + if health is not None: + instance["health_status"] = health + if protection is not None: + instance["protected_from_scale_in"] = protection return instances @@ -585,7 +614,33 @@ def ensure_instance_health( wait: bool, wait_timeout: int, ) -> Tuple[bool, AnsibleAWSResourceList]: - return False, instances_start + # nb. 
With Health the API documentation's inconsistent: + # it appears to want Capitalized for set(), but spits out UPPERCASE for get() + if health is None: + return False, instances_start + if instance_ids is None: + instance_ids = _all_instance_ids(instances_start) + else: + instance_ids = set(instance_ids) + + ready_ids = _instance_ids_with_health(instances_start, health) & instance_ids + changed_ids = instance_ids - ready_ids + + if not changed_ids: + return False, instances_start + + if check_mode: + health = health.upper() + changed_instances = _change_instances(deepcopy(instances_start), group_name, changed_ids, health=health) + return True, changed_instances + + for instance_id in changed_ids: + _set_instance_health(client, instance_id, health, respect_grace_period) + health = health.upper() + wait_instance_state(client, health.upper(), check_mode, group_name, changed_ids, wait, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return True, instances_complete def ensure_instance_protection( @@ -602,8 +657,27 @@ def ensure_instance_protection( return False, instances_start if instance_ids is None: instance_ids = _all_instance_ids(instances_start) + else: + instance_ids = set(instance_ids) - return False, instances_start + ready_ids = _instance_ids_with_protection(instances_start, protection) & instance_ids + changed_ids = instance_ids - ready_ids + + if not changed_ids: + return False, instances_start + + if check_mode: + changed_instances = _change_instances(deepcopy(instances_start), group_name, changed_ids, protection=protection) + return True, changed_instances + + _set_instance_protection(client, changed_ids, group_name, protection) + + state = "Protected" if protection else "NotProtected" + + wait_instance_state(client, state, check_mode, group_name, changed_ids, wait, wait_timeout) + + instances_complete = get_autoscaling_instances(client, group_name=group_name) + return True, instances_complete def 
ensure_instance_pool( @@ -722,8 +796,8 @@ def _validate_remove_conditions(params: Dict[str, Any], instances: AnsibleAWSRes def _validate_attach_conditions(params: Dict[str, Any], instances: AnsibleAWSResourceList) -> None: - instance_ids = set(params.get("instance_ids")) all_ids = _all_instance_ids(instances) + instance_ids = set(params.get("instance_ids") or []) # These instances are terminating, we can't do anything with them. terminating_ids = _instance_ids_in_states(instances, STATE_MAP["terminating+"]) & instance_ids @@ -812,6 +886,10 @@ def do(module): after=dict(auto_scaling_instances=instances), ) + result["changed_pool"] = changed_pool + result["changed_protection"] = changed_protection + result["changed_health"] = changed_health + module.exit_json(**result) diff --git a/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml b/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml index bfc77868a39..489503ad954 100644 --- a/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml +++ b/tests/integration/targets/autoscaling_instance/tasks/env_setup.yml @@ -55,6 +55,8 @@ cidr_ip: "0.0.0.0/0" register: sg +# Note: main autoscaling tests use ASG "Launch Configs", these are being phased out in favour of EC2 +# Launch Templates - name: ensure launch templates exist community.aws.ec2_launch_template: name: "{{ item }}" @@ -78,6 +80,8 @@ launch_template: launch_template_name: "{{ default_resource_name }}-1" desired_capacity: 2 + health_check_period: 10 + default_cooldown: 10 min_size: 0 max_size: 4 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" @@ -89,6 +93,14 @@ - create_asg.viable_instances == 2 - create_asg.instances | length == 2 +- ansible.builtin.assert: + that: + - create_asg.viable_instances == 2 + - create_asg.instances | length == 2 + - ansible.builtin.set_fact: initial_instances: "{{ create_asg.instances }}" all_instances: "{{ create_asg.instances }}" + +- amazon.aws.autoscaling_group_info: + name: "{{ 
default_resource_name }}" diff --git a/tests/integration/targets/autoscaling_instance/tasks/tests.yml b/tests/integration/targets/autoscaling_instance/tasks/tests.yml index a7742b40c6c..38935f554a7 100644 --- a/tests/integration/targets/autoscaling_instance/tasks/tests.yml +++ b/tests/integration/targets/autoscaling_instance/tasks/tests.yml @@ -1,3 +1,5 @@ --- - ansible.builtin.include_tasks: describe.yml - ansible.builtin.include_tasks: attach_detach.yml +- ansible.builtin.include_tasks: health.yml +- ansible.builtin.include_tasks: protection.yml From 122cc58d9c6107e73e71516950162a6524adcc2d Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 8 Oct 2024 10:47:24 +0200 Subject: [PATCH 5/8] lint --- plugins/module_utils/_autoscaling/waiters.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/module_utils/_autoscaling/waiters.py b/plugins/module_utils/_autoscaling/waiters.py index e611565d18c..d51b3bb6547 100644 --- a/plugins/module_utils/_autoscaling/waiters.py +++ b/plugins/module_utils/_autoscaling/waiters.py @@ -32,7 +32,9 @@ def _success_on_instance_health(health): def _success_on_instance_protection(state): - return dict(state="success", matcher="pathAll", expected=state, argument="AutoScalingInstances[].ProtectedFromScaleIn") + return dict( + state="success", matcher="pathAll", expected=state, argument="AutoScalingInstances[].ProtectedFromScaleIn" + ) def _no_instances(result): From cc441159d7a6d33f5dcfedb048250d91cc5f5b72 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 11 Oct 2024 09:05:52 +0200 Subject: [PATCH 6/8] bump version_added --- plugins/modules/autoscaling_instance.py | 2 +- plugins/modules/autoscaling_instance_info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/modules/autoscaling_instance.py b/plugins/modules/autoscaling_instance.py index 94e72487d96..01d25afd99a 100644 --- a/plugins/modules/autoscaling_instance.py +++ b/plugins/modules/autoscaling_instance.py @@ -9,7 +9,7 @@ 
DOCUMENTATION = r""" --- module: autoscaling_instance -version_added: 8.4.0 +version_added: 9.0.0 short_description: manage instances associated with AWS AutoScaling Groups (ASGs) description: - Manage instances associated with AWS AutoScaling Groups (ASGs). diff --git a/plugins/modules/autoscaling_instance_info.py b/plugins/modules/autoscaling_instance_info.py index ef15e91fa02..9a7ed113eb0 100644 --- a/plugins/modules/autoscaling_instance_info.py +++ b/plugins/modules/autoscaling_instance_info.py @@ -9,7 +9,7 @@ DOCUMENTATION = r""" --- module: autoscaling_instance_info -version_added: 8.3.0 +version_added: 9.0.0 short_description: describe instances associated with AWS AutoScaling Groups (ASGs) description: - Describe instances associated with AWS AutoScaling Groups (ASGs). From 69827e310ebf6e23a5fb0f941862ccde0a9736b4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 11 Oct 2024 16:25:44 +0200 Subject: [PATCH 7/8] missing files --- .../autoscaling_instance/tasks/health.yml | 663 ++++++++++++++++++ .../autoscaling_instance/tasks/protection.yml | 436 ++++++++++++ 2 files changed, 1099 insertions(+) create mode 100644 tests/integration/targets/autoscaling_instance/tasks/health.yml create mode 100644 tests/integration/targets/autoscaling_instance/tasks/protection.yml diff --git a/tests/integration/targets/autoscaling_instance/tasks/health.yml b/tests/integration/targets/autoscaling_instance/tasks/health.yml new file mode 100644 index 00000000000..02f6f00ab83 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/health.yml @@ -0,0 +1,663 @@ +--- +- name: Disable Health Check replacement + amazon.aws.autoscaling_group: + name: "{{ default_resource_name }}" + health_check_period: 10 + default_cooldown: 5 + suspend_processes: + - ReplaceUnhealthy + state: present + register: suspend_health_replacement +- ansible.builtin.assert: + that: + - suspend_health_replacement is successful + +# ===================================================== +# 
Test updating the health of all instances +# ===================================================== + +# Set all to Healthy - no change +- name: instance_ids - all Healthy (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_all + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Healthy (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ 
default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_all + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# Set all to UnHealthy +- name: instance_ids - all Unhealthy - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_all + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - 
"'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Unhealthy + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_all + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - 
specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Unhealthy (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_all + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # 
Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Unhealthy (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_all + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: 
"{{ unhealthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# Set all to Healthy again +- name: instance_ids - all Healthy - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_all + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + 
other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Healthy + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_all + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# -- implicit instance list -- + +# implicitly change all +- name: no instance_ids - all Unhealthy + amazon.aws.autoscaling_instance: + group_name: "{{ default_resource_name }}" + state: present + 
health: Unhealthy + diff: True + register: unhealthy_all + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# implicitly change all +- name: no instance_ids - all Healthy + amazon.aws.autoscaling_instance: + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_all + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - 
"'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# ===================================================== +# Test updating the health of one instance +# ===================================================== + +# Set to Healthy - no change +- name: instance_ids - one Healthy (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_one + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == 
"HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - one Healthy (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_one + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - 
other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# Set to UnHealthy +- name: instance_ids - one Unhealthy - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_one + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - 
"'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - one Unhealthy + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_one + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 
'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - one Unhealthy (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_one + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - one Unhealthy (idempotency) + amazon.aws.autoscaling_instance: + 
instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Unhealthy + diff: True + register: unhealthy_one + +- ansible.builtin.assert: + that: + - current_result is not changed + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "UNHEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ unhealthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# Set to Healthy again +- name: instance_ids - one Healthy - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_one + check_mode: True + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' 
not in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - one Healthy + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + health: Healthy + diff: True + register: healthy_one + +- ansible.builtin.assert: + that: + - current_result is changed + - "'autoscaling:SetInstanceHealth' in current_result.resource_actions" + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in 
specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + vars: + current_result: "{{ healthy_one }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# ===================================================== + +- name: Enable Health Check replacement + amazon.aws.autoscaling_group: + name: "{{ default_resource_name }}" + suspend_processes: [] + health_check_period: 10 + default_cooldown: 5 + state: present + register: restore_health_replacement +- ansible.builtin.assert: + that: + - restore_health_replacement is successful diff --git a/tests/integration/targets/autoscaling_instance/tasks/protection.yml b/tests/integration/targets/autoscaling_instance/tasks/protection.yml new file mode 100644 index 00000000000..23608d8ed47 --- /dev/null +++ b/tests/integration/targets/autoscaling_instance/tasks/protection.yml @@ -0,0 +1,436 @@ +--- +# ===================================================== +# Test updating the protection status of all instances +# ===================================================== + +# Set all to Unprotected - no change +- name: instance_ids - all Unprotected (idempotency) - check_mode + 
amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_all + check_mode: True + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + - current_result is not changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ unprotected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Unprotected (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_all + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' not in 
current_result.resource_actions" + - current_result is not changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ unprotected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# Set all to Protected +- name: instance_ids - all Protected - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_all + check_mode: True + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + - current_result is changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + 
- specific_instance_info.protected_from_scale_in == True + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == True + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ protected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Protected + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_all + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' in current_result.resource_actions" + - current_result is changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == True + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == True + - "'health_status' in other_instance_info" + - 
other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ protected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Protected (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_all + check_mode: True + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + - current_result is not changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == True + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == True + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - 
"'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ protected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Protected (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_all + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + - current_result is not changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == True + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == True + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ protected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | 
selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# Set all to Unprotected again +- name: instance_ids - all Unprotected - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_all + check_mode: True + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' not in current_result.resource_actions" + - current_result is changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ unprotected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +- name: instance_ids - all Unprotected + 
amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_all + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' in current_result.resource_actions" + - current_result is changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ unprotected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# -- implicit instance list -- + +# implicitly change all +- name: no instance_ids - all Protected + amazon.aws.autoscaling_instance: + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_all + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' in current_result.resource_actions" 
+ - current_result is changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == True + - "'health_status' in specific_instance_info" + - specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == True + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ protected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# implicitly change all +- name: no instance_ids - all Unprotected + amazon.aws.autoscaling_instance: + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_all + +- ansible.builtin.assert: + that: + - "'autoscaling:SetInstanceProtection' in current_result.resource_actions" + - current_result is changed + - "'auto_scaling_instances' in current_result" + - initial_instances[0] in listed_instance_ids + - initial_instances[1] in listed_instance_ids + - "'protected_from_scale_in' in specific_instance_info" + - specific_instance_info.protected_from_scale_in == False + - "'health_status' in specific_instance_info" + - 
specific_instance_info.health_status == "HEALTHY" + - "'protected_from_scale_in' in other_instance_info" + - other_instance_info.protected_from_scale_in == False + - "'health_status' in other_instance_info" + - other_instance_info.health_status == "HEALTHY" + # Ensure we only make the limited calls we expect + - "'autoscaling:DetachInstances' not in current_result.resource_actions" + - "'autoscaling:AttachInstances' not in current_result.resource_actions" + - "'autoscaling:SetInstanceHealth' not in current_result.resource_actions" + vars: + current_result: "{{ unprotected_all }}" + listed_instance_ids: "{{ current_result.auto_scaling_instances | map(attribute='instance_id') | list }}" + specific_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[1]) | first }}" + other_instance_info: "{{ current_result.auto_scaling_instances | selectattr('instance_id', 'equalto', initial_instances[0]) | first }}" + +# ===================================================== +# Test updating the protection of one instance +# ===================================================== + +# Set to Unprotected - no change +- name: instance_ids - one Unprotected (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_one + check_mode: True + +- name: instance_ids - one Unprotected (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_one + +# Set to Protected +- name: instance_ids - one Protected - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_one + 
check_mode: True + +- name: instance_ids - one Protected + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_one + +- name: instance_ids - one Protected (idempotency) - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_one + check_mode: True + +- name: instance_ids - one Protected (idempotency) + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: True + diff: True + register: protected_one + +# Set to Unprotected again +- name: instance_ids - one Unprotected - check_mode + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_one + check_mode: True + +- name: instance_ids - one Unprotected + amazon.aws.autoscaling_instance: + instance_ids: "{{ initial_instances[1] }}" + group_name: "{{ default_resource_name }}" + state: present + protection: False + diff: True + register: unprotected_one From 6139c2ef86bf284a1f83ec5a122b9ac6583df36b Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 25 Oct 2024 10:48:53 +0200 Subject: [PATCH 8/8] Disable tests due to missing permissions --- tests/integration/targets/autoscaling_instance/aliases | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/targets/autoscaling_instance/aliases b/tests/integration/targets/autoscaling_instance/aliases index cc06f7fdbfb..10cd19605f2 100644 --- a/tests/integration/targets/autoscaling_instance/aliases +++ b/tests/integration/targets/autoscaling_instance/aliases @@ -1,3 +1,5 @@ +# reason: missing-policy +disabled time=30m cloud/aws