diff --git a/changelogs/fragments/add_ruff.yml b/changelogs/fragments/add_ruff.yml new file mode 100644 index 00000000000..cbd7e35de80 --- /dev/null +++ b/changelogs/fragments/add_ruff.yml @@ -0,0 +1,5 @@ +trivial: + - "Lint collection using ``ruff`` linter." + +minor_changes: + - "Introduce ``ruff`` linter into CI." diff --git a/plugins/action/s3_object.py b/plugins/action/s3_object.py index f78a42fa39b..378eb4d694b 100644 --- a/plugins/action/s3_object.py +++ b/plugins/action/s3_object.py @@ -19,7 +19,7 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True def run(self, tmp=None, task_vars=None): - """handler for s3_object operations + """Handler for s3_object operations. This adds the magic that means 'src' can point to both a 'remote' file on the 'host' or in the 'files/' lookup path on the controller. diff --git a/plugins/inventory/aws_ec2.py b/plugins/inventory/aws_ec2.py index bf0bc50b12c..edfb4c99ce1 100644 --- a/plugins/inventory/aws_ec2.py +++ b/plugins/inventory/aws_ec2.py @@ -749,7 +749,6 @@ def _add_hosts( :param str hostvars_suffix: ends the hostvars variable name with this suffix :param bool use_contrib_script_compatible_ec2_tag_keys: transform the host name with the legacy naming system """ - for name, host_vars in self.iter_entry( hosts, hostnames, diff --git a/plugins/lookup/secretsmanager_secret.py b/plugins/lookup/secretsmanager_secret.py index 254182f30f0..2ef7bbe3d57 100644 --- a/plugins/lookup/secretsmanager_secret.py +++ b/plugins/lookup/secretsmanager_secret.py @@ -149,7 +149,6 @@ def run(self, terms, variables, **kwargs): :variables: ansible variables active at the time of the lookup :returns: A list of parameter values or a list of dictionaries if bypath=True. """ - super().run(terms, variables, **kwargs) on_missing = self.get_option("on_missing") diff --git a/plugins/lookup/ssm_parameter.py b/plugins/lookup/ssm_parameter.py index 0ca3afdd8a8..aa794697563 100644 --- a/plugins/lookup/ssm_parameter.py +++ b/plugins/lookup/ssm_parameter.py @@ -153,7 +153,6 @@ def run(self, terms, variables, **kwargs): :kwarg variables: ansible variables active at the time of the lookup :returns: A list of parameter values or a list of dictionaries if bypath=True. """ - super().run(terms, variables, **kwargs) on_missing = self.get_option("on_missing") diff --git a/plugins/module_utils/acm.py b/plugins/module_utils/acm.py index 4febe874346..7db9de1f25e 100644 --- a/plugins/module_utils/acm.py +++ b/plugins/module_utils/acm.py @@ -11,9 +11,7 @@ # - acm_certificate # - acm_certificate_info -""" -Common Amazon Certificate Manager facts shared between modules -""" +"""Common Amazon Certificate Manager facts shared between modules.""" try: from botocore.exceptions import BotoCoreError @@ -49,7 +47,7 @@ def runner(*args, **kwargs): class ACMServiceManager: - """Handles ACM Facts Services""" + """Handles ACM Facts Services.""" def __init__(self, module): self.module = module @@ -206,7 +204,7 @@ def _filter_certificate(cert): def get_domain_of_cert(self, arn, **kwargs): """ returns the domain name of a certificate (encoded in the public cert) - for a given ARN A cert with that ARN must already exist + for a given ARN A cert with that ARN must already exist. 
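# The changelog fragment at the top of this diff adds ``ruff`` to CI. A minimal
# sketch of what such a lint step could look like, assuming ``ruff check`` is
# run over the collection's source trees (the paths and the CI wiring are
# assumptions here, not taken from this change):
import subprocess
import sys

def lint() -> int:
    """Run ruff over the plugin and test trees and return its exit status."""
    result = subprocess.run(["ruff", "check", "plugins", "tests"], check=False)
    return result.returncode

if __name__ == "__main__":
    sys.exit(lint())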
""" if arn is None: self.module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified") diff --git a/plugins/module_utils/arn.py b/plugins/module_utils/arn.py index d62b4c4d800..e198387a86a 100644 --- a/plugins/module_utils/arn.py +++ b/plugins/module_utils/arn.py @@ -34,7 +34,7 @@ def validate_aws_arn( def parse_aws_arn(arn): """ - Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html + Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html. The following are the general formats for ARNs. arn:partition:service:region:account-id:resource-id @@ -70,8 +70,7 @@ def parse_aws_arn(arn): # aren't specific to the EC2 service def is_outpost_arn(arn): """ - Validates that the ARN is for an AWS Outpost - + Validates that the ARN is for an AWS Outpost. API Specification Document: https://docs.aws.amazon.com/outposts/latest/APIReference/API_Outpost.html diff --git a/plugins/module_utils/batch.py b/plugins/module_utils/batch.py index 47281307edf..fd7d3d6d114 100644 --- a/plugins/module_utils/batch.py +++ b/plugins/module_utils/batch.py @@ -27,9 +27,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" -This module adds shared support for Batch modules. -""" +"""This module adds shared support for Batch modules.""" from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict diff --git a/plugins/module_utils/botocore.py b/plugins/module_utils/botocore.py index d5ad7ea8357..f8214bd5c1f 100644 --- a/plugins/module_utils/botocore.py +++ b/plugins/module_utils/botocore.py @@ -86,7 +86,7 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None ValueError, botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError, - botocore.exceptions.NoRegionError + botocore.exceptions.NoRegionError. """ try: return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) @@ -113,7 +113,7 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None def _merge_botocore_config(config_a, config_b): """ Merges the extra configuration options from config_b into config_a. - Supports both botocore.config.Config objects and dicts + Supports both botocore.config.Config objects and dicts. """ if not config_b: return config_a @@ -316,7 +316,7 @@ def paginated_query_with_retries(client, paginator_name, retry_decorator=None, * def gather_sdk_versions(): - """Gather AWS SDK (boto3 and botocore) dependency versions + """Gather AWS SDK (boto3 and botocore) dependency versions. Returns {'boto3_version': str, 'botocore_version': str} Returns {} if either module is not installed @@ -412,9 +412,7 @@ def normalize_boto3_result(result): def enable_placebo(session): - """ - Helper to record or replay offline modules for testing purpose. 
- """ + """Helper to record or replay offline modules for testing purpose.""" if "_ANSIBLE_PLACEBO_RECORD" in os.environ: import placebo @@ -454,9 +452,8 @@ def check_sdk_version_supported(botocore_version=None, boto3_version=None, warn= AnsibleBotocoreError - If botocore/boto3 is missing returns False if boto3 or botocore is less than the minimum supported versions - True if boto3 and botocore are greater than or equal the the minimum supported versions + True if boto3 and botocore are greater than or equal the the minimum supported versions. """ - botocore_version = botocore_version or MINIMUM_BOTOCORE_VERSION boto3_version = boto3_version or MINIMUM_BOTO3_VERSION diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index 4b2775cb309..920e65e802f 100644 --- a/plugins/module_utils/cloud.py +++ b/plugins/module_utils/cloud.py @@ -80,9 +80,7 @@ def _retry_func( class CloudRetry: - """ - The base class to be used by other cloud providers to provide a backoff/retry decorator based on status codes. - """ + """The base class to be used by other cloud providers to provide a backoff/retry decorator based on status codes.""" base_class = type(None) diff --git a/plugins/module_utils/cloudfront_facts.py b/plugins/module_utils/cloudfront_facts.py index 342adc82d24..a0ac3873c2d 100644 --- a/plugins/module_utils/cloudfront_facts.py +++ b/plugins/module_utils/cloudfront_facts.py @@ -11,9 +11,7 @@ # - cloudfront_invalidation # - cloudfront_origin_access_identity -""" -Common cloudfront facts shared between modules -""" +"""Common cloudfront facts shared between modules.""" from functools import partial @@ -49,7 +47,7 @@ def _cloudfront_paginate_build_full_result(client, client_method, **kwargs): class CloudFrontFactsServiceManager: - """Handles CloudFront Facts Services""" + """Handles CloudFront Facts Services.""" CLOUDFRONT_CLIENT_API_MAPPING = { "get_distribution": { diff --git a/plugins/module_utils/core.py b/plugins/module_utils/core.py index 44fd1d80be8..61433a4a4e3 100644 --- a/plugins/module_utils/core.py +++ b/plugins/module_utils/core.py @@ -3,7 +3,7 @@ # Copyright 2017 Michael De La Rue | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -"""This module adds shared support for generic Amazon AWS modules +"""This module adds shared support for generic Amazon AWS modules. In order to use this module, include it as part of a custom module as shown below. diff --git a/plugins/module_utils/direct_connect.py b/plugins/module_utils/direct_connect.py index 8fdaf94b85c..04602432bcc 100644 --- a/plugins/module_utils/direct_connect.py +++ b/plugins/module_utils/direct_connect.py @@ -27,9 +27,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" -This module adds shared support for Direct Connect modules. -""" +"""This module adds shared support for Direct Connect modules.""" import traceback diff --git a/plugins/module_utils/ec2.py b/plugins/module_utils/ec2.py index f3aa9f3f133..9917aabbd3a 100644 --- a/plugins/module_utils/ec2.py +++ b/plugins/module_utils/ec2.py @@ -96,7 +96,7 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id """Return list of security group IDs from security group names. Note that security group names are not unique across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. 
This will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in - a try block + a try block. """ def get_sg_name(sg): @@ -159,7 +159,6 @@ def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None): :param tags_to_set: A dictionary of key/value pairs to set :param retry_codes: additional boto3 error codes to trigger retries """ - if not tags_to_set: return False if module.check_mode: @@ -188,7 +187,6 @@ def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None :param tags_to_unset: a list of tag keys to removes :param retry_codes: additional boto3 error codes to trigger retries """ - if not tags_to_unset: return False if module.check_mode: @@ -251,7 +249,6 @@ def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, :param retry_codes: additional boto3 error codes to trigger retries :return: changed: returns True if the tags are changed """ - if tags is None: return False @@ -292,7 +289,7 @@ def normalize_ec2_vpc_dhcp_config(option_config): "netbios-name-servers": ["10.0.0.1", "10.0.1.1"], "netbios-node-type": "1", "ntp-servers": ["10.0.0.2", "10.0.1.2"] - }, + },. """ config_data = {} diff --git a/plugins/module_utils/elb_utils.py b/plugins/module_utils/elb_utils.py index 8dc5eabfe6e..6308358136f 100644 --- a/plugins/module_utils/elb_utils.py +++ b/plugins/module_utils/elb_utils.py @@ -38,7 +38,6 @@ def _get_elb(connection, module, elb_name): :param elb_name: Name of load balancer to get :return: boto3 ELB dict or None if not found """ - try: load_balancer_paginator = connection.get_paginator("describe_load_balancers") return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())["LoadBalancers"][0] @@ -56,7 +55,6 @@ def get_elb_listener(connection, module, elb_arn, listener_port): :param listener_port: Port of the listener to look for :return: boto3 ELB listener dict or None if not found """ - try: listener_paginator = connection.get_paginator("describe_listeners") listeners = ( @@ -84,7 +82,6 @@ def get_elb_listener_rules(connection, module, listener_arn): :param listener_arn: ARN of the ELB listener :return: boto3 ELB rules list """ - try: return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)["Rules"] except (BotoCoreError, ClientError) as e: @@ -93,14 +90,13 @@ def get_elb_listener_rules(connection, module, listener_arn): def convert_tg_name_to_arn(connection, module, tg_name): """ - Get ARN of a target group using the target group's name + Get ARN of a target group using the target group's name. :param connection: AWS boto3 elbv2 connection :param module: Ansible module :param tg_name: Name of the target group :return: target group ARN string """ - try: response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name]) except (BotoCoreError, ClientError) as e: diff --git a/plugins/module_utils/elbv2.py b/plugins/module_utils/elbv2.py index 3da2114c771..fbe642e39f0 100644 --- a/plugins/module_utils/elbv2.py +++ b/plugins/module_utils/elbv2.py @@ -73,7 +73,7 @@ def _simple_forward_config_arn(config, parent_arn): def _prune_ForwardConfig(action): """ Drops a redundant ForwardConfig where TargetGroupARN has already been set. - (So we can perform comparisons) + (So we can perform comparisons). 
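# A hedged usage sketch for the ec2.py tagging helpers whose docstrings are
# cleaned up above (add_ec2_tags / remove_ec2_tags / ensure_ec2_tags). The
# ``client`` and ``module`` objects are assumed to come from a real
# AnsibleAWSModule-based module; the tag value and resource type are
# illustrative only.
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags

def sync_name_tag(client, module, instance_id, name):
    # Returns True only if the instance's tags actually changed.
    return ensure_ec2_tags(
        client,
        module,
        instance_id,
        resource_type="instance",
        tags={"Name": name},
    )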
""" if action.get("Type", "") != "forward": return action @@ -162,12 +162,11 @@ def __init__(self, connection, module): def wait_for_ip_type(self, elb_arn, ip_type): """ - Wait for load balancer to reach 'active' status + Wait for load balancer to reach 'active' status. :param elb_arn: The load balancer ARN :return: """ - if not self.wait: return @@ -186,12 +185,11 @@ def wait_for_ip_type(self, elb_arn, ip_type): def wait_for_status(self, elb_arn): """ - Wait for load balancer to reach 'active' status + Wait for load balancer to reach 'active' status. :param elb_arn: The load balancer ARN :return: """ - if not self.wait: return @@ -203,12 +201,11 @@ def wait_for_status(self, elb_arn): def wait_for_deletion(self, elb_arn): """ - Wait for load balancer to reach 'active' status + Wait for load balancer to reach 'active' status. :param elb_arn: The load balancer ARN :return: """ - if not self.wait: return @@ -220,11 +217,10 @@ def wait_for_deletion(self, elb_arn): def get_elb_attributes(self): """ - Get load balancer attributes + Get load balancer attributes. :return: """ - try: attr_list = AWSRetry.jittered_backoff()(self.connection.describe_load_balancer_attributes)( LoadBalancerArn=self.elb["LoadBalancerArn"] @@ -239,11 +235,10 @@ def get_elb_attributes(self): def get_elb_ip_address_type(self): """ - Retrieve load balancer ip address type using describe_load_balancers + Retrieve load balancer ip address type using describe_load_balancers. :return: """ - return self.elb.get("IpAddressType", None) def update_elb_attributes(self): @@ -255,11 +250,10 @@ def update_elb_attributes(self): def get_elb_tags(self): """ - Get load balancer tags + Get load balancer tags. :return: """ - try: return AWSRetry.jittered_backoff()(self.connection.describe_tags)( ResourceArns=[self.elb["LoadBalancerArn"]] @@ -269,11 +263,10 @@ def get_elb_tags(self): def delete_tags(self, tags_to_delete): """ - Delete elb tags + Delete elb tags. :return: """ - try: AWSRetry.jittered_backoff()(self.connection.remove_tags)( ResourceArns=[self.elb["LoadBalancerArn"]], TagKeys=tags_to_delete @@ -285,11 +278,10 @@ def delete_tags(self, tags_to_delete): def modify_tags(self): """ - Modify elb tags + Modify elb tags. :return: """ - try: AWSRetry.jittered_backoff()(self.connection.add_tags)( ResourceArns=[self.elb["LoadBalancerArn"]], Tags=self.tags @@ -304,7 +296,6 @@ def delete(self): Delete elb :return: """ - try: AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( LoadBalancerArn=self.elb["LoadBalancerArn"] @@ -318,11 +309,10 @@ def delete(self): def compare_subnets(self): """ - Compare user subnets with current ELB subnets + Compare user subnets with current ELB subnets. 
:return: bool True if they match otherwise False """ - subnet_mapping_id_list = [] subnet_mappings = [] @@ -356,7 +346,6 @@ def modify_subnets(self): Modify elb subnets to match module parameters :return: """ - try: AWSRetry.jittered_backoff()(self.connection.set_subnets)( LoadBalancerArn=self.elb["LoadBalancerArn"], Subnets=self.subnets @@ -371,7 +360,6 @@ def update(self): Update the elb from AWS :return: """ - self.elb = get_elb(self.connection, self.module, self.module.params.get("name")) self.elb["tags"] = self.get_elb_tags() @@ -420,7 +408,6 @@ def create_elb(self): Create a load balancer :return: """ - params = self._elb_create_params() try: @@ -485,9 +472,8 @@ def _elb_create_params(self): def compare_elb_attributes(self): """ Compare user attributes with current ELB attributes - :return: bool True if they match otherwise False + :return: bool True if they match otherwise False. """ - update_attributes = [] if ( self.access_logs_enabled is not None @@ -568,11 +554,10 @@ def compare_elb_attributes(self): def modify_elb_attributes(self): """ - Update Application ELB attributes if required + Update Application ELB attributes if required. :return: """ - update_attributes = [] if ( @@ -663,11 +648,10 @@ def modify_elb_attributes(self): def compare_security_groups(self): """ - Compare user security groups with current ELB security groups + Compare user security groups with current ELB security groups. :return: bool True if they match otherwise False """ - if set(self.elb["SecurityGroups"]) != set(self.security_groups): return False else: @@ -678,7 +662,6 @@ def modify_security_groups(self): Modify elb security groups to match module parameters :return: """ - try: AWSRetry.jittered_backoff()(self.connection.set_security_groups)( LoadBalancerArn=self.elb["LoadBalancerArn"], SecurityGroups=self.security_groups @@ -718,11 +701,10 @@ def _elb_create_params(self): def modify_elb_attributes(self): """ - Update Network ELB attributes if required + Update Network ELB attributes if required. :return: """ - update_attributes = [] if ( @@ -759,7 +741,6 @@ def modify_subnets(self): Modify elb subnets to match module parameters (unsupported for NLB) :return: """ - self.module.fail_json(msg="Modifying subnets and elastic IPs is not supported for Network Load Balancer") @@ -785,7 +766,7 @@ def __init__(self, connection, module, elb_arn): def update(self): """ - Update the listeners for the ELB + Update the listeners for the ELB. :return: """ @@ -793,11 +774,10 @@ def update(self): def _get_elb_listeners(self): """ - Get ELB listeners + Get ELB listeners. :return: """ - try: listener_paginator = self.connection.get_paginator("describe_listeners") return ( @@ -826,7 +806,6 @@ def _ensure_listeners_default_action_has_arn(self, listeners): :param listeners: a list of listener dicts :return: the same list of dicts ensuring that each listener DefaultActions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed. 
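# The ELB hunks above repeatedly wrap boto3 calls in
# ``AWSRetry.jittered_backoff()`` and drain paginators with
# ``build_full_result()``. A standalone sketch of that same pattern; the
# function name and retry count are illustrative, not part of this patch.
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

@AWSRetry.jittered_backoff(retries=10)
def describe_listeners_with_backoff(connection, elb_arn):
    # ``connection`` is expected to be a boto3 elbv2 client.
    paginator = connection.get_paginator("describe_listeners")
    return paginator.paginate(LoadBalancerArn=elb_arn).build_full_result()["Listeners"]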
""" - if not listeners: listeners = [] @@ -846,10 +825,7 @@ def _ensure_listeners_default_action_has_arn(self, listeners): return fixed_listeners def compare_listeners(self): - """ - - :return: - """ + """:return:""" listeners_to_modify = [] listeners_to_delete = [] listeners_to_add = deepcopy(self.listeners) @@ -885,7 +861,6 @@ def _compare_listener(current_listener, new_listener): :param new_listener: :return: """ - modified_listener = {} # Port @@ -959,7 +934,6 @@ def __init__(self, connection, module, listener, elb_arn): :param listener: :param elb_arn: """ - self.connection = connection self.module = module self.listener = listener @@ -1032,7 +1006,6 @@ def _ensure_rules_action_has_arn(self, rules): :param rules: a list of rule dicts :return: the same list of dicts ensuring that each rule Actions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed. """ - fixed_rules = [] for rule in rules: fixed_actions = [] @@ -1063,7 +1036,6 @@ def _compare_condition(self, current_conditions, condition): :param condition: :return: """ - condition_found = False for current_condition in current_conditions: @@ -1126,11 +1098,7 @@ def _compare_condition(self, current_conditions, condition): return condition_found def _compare_rule(self, current_rule, new_rule): - """ - - :return: - """ - + """:return:""" modified_rule = {} # Priority @@ -1171,11 +1139,7 @@ def _compare_rule(self, current_rule, new_rule): return modified_rule def compare_rules(self): - """ - - :return: - """ - + """:return:""" rules_to_modify = [] rules_to_delete = [] rules_to_add = deepcopy(self.rules) @@ -1254,11 +1218,10 @@ def __init__(self, connection, module, rule, listener_arn): def create(self): """ - Create a listener rule + Create a listener rule. :return: """ - try: self.rule["ListenerArn"] = self.listener_arn self.rule["Priority"] = int(self.rule["Priority"]) @@ -1270,11 +1233,10 @@ def create(self): def modify(self): """ - Modify a listener rule + Modify a listener rule. :return: """ - try: del self.rule["Priority"] AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule) @@ -1285,11 +1247,10 @@ def modify(self): def delete(self): """ - Delete a listener rule + Delete a listener rule. :return: """ - try: AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule["RuleArn"]) except (BotoCoreError, ClientError) as e: @@ -1303,7 +1264,6 @@ def set_rule_priorities(self): :return: """ - try: rules = [self.rule] if isinstance(self.rule, list): diff --git a/plugins/module_utils/errors.py b/plugins/module_utils/errors.py index 38e9b380072..c64ebd021e5 100644 --- a/plugins/module_utils/errors.py +++ b/plugins/module_utils/errors.py @@ -14,14 +14,13 @@ class AWSErrorHandler: - - """_CUSTOM_EXCEPTION can be overridden by subclasses to customize the exception raised""" + """_CUSTOM_EXCEPTION can be overridden by subclasses to customize the exception raised.""" _CUSTOM_EXCEPTION = AnsibleAWSError @classmethod def _is_missing(cls): - """Should be overridden with a class method that returns the value from is_boto3_error_code (or similar)""" + """Should be overridden with a class method that returns the value from is_boto3_error_code (or similar).""" return type("NeverEverRaisedException", (Exception,), {}) @classmethod @@ -54,7 +53,7 @@ def list_error_handler(cls, description, default_value=None): """A simple error handler that catches the standard Boto3 exceptions and raises an AnsibleAWSError exception. 
Error codes representing a non-existent entity will result in None being returned - Generally used for Get/List calls where the exception just means the resource isn't there + Generally used for Get/List calls where the exception just means the resource isn't there. param: description: a description of the action being taken. Exception raised will include a message of @@ -82,7 +81,7 @@ def deletion_error_handler(cls, description): """A simple error handler that catches the standard Boto3 exceptions and raises an AnsibleAWSError exception. Error codes representing a non-existent entity will result in None being returned - Generally used in deletion calls where NoSuchEntity means it's already gone + Generally used in deletion calls where NoSuchEntity means it's already gone. param: description: a description of the action being taken. Exception raised will include a message of diff --git a/plugins/module_utils/iam.py b/plugins/module_utils/iam.py index 155a63152d1..bd0cfcf4ebd 100644 --- a/plugins/module_utils/iam.py +++ b/plugins/module_utils/iam.py @@ -216,14 +216,13 @@ def convert_managed_policy_names_to_arns(client, policy_names): def get_aws_account_id(module): - """Given an AnsibleAWSModule instance, get the active AWS account ID""" - + """Given an AnsibleAWSModule instance, get the active AWS account ID.""" return get_aws_account_info(module)[0] def get_aws_account_info(module): """Given an AnsibleAWSModule instance, return the account information - (account id and partition) we are currently working on + (account id and partition) we are currently working on. get_account_info tries too find out the account that we are working on. It's not guaranteed that this will be easy so we try in @@ -401,41 +400,41 @@ def validate_iam_identifiers(resource_type, name=None, path=None): def normalize_iam_mfa_device(device: BotoResource) -> AnsibleAWSResource: - """Converts IAM MFA Device from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts IAM MFA Device from the CamelCase boto3 format to the snake_case Ansible format.""" # MFA Devices don't support Tags (as of 1.34.52) return boto3_resource_to_ansible_dict(device) def normalize_iam_mfa_devices(devices: BotoResourceList) -> AnsibleAWSResourceList: - """Converts a list of IAM MFA Devices from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts a list of IAM MFA Devices from the CamelCase boto3 format to the snake_case Ansible format.""" # MFA Devices don't support Tags (as of 1.34.52) return boto3_resource_list_to_ansible_dict(devices) def normalize_iam_user(user: BotoResource) -> AnsibleAWSResource: - """Converts IAM users from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts IAM users from the CamelCase boto3 format to the snake_case Ansible format.""" return boto3_resource_to_ansible_dict(user) def normalize_iam_policy(policy: BotoResource) -> AnsibleAWSResource: - """Converts IAM policies from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts IAM policies from the CamelCase boto3 format to the snake_case Ansible format.""" return boto3_resource_to_ansible_dict(policy) def normalize_iam_group(group: BotoResource) -> AnsibleAWSResource: - """Converts IAM Groups from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts IAM Groups from the CamelCase boto3 format to the snake_case Ansible format.""" # Groups don't support Tags (as of 1.34.52) return boto3_resource_to_ansible_dict(group, force_tags=False) def 
normalize_iam_access_key(access_key: BotoResource) -> AnsibleAWSResource: - """Converts IAM access keys from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts IAM access keys from the CamelCase boto3 format to the snake_case Ansible format.""" # Access Keys don't support Tags (as of 1.34.52) return boto3_resource_to_ansible_dict(access_key, force_tags=False) def normalize_iam_access_keys(access_keys: BotoResourceList) -> AnsibleAWSResourceList: - """Converts a list of IAM access keys from the CamelCase boto3 format to the snake_case Ansible format""" + """Converts a list of IAM access keys from the CamelCase boto3 format to the snake_case Ansible format.""" # Access Keys don't support Tags (as of 1.34.52) if not access_keys: return access_keys @@ -444,9 +443,7 @@ def normalize_iam_access_keys(access_keys: BotoResourceList) -> AnsibleAWSResour def normalize_iam_instance_profile(profile: BotoResource) -> AnsibleAWSResource: - """ - Converts a boto3 format IAM instance profile into "Ansible" format - """ + """Converts a boto3 format IAM instance profile into "Ansible" format.""" transforms = {"Roles": _normalize_iam_roles} transformed_profile = boto3_resource_to_ansible_dict(profile, nested_transforms=transforms) return transformed_profile @@ -454,7 +451,7 @@ def normalize_iam_instance_profile(profile: BotoResource) -> AnsibleAWSResource: def normalize_iam_role(role: BotoResource, _v7_compat: bool = False) -> AnsibleAWSResource: """ - Converts a boto3 format IAM instance role into "Ansible" format + Converts a boto3 format IAM instance role into "Ansible" format. _v7_compat is deprecated and will be removed in release after 2026-05-01 DO NOT USE. """ diff --git a/plugins/module_utils/modules.py b/plugins/module_utils/modules.py index 82a81811d09..99f3eb6a80b 100644 --- a/plugins/module_utils/modules.py +++ b/plugins/module_utils/modules.py @@ -3,7 +3,7 @@ # Copyright 2017 Michael De La Rue | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -"""This module adds shared support for generic Amazon AWS modules +"""This module adds shared support for generic Amazon AWS modules. In order to use this module, include it as part of a custom module as shown below. @@ -69,7 +69,7 @@ class AnsibleAWSModule: - """An ansible module class for AWS modules + """An ansible module class for AWS modules. AnsibleAWSModule provides an a class for building modules which connect to Amazon Web Services. The interface is currently more @@ -209,7 +209,7 @@ def region(self): return get_aws_region(self) def fail_json_aws(self, exception, msg=None, **kwargs): - """call fail_json with processed exception + """Call fail_json with processed exception. function for converting exceptions thrown by AWS SDK modules, botocore, boto3 and boto, into nice error messages. @@ -243,13 +243,13 @@ def fail_json_aws(self, exception, msg=None, **kwargs): self.fail_json(**failure) def fail_json_aws_error(self, exception): - """A helper to call the right failure mode after catching an AnsibleAWSError""" + """A helper to call the right failure mode after catching an AnsibleAWSError.""" if exception.exception: self.fail_json_aws(exception.exception, msg=exception.message) self.fail_json(msg=exception.message) def _gather_versions(self): - """Gather AWS SDK (boto3 and botocore) dependency versions + """Gather AWS SDK (boto3 and botocore) dependency versions. 
Returns {'boto3_version': str, 'botocore_version': str} Returns {} if either is not installed @@ -366,9 +366,7 @@ def _aws_common_argument_spec(): def aws_argument_spec(): - """ - Returns a dictionary containing the argument_spec common to all AWS modules. - """ + """Returns a dictionary containing the argument_spec common to all AWS modules.""" region_spec = dict( region=dict( aliases=["aws_region", "ec2_region"], diff --git a/plugins/module_utils/policy.py b/plugins/module_utils/policy.py index 61b5edc1c98..36669d5b5f9 100644 --- a/plugins/module_utils/policy.py +++ b/plugins/module_utils/policy.py @@ -44,9 +44,7 @@ def _canonify_root_arn(arn): def _canonify_policy_dict_item(item, key): - """ - Converts special cases where there are multiple ways to write the same thing into a single form - """ + """Converts special cases where there are multiple ways to write the same thing into a single form.""" # There are multiple ways to specify anonymous principals # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-anonymous if key in ["NotPrincipal", "Principal"]: @@ -77,7 +75,7 @@ def _hashable_policy(policy, policy_list): ('Effect', ('Allow',)), ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))), - ('Version', ('2012-10-17',)))] + ('Version', ('2012-10-17',)))]. """ # Amazon will automatically convert bool and int to strings for us diff --git a/plugins/module_utils/rds.py b/plugins/module_utils/rds.py index 20e0ae5e083..5503bdb2adb 100644 --- a/plugins/module_utils/rds.py +++ b/plugins/module_utils/rds.py @@ -407,7 +407,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): def compare_iam_roles(existing_roles, target_roles, purge_roles): """ - Returns differences between target and existing IAM roles + Returns differences between target and existing IAM roles. Parameters: existing_roles (list): Existing IAM roles @@ -426,7 +426,7 @@ def compare_iam_roles(existing_roles, target_roles, purge_roles): def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove): """ - Update a DB instance's associated IAM roles + Update a DB instance's associated IAM roles. Parameters: client: RDS client diff --git a/plugins/module_utils/s3.py b/plugins/module_utils/s3.py index 961f36f22f0..c9b0ee3e62e 100644 --- a/plugins/module_utils/s3.py +++ b/plugins/module_utils/s3.py @@ -102,7 +102,7 @@ def validate_bucket_name(name): # Spot special case of fakes3. def is_fakes3(url): - """Return True if endpoint_url has scheme fakes3://""" + """Return True if endpoint_url has scheme fakes3://.""" result = False if url is not None: result = urlparse(url).scheme in ("fakes3", "fakes3s") diff --git a/plugins/module_utils/tagging.py b/plugins/module_utils/tagging.py index 9201c8979c0..4ccf4f39d8f 100644 --- a/plugins/module_utils/tagging.py +++ b/plugins/module_utils/tagging.py @@ -52,9 +52,8 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_ Dict: Dict of key:value pairs representing AWS tags { 'MyTagKey': 'MyTagValue', - } + }. 
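# The modules.py docstring above notes that AnsibleAWSModule is meant to be
# embedded in a custom module. A hedged, minimal sketch of such a module; the
# option name, failure message, and returned fact are invented for
# illustration.
try:
    import botocore
except ImportError:
    pass  # missing SDK is reported by AnsibleAWSModule itself

from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule

def main():
    module = AnsibleAWSModule(argument_spec=dict(name=dict(type="str", required=True)))
    client = module.client("ec2")
    try:
        vpcs = client.describe_vpcs()["Vpcs"]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")
    module.exit_json(changed=False, vpc_count=len(vpcs))

if __name__ == "__main__":
    main()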
""" - if tag_name_key_name and tag_value_key_name: tag_candidates = {tag_name_key_name: tag_value_key_name} else: @@ -70,7 +69,7 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_ def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name="Key", tag_value_key_name="Value"): - """Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts + """Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts. Note: booleans are converted to their Capitalized text form ("True" and "False"), this is different to ansible_dict_to_boto3_filter_list because historically we've used "to_text()" and @@ -96,7 +95,6 @@ def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name="Key", tag_value } ] """ - if not tags_dict: return [] @@ -191,7 +189,6 @@ def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True): :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty """ - tag_key_value_pairs_to_set = {} tag_keys_to_unset = [] diff --git a/plugins/module_utils/transformation.py b/plugins/module_utils/transformation.py index a5bc23607df..012de9ec633 100644 --- a/plugins/module_utils/transformation.py +++ b/plugins/module_utils/transformation.py @@ -68,9 +68,8 @@ def ansible_dict_to_boto3_filter_list(filters_dict): 'i-01234567', ] } - ] + ]. """ - filters_list = [] for k, v in filters_dict.items(): filter_dict = {"Name": k} @@ -103,7 +102,6 @@ def map_complex_type(complex_type, type_map): This ensures all keys within the root element are casted and valid integers """ - if complex_type is None: return new_type = type(complex_type)() @@ -126,7 +124,7 @@ def map_complex_type(complex_type, type_map): def scrub_none_parameters(parameters, descend_into_lists=True): """ - Iterate over a dictionary removing any keys that have a None value + Iterate over a dictionary removing any keys that have a None value. Reference: https://github.com/ansible-collections/community.aws/issues/251 Credit: https://medium.com/better-programming/how-to-remove-null-none-values-from-a-dictionary-in-python-1bedf1aab5e4 @@ -135,7 +133,6 @@ def scrub_none_parameters(parameters, descend_into_lists=True): :param parameters: parameter dict :return: parameter dict with all keys = None removed """ - clean_parameters = {} for k, v in parameters.items(): diff --git a/plugins/module_utils/waf.py b/plugins/module_utils/waf.py index 5e1cf107174..33b78e2743f 100644 --- a/plugins/module_utils/waf.py +++ b/plugins/module_utils/waf.py @@ -27,9 +27,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-""" -This module adds shared support for Web Application Firewall modules -""" +"""This module adds shared support for Web Application Firewall modules.""" try: import botocore diff --git a/plugins/modules/autoscaling_group.py b/plugins/modules/autoscaling_group.py index 378c84405dd..0b6722b7a2c 100644 --- a/plugins/modules/autoscaling_group.py +++ b/plugins/modules/autoscaling_group.py @@ -795,7 +795,8 @@ def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capa def enforce_required_arguments_for_create(): """As many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module, so we enforce - them here""" + them here. + """ missing_args = [] if module.params.get("launch_config_name") is None and module.params.get("launch_template") is None: module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create") diff --git a/plugins/modules/autoscaling_group_info.py b/plugins/modules/autoscaling_group_info.py index 59884bfc639..4d4d4dc015a 100644 --- a/plugins/modules/autoscaling_group_info.py +++ b/plugins/modules/autoscaling_group_info.py @@ -369,7 +369,6 @@ def find_asgs(conn, module, name=None, tags=None): } ] """ - try: asgs_paginator = conn.get_paginator("describe_auto_scaling_groups") asgs = asgs_paginator.paginate().build_full_result() diff --git a/plugins/modules/backup_plan.py b/plugins/modules/backup_plan.py index 1dd672933bc..24605910ba5 100644 --- a/plugins/modules/backup_plan.py +++ b/plugins/modules/backup_plan.py @@ -566,7 +566,6 @@ def tag_backup_plan( plan_arn : The ARN of the Backup Plan to operate on curr_tags : Dict of the current tags on resource, if any """ - if not new_tags and not current_tags: return False @@ -597,7 +596,7 @@ def tag_backup_plan( def delete_backup_plan(module: AnsibleAWSModule, client, backup_plan_id: str) -> dict: """ - Deletes a Backup Plan + Deletes a Backup Plan. module : AnsibleAWSModule object client : boto3 backup client connection object diff --git a/plugins/modules/backup_vault.py b/plugins/modules/backup_vault.py index ab408df3c4a..9a23f54f43f 100644 --- a/plugins/modules/backup_vault.py +++ b/plugins/modules/backup_vault.py @@ -107,7 +107,7 @@ def create_backup_vault(module, client, params): """ - Creates a Backup Vault + Creates a Backup Vault. module : AnsibleAWSModule object client : boto3 client connection object @@ -127,7 +127,7 @@ def create_backup_vault(module, client, params): def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True): """ - Creates, updates, removes tags on a Backup Vault resource + Creates, updates, removes tags on a Backup Vault resource. module : AnsibleAWSModule object client : boto3 client connection object @@ -136,7 +136,6 @@ def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True): curr_tags : Dict of the current tags on resource, if any purge_tags : true/false to determine if current tags will be retained or not """ - if tags is None: return False @@ -166,7 +165,7 @@ def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True): def get_vault_facts(module, client, vault_name): """ - Describes existing vault in an account + Describes existing vault in an account. module : AnsibleAWSModule object client : boto3 client connection object @@ -196,7 +195,7 @@ def get_vault_facts(module, client, vault_name): def delete_backup_vault(module, client, vault_name): """ - Delete a Backup Vault + Delete a Backup Vault. 
module : AnsibleAWSModule object client : boto3 client connection object diff --git a/plugins/modules/cloudformation.py b/plugins/modules/cloudformation.py index c24b3d55bf6..5a01061cbb5 100644 --- a/plugins/modules/cloudformation.py +++ b/plugins/modules/cloudformation.py @@ -511,7 +511,7 @@ def update_stack(module, stack_params, cfn, events_limit): def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state): - """updates termination protection of a stack""" + """Updates termination protection of a stack.""" stack = get_stack_facts(module, cfn, stack_name) if stack: if stack["EnableTerminationProtection"] is not desired_termination_protection_state: @@ -526,7 +526,7 @@ def update_termination_protection(module, cfn, stack_name, desired_termination_p def stack_operation(module, cfn, stack_name, operation, events_limit, op_token=None): - """gets the status of a stack while it is created/updated/deleted""" + """Gets the status of a stack while it is created/updated/deleted.""" existed = [] while True: try: diff --git a/plugins/modules/cloudformation_info.py b/plugins/modules/cloudformation_info.py index afc02e4ab40..28c5fe19452 100644 --- a/plugins/modules/cloudformation_info.py +++ b/plugins/modules/cloudformation_info.py @@ -303,7 +303,7 @@ class CloudFormationServiceManager: - """Handles CloudFormation Services""" + """Handles CloudFormation Services.""" def __init__(self, module): self.module = module @@ -402,7 +402,7 @@ def get_template(self, stack_name): def to_dict(items, key, value): - """Transforms a list of items to a Key/Value dictionary""" + """Transforms a list of items to a Key/Value dictionary.""" if items: return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) else: diff --git a/plugins/modules/cloudtrail.py b/plugins/modules/cloudtrail.py index 45c97cc8abd..82d5e7d9049 100644 --- a/plugins/modules/cloudtrail.py +++ b/plugins/modules/cloudtrail.py @@ -270,7 +270,7 @@ def get_kms_key_aliases(module, client, keyId): """ - get list of key aliases + get list of key aliases. module : AnsibleAWSModule object client : boto3 client connection object for kms @@ -288,7 +288,7 @@ def get_kms_key_aliases(module, client, keyId): def create_trail(module, client, ct_params): """ - Creates a CloudTrail + Creates a CloudTrail. module : AnsibleAWSModule object client : boto3 client connection object @@ -305,7 +305,7 @@ def create_trail(module, client, ct_params): def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True): """ - Creates, updates, removes tags on a CloudTrail resource + Creates, updates, removes tags on a CloudTrail resource. module : AnsibleAWSModule object client : boto3 client connection object @@ -314,7 +314,6 @@ def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True): curr_tags : Dict of the current tags on resource, if any dry_run : true/false to determine if changes will be made if needed """ - if tags is None: return False @@ -347,7 +346,7 @@ def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True): def set_logging(module, client, name, action): """ - Starts or stops logging based on given state + Starts or stops logging based on given state. module : AnsibleAWSModule object client : boto3 client connection object @@ -372,7 +371,7 @@ def set_logging(module, client, name, action): def get_trail_facts(module, client, name): """ - Describes existing trail in an account + Describes existing trail in an account. 
module : AnsibleAWSModule object client : boto3 client connection object @@ -417,7 +416,7 @@ def get_trail_facts(module, client, name): def delete_trail(module, client, trail_arn): """ - Delete a CloudTrail + Delete a CloudTrail. module : AnsibleAWSModule object client : boto3 client connection object @@ -431,7 +430,7 @@ def delete_trail(module, client, trail_arn): def update_trail(module, client, ct_params): """ - Delete a CloudTrail + Delete a CloudTrail. module : AnsibleAWSModule object client : boto3 client connection object diff --git a/plugins/modules/cloudwatchevent_rule.py b/plugins/modules/cloudwatchevent_rule.py index 800edb1e2b3..d0566856f30 100644 --- a/plugins/modules/cloudwatchevent_rule.py +++ b/plugins/modules/cloudwatchevent_rule.py @@ -262,7 +262,7 @@ def __init__( self.module = module def describe(self): - """Returns the existing details of the rule in AWS""" + """Returns the existing details of the rule in AWS.""" try: rule_info = self.client.describe_rule(Name=self.name) except is_boto3_error_code("ResourceNotFoundException"): @@ -275,7 +275,7 @@ def describe(self): return camel_dict_to_snake_dict(rule_info) def put(self, enabled=True): - """Creates or updates the rule in AWS""" + """Creates or updates the rule in AWS.""" request = { "Name": self.name, "State": "ENABLED" if enabled else "DISABLED", @@ -296,7 +296,7 @@ def put(self, enabled=True): return response def delete(self): - """Deletes the rule in AWS""" + """Deletes the rule in AWS.""" self.remove_all_targets() try: @@ -307,7 +307,7 @@ def delete(self): return response def enable(self): - """Enables the rule in AWS""" + """Enables the rule in AWS.""" try: response = self.client.enable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -316,7 +316,7 @@ def enable(self): return response def disable(self): - """Disables the rule in AWS""" + """Disables the rule in AWS.""" try: response = self.client.disable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -325,7 +325,7 @@ def disable(self): return response def list_targets(self): - """Lists the existing targets for the rule in AWS""" + """Lists the existing targets for the rule in AWS.""" try: targets = self.client.list_targets_by_rule(Rule=self.name) except is_boto3_error_code("ResourceNotFoundException"): @@ -338,7 +338,7 @@ def list_targets(self): return camel_dict_to_snake_dict(targets)["targets"] def put_targets(self, targets): - """Creates or updates the provided targets on the rule in AWS""" + """Creates or updates the provided targets on the rule in AWS.""" if not targets: return request = { @@ -353,7 +353,7 @@ def put_targets(self, targets): return response def remove_targets(self, target_ids): - """Removes the provided targets from the rule in AWS""" + """Removes the provided targets from the rule in AWS.""" if not target_ids: return request = {"Rule": self.name, "Ids": target_ids} @@ -365,12 +365,12 @@ def remove_targets(self, target_ids): return response def remove_all_targets(self): - """Removes all targets on rule""" + """Removes all targets on rule.""" targets = self.list_targets() return self.remove_targets([t["id"] for t in targets]) def _targets_request(self, targets): - """Formats each target for the request""" + """Formats each target for the request.""" targets_request = [] for target in targets: target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True)) @@ -395,7 +395,7 @@ def __init__(self, rule, targets): 
self.targets = targets def ensure_present(self, enabled=True): - """Ensures the rule and targets are present and synced""" + """Ensures the rule and targets are present and synced.""" rule_description = self.rule.describe() if rule_description: # Rule exists so update rule, targets and state @@ -407,11 +407,11 @@ def ensure_present(self, enabled=True): self._create(enabled) def ensure_disabled(self): - """Ensures the rule and targets are present, but disabled, and synced""" + """Ensures the rule and targets are present, but disabled, and synced.""" self.ensure_present(enabled=False) def ensure_absent(self): - """Ensures the rule and targets are absent""" + """Ensures the rule and targets are absent.""" rule_description = self.rule.describe() if not rule_description: # Rule doesn't exist so don't need to delete @@ -419,7 +419,7 @@ def ensure_absent(self): self.rule.delete() def fetch_aws_state(self): - """Retrieves rule and target state from AWS""" + """Retrieves rule and target state from AWS.""" aws_state = {"rule": {}, "targets": [], "changed": self.rule.changed} rule_description = self.rule.describe() if not rule_description: @@ -433,12 +433,12 @@ def fetch_aws_state(self): return aws_state def _sync_rule(self, enabled=True): - """Syncs local rule state with AWS""" + """Syncs local rule state with AWS.""" if not self._rule_matches_aws(): self.rule.put(enabled) def _sync_targets(self): - """Syncs local targets with AWS""" + """Syncs local targets with AWS.""" # Identify and remove extraneous targets on AWS target_ids_to_remove = self._remote_target_ids_to_remove() if target_ids_to_remove: @@ -450,7 +450,7 @@ def _sync_targets(self): self.rule.put_targets(targets_to_put) def _sync_state(self, enabled=True): - """Syncs local rule state with AWS""" + """Syncs local rule state with AWS.""" remote_state = self._remote_state() if enabled and remote_state != "ENABLED": self.rule.enable() @@ -458,12 +458,12 @@ def _sync_state(self, enabled=True): self.rule.disable() def _create(self, enabled=True): - """Creates rule and targets on AWS""" + """Creates rule and targets on AWS.""" self.rule.put(enabled) self.rule.put_targets(self.targets) def _rule_matches_aws(self): - """Checks if the local rule data matches AWS""" + """Checks if the local rule data matches AWS.""" aws_rule_data = self.rule.describe() # The rule matches AWS only if all rule data fields are equal @@ -471,7 +471,7 @@ def _rule_matches_aws(self): return all(getattr(self.rule, field) == aws_rule_data.get(field, None) for field in self.RULE_FIELDS) def _targets_to_put(self): - """Returns a list of targets that need to be updated or added remotely""" + """Returns a list of targets that need to be updated or added remotely.""" remote_targets = self.rule.list_targets() # keys with none values must be scrubbed off of self.targets @@ -495,13 +495,13 @@ def _targets_to_put(self): return [t for t in self.targets if camel_dict_to_snake_dict(t) not in remote_targets] def _remote_target_ids_to_remove(self): - """Returns a list of targets that need to be removed remotely""" + """Returns a list of targets that need to be removed remotely.""" target_ids = [t["id"] for t in self.targets] remote_targets = self.rule.list_targets() return [rt["id"] for rt in remote_targets if rt["id"] not in target_ids] def _remote_state(self): - """Returns the remote state from AWS""" + """Returns the remote state from AWS.""" description = self.rule.describe() if not description: return diff --git a/plugins/modules/ec2_ami.py b/plugins/modules/ec2_ami.py index 
699f22680c3..c20ce6f5883 100644 --- a/plugins/modules/ec2_ami.py +++ b/plugins/modules/ec2_ami.py @@ -655,7 +655,7 @@ def timeout(connection, image_id, wait_timeout): @classmethod def do(cls, module, connection, image_id): - """Entry point to deregister an image""" + """Entry point to deregister an image.""" delete_snapshot = module.params.get("delete_snapshot") wait = module.params.get("wait") wait_timeout = module.params.get("wait_timeout") @@ -778,7 +778,7 @@ def set_description(connection, module, image, description): @classmethod def do(cls, module, connection, image_id): - """Entry point to update an image""" + """Entry point to update an image.""" launch_permissions = module.params.get("launch_permissions") # remove any keys with value=None if launch_permissions: @@ -947,7 +947,7 @@ def build_create_image_parameters(**kwargs): @classmethod def do(cls, module, connection, _image_id): - """Entry point to create image""" + """Entry point to create image.""" create_image_parameters = cls.build_create_image_parameters(**module.params) func = cls.create_or_register(connection, create_image_parameters) diff --git a/plugins/modules/ec2_eip.py b/plugins/modules/ec2_eip.py index 52080ff3685..a7d6788fc66 100644 --- a/plugins/modules/ec2_eip.py +++ b/plugins/modules/ec2_eip.py @@ -297,7 +297,7 @@ def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_i @AWSRetry.jittered_backoff() def find_address(ec2, module, public_ip, device_id, is_instance=True): - """Find an existing Elastic IP address""" + """Find an existing Elastic IP address.""" filters = [] kwargs = {} @@ -331,7 +331,7 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True): def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): - """Check if the elastic IP is currently associated with the device""" + """Check if the elastic IP is currently associated with the device.""" address = find_address(ec2, module, address["PublicIp"], device_id, is_instance) if address: if is_instance: @@ -353,7 +353,7 @@ def allocate_address( search_tags=None, public_ipv4_pool=None, ): - """Allocate a new elastic IP address (when needed) and return it""" + """Allocate a new elastic IP address (when needed) and return it.""" if not domain: domain = "standard" @@ -405,8 +405,7 @@ def allocate_address( def release_address(ec2, module, address, check_mode): - """Release a previously allocated elastic IP address""" - + """Release a previously allocated elastic IP address.""" # If we're in check mode, nothing else to do if not check_mode: try: @@ -426,8 +425,7 @@ def describe_eni_with_backoff(ec2, module, device_id): def find_device(ec2, module, device_id, is_instance=True): - """Attempt to find the EC2 instance and return it""" - + """Attempt to find the EC2 instance and return it.""" if is_instance: try: paginator = ec2.get_paginator("describe_instances") @@ -526,7 +524,7 @@ def allocate_address_from_pool( tags, ): # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address - """Overrides botocore's allocate_address function to support BYOIP""" + """Overrides botocore's allocate_address function to support BYOIP.""" if check_mode: return None @@ -550,7 +548,7 @@ def allocate_address_from_pool( def generate_tag_dict(module, tag_name, tag_value): # type: (AnsibleAWSModule, str, str) -> Optional[Dict] - """Generates a dictionary to be passed as a filter to Amazon""" + """Generates a dictionary to be passed as a filter to Amazon.""" if tag_name and not tag_value: if 
tag_name.startswith("tag:"): tag_name = tag_name.strip("tag:") diff --git a/plugins/modules/ec2_import_image.py b/plugins/modules/ec2_import_image.py index 7ace4c7aa6b..9dcef4a60a3 100644 --- a/plugins/modules/ec2_import_image.py +++ b/plugins/modules/ec2_import_image.py @@ -331,10 +331,7 @@ def ensure_ec2_import_image_result(import_image_info): def absent(client, module): - """ - Cancel an in-process import virtual machine - """ - + """Cancel an in-process import virtual machine.""" filters = { "Filters": [ {"Name": "tag:Name", "Values": [module.params["task_name"]]}, diff --git a/plugins/modules/ec2_instance.py b/plugins/modules/ec2_instance.py index b853dc544d5..eea5f387eda 100644 --- a/plugins/modules/ec2_instance.py +++ b/plugins/modules/ec2_instance.py @@ -1311,9 +1311,8 @@ def build_network_spec(params): ], 'SecondaryPrivateIpAddressCount': 123, 'SubnetId': 'string' - }, + },. """ - interfaces = [] network = params.get("network") or {} if not network.get("interfaces"): @@ -1669,8 +1668,7 @@ def await_instances(ids, desired_module_state="present", force_wait=False): def diff_instance_and_params(instance, params, skip=None): - """boto3 instance obj, module params""" - + """boto3 instance obj, module params.""" if skip is None: skip = [] @@ -1888,9 +1886,7 @@ def get_default_subnet(vpc, availability_zone=None): def ensure_instance_state(desired_module_state, filters): - """ - Sets return keys depending on the desired instance state - """ + """Sets return keys depending on the desired instance state.""" results = dict() changed = False if desired_module_state in ("running", "started"): diff --git a/plugins/modules/ec2_key.py b/plugins/modules/ec2_key.py index f2e98e069b4..2b0c99a5c13 100644 --- a/plugins/modules/ec2_key.py +++ b/plugins/modules/ec2_key.py @@ -222,7 +222,7 @@ def get_key_fingerprint(check_mode, ec2_client, key_material): EC2's fingerprints are non-trivial to generate, so push this key to a temporary name and make ec2 calculate the fingerprint for us. http://blog.jbrowne.com/?p=23 - https://forums.aws.amazon.com/thread.jspa?messageID=352828 + https://forums.aws.amazon.com/thread.jspa?messageID=352828. 
""" # find an unused name name_in_use = True @@ -283,9 +283,7 @@ def _write_private_key(key_data, file_name): def create_new_key_pair(ec2_client, name, key_material, key_type, tags, file_name, check_mode): - """ - key does not exist, we create new key - """ + """Key does not exist, we create new key.""" if check_mode: return {"changed": True, "key": None, "msg": "key pair created"} diff --git a/plugins/modules/ec2_metadata_facts.py b/plugins/modules/ec2_metadata_facts.py index 83fdd441783..a724380f975 100644 --- a/plugins/modules/ec2_metadata_facts.py +++ b/plugins/modules/ec2_metadata_facts.py @@ -598,7 +598,7 @@ def fetch(self, uri, recurse=True): self._data["%s" % (new_uri)] = content # not a stringified JSON string def fix_invalid_varnames(self, data): - """Change ':'' and '-' to '_' to ensure valid template variable names""" + """Change ':'' and '-' to '_' to ensure valid template variable names.""" new_data = data.copy() for key, value in data.items(): if ":" in key or "-" in key: @@ -609,7 +609,7 @@ def fix_invalid_varnames(self, data): return new_data def fetch_session_token(self, uri_token): - """Used to get a session token for IMDSv2""" + """Used to get a session token for IMDSv2.""" headers = {"X-aws-ec2-metadata-token-ttl-seconds": "60"} response, info = fetch_url(self.module, uri_token, method="PUT", headers=headers, force=True) diff --git a/plugins/modules/ec2_security_group.py b/plugins/modules/ec2_security_group.py index c339370fa83..0f05a9a598e 100644 --- a/plugins/modules/ec2_security_group.py +++ b/plugins/modules/ec2_security_group.py @@ -618,7 +618,7 @@ def fail(self, module): def rule_cmp(a, b): - """Compare rules without descriptions""" + """Compare rules without descriptions.""" for prop in ["port_range", "protocol", "target", "target_type"]: if prop == "port_range" and to_text(a.protocol) == to_text(b.protocol): # equal protocols can interchange `(-1, -1)` and `(None, None)` @@ -765,7 +765,7 @@ def sg_exists_with_backoff(client, **kwargs): def deduplicate_rules_args(rules): - """Returns unique rules""" + """Returns unique rules.""" if rules is None: return None return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) @@ -931,7 +931,7 @@ def _strip_rule(rule): """ Returns a copy of the rule with the Target/Source and Port information from a rule stripped out. - This can then be combined with the expanded information + This can then be combined with the expanded information. """ stripped_rule = deepcopy(rule) # Get just the non-source/port info from the rule diff --git a/plugins/modules/ec2_spot_instance.py b/plugins/modules/ec2_spot_instance.py index 246e1fe157a..f977f980e7b 100644 --- a/plugins/modules/ec2_spot_instance.py +++ b/plugins/modules/ec2_spot_instance.py @@ -431,7 +431,7 @@ def build_launch_specification(launch_spec): block_device_mappings monitoring placement - iam_instance_profile + iam_instance_profile. 
""" assigned_keys = dict((k, v) for k, v in launch_spec.items() if v is not None) diff --git a/plugins/modules/ec2_vol.py b/plugins/modules/ec2_vol.py index e68c978c41c..f34fb486304 100644 --- a/plugins/modules/ec2_vol.py +++ b/plugins/modules/ec2_vol.py @@ -602,8 +602,7 @@ def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name): def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): - """Modify delete_on_termination attribute""" - + """Modify delete_on_termination attribute.""" delete_on_termination = module.params.get("delete_on_termination") changed = False diff --git a/plugins/modules/ec2_vpc_dhcp_option.py b/plugins/modules/ec2_vpc_dhcp_option.py index 305a9814984..98739200acd 100644 --- a/plugins/modules/ec2_vpc_dhcp_option.py +++ b/plugins/modules/ec2_vpc_dhcp_option.py @@ -293,7 +293,7 @@ def remove_dhcp_options_by_id(client, module, dhcp_options_id): def match_dhcp_options(client, module, new_config): """ Returns a DhcpOptionsId if the module parameters match; else None - Filter by tags, if any are specified + Filter by tags, if any are specified. """ try: all_dhcp_options = client.describe_dhcp_options(aws_retry=True) @@ -320,7 +320,7 @@ def create_dhcp_config(module): 'Values': [{'Value': 'us-west-2.compute.internal'}]}, {'Key': 'domain-name-servers', 'Values': [{'Value': 'AmazonProvidedDNS'}]}, - ...], + ...],. """ new_config = [] params = module.params @@ -389,7 +389,7 @@ def find_opt_index(config, option): def inherit_dhcp_config(existing_config, new_config): """ - Compare two DhcpConfigurations lists and apply existing options to unset parameters + Compare two DhcpConfigurations lists and apply existing options to unset parameters. If there's an existing option config and the new option is not set or it's none, inherit the existing config. diff --git a/plugins/modules/ec2_vpc_igw.py b/plugins/modules/ec2_vpc_igw.py index b8d468aba41..c0934232652 100644 --- a/plugins/modules/ec2_vpc_igw.py +++ b/plugins/modules/ec2_vpc_igw.py @@ -191,7 +191,7 @@ def get_matching_igw(self, vpc_id, gateway_id=None): vpc_id (str): VPC ID gateway_id (str): Internet Gateway ID, if specified Returns: - igw (dict): dict of igw found, None if none found + igw (dict): dict of igw found, None if none found. """ try: # If we know the gateway_id, use it to avoid bugs with using filters @@ -218,7 +218,7 @@ def get_matching_vpc(self, vpc_id): Parameters: vpc_id (str): VPC ID Returns: - vpc (dict): dict of vpc found, None if none found + vpc (dict): dict of vpc found, None if none found. """ try: vpcs = describe_vpcs_with_backoff(self._connection, VpcIds=[vpc_id]) diff --git a/plugins/modules/ec2_vpc_nat_gateway.py b/plugins/modules/ec2_vpc_nat_gateway.py index fbc15292f16..f87a475233e 100644 --- a/plugins/modules/ec2_vpc_nat_gateway.py +++ b/plugins/modules/ec2_vpc_nat_gateway.py @@ -334,7 +334,7 @@ def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states """Retrieve a list of NAT Gateways Args: client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance + module: AnsibleAWSModule class instance. Kwargs: subnet_id (str): The subnet_id the nat resides in. 
@@ -370,7 +370,6 @@ def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states Returns: list """ - params = dict() existing_gateways = list() @@ -438,7 +437,6 @@ def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None): Returns: Tuple (list, bool) """ - allocation_id_exists = False gateways = [] states = ["available", "pending"] @@ -477,7 +475,6 @@ def get_eip_allocation_id_by_address(client, module, eip_address): Returns: Tuple (str, str) """ - params = { "PublicIps": [eip_address], } @@ -516,7 +513,7 @@ def allocate_eip_address(client, module): """Release an EIP from your EIP Pool Args: client (botocore.client.EC2): Boto3 client - module: AnsibleAWSModule class instance + module: AnsibleAWSModule class instance. Basic Usage: >>> client = boto3.client('ec2') @@ -529,7 +526,6 @@ def allocate_eip_address(client, module): Returns: Tuple (bool, str, str) """ - new_eip = None msg = "" params = { @@ -570,7 +566,6 @@ def release_address(client, module, allocation_id): Returns: Tuple (bool, str) """ - msg = "" if module.check_mode: @@ -610,7 +605,7 @@ def create(client, module, subnet_id, allocation_id, tags, client_token=None, wa connectivity_type (str): public or private connectivity support tags (dict): Tags to associate to the NAT gateway purge_tags (bool): If true, remove tags not listed in I(tags) - type: bool + type: bool. Kwargs: wait (bool): Wait for the nat to be in the deleted state before returning. @@ -649,7 +644,6 @@ def create(client, module, subnet_id, allocation_id, tags, client_token=None, wa Returns: Tuple (bool, str, list) """ - params = {"SubnetId": subnet_id, "ConnectivityType": connectivity_type} if connectivity_type == "public": @@ -724,7 +718,7 @@ def pre_create( module: AnsibleAWSModule class instance subnet_id (str): The subnet_id the nat resides in tags (dict): Tags to associate to the NAT gateway - purge_tags (bool): If true, remove tags not listed in I(tags) + purge_tags (bool): If true, remove tags not listed in I(tags). Kwargs: allocation_id (str): The EIP Amazon identifier. @@ -772,7 +766,6 @@ def pre_create( Returns: Tuple (bool, str, list) """ - changed = False msg = "" results = {} @@ -853,7 +846,7 @@ def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connec Args: client (botocore.client.EC2): Boto3 client module: AnsibleAWSModule class instance - nat_gateway_id (str): The Amazon nat id + nat_gateway_id (str): The Amazon nat id. Kwargs: wait (bool): Wait for the nat to be in the deleted state before returning. @@ -890,7 +883,6 @@ def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connec Returns: Tuple (bool, str, list) """ - allocation_id = None params = {"NatGatewayId": nat_gateway_id} changed = False diff --git a/plugins/modules/ec2_vpc_net.py b/plugins/modules/ec2_vpc_net.py index a3bc5aa9772..d4b6f29ba9f 100644 --- a/plugins/modules/ec2_vpc_net.py +++ b/plugins/modules/ec2_vpc_net.py @@ -388,7 +388,6 @@ def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state): If ipv6_assoc_state is True, wait for VPC to be associated with at least one Amazon-provided IPv6 CIDR block. If ipv6_assoc_state is False, wait for VPC to be dissociated from all Amazon-provided IPv6 CIDR blocks. 
""" - if ipv6_assoc_state is None: return if module.check_mode: diff --git a/plugins/modules/ec2_vpc_route_table.py b/plugins/modules/ec2_vpc_route_table.py index 57db93cdb91..2b75fb24c30 100644 --- a/plugins/modules/ec2_vpc_route_table.py +++ b/plugins/modules/ec2_vpc_route_table.py @@ -389,9 +389,7 @@ def find_subnets(connection, module, vpc_id, identified_subnets): def find_igw(connection, module, vpc_id): - """ - Finds the Internet gateway for the given VPC ID. - """ + """Finds the Internet gateway for the given VPC ID.""" filters = ansible_dict_to_boto3_filter_list({"attachment.vpc-id": vpc_id}) try: igw = describe_igws_with_backoff(connection, Filters=filters) diff --git a/plugins/modules/ec2_vpc_route_table_info.py b/plugins/modules/ec2_vpc_route_table_info.py index bde66f03378..833d567ba00 100644 --- a/plugins/modules/ec2_vpc_route_table_info.py +++ b/plugins/modules/ec2_vpc_route_table_info.py @@ -237,9 +237,8 @@ def normalize_route_table(table): def normalize_results(results): """ We used to be a boto v2 module, make sure that the old return values are - maintained and the shape of the return values are what people expect + maintained and the shape of the return values are what people expect. """ - routes = [normalize_route_table(route) for route in results["RouteTables"]] del results["RouteTables"] results = camel_dict_to_snake_dict(results) diff --git a/plugins/modules/elb_application_lb.py b/plugins/modules/elb_application_lb.py index 25ebd8c8490..dfeae6bb3cc 100644 --- a/plugins/modules/elb_application_lb.py +++ b/plugins/modules/elb_application_lb.py @@ -571,9 +571,7 @@ def describe_sgs_with_backoff(connection, **params): def find_default_sg(connection, module, vpc_id): - """ - Finds the default security group for the given VPC ID. - """ + """Finds the default security group for the given VPC ID.""" filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "group-name": "default"}) try: sg = describe_sgs_with_backoff(connection, Filters=filters) @@ -588,7 +586,7 @@ def find_default_sg(connection, module, vpc_id): def create_or_update_alb(alb_obj): - """Create ALB or modify main attributes. json_exit here""" + """Create ALB or modify main attributes. json_exit here.""" if alb_obj.elb: # ALB exists so check subnets, security groups and tags match what has been passed # Subnets diff --git a/plugins/modules/elb_classic_lb.py b/plugins/modules/elb_classic_lb.py index 60134f0e3bb..d2d8431105e 100644 --- a/plugins/modules/elb_classic_lb.py +++ b/plugins/modules/elb_classic_lb.py @@ -691,7 +691,7 @@ class ElbManager: - """Handles ELB creation and destruction""" + """Handles ELB creation and destruction.""" def __init__(self, module): self.module = module @@ -873,8 +873,8 @@ def _create_elb(self): def _format_listener(self, listener, inject_protocol=False): """Formats listener into the format needed by the - ELB API""" - + ELB API. 
+ """ listener = scrub_none_parameters(listener) for protocol in ["protocol", "instance_protocol"]: @@ -896,7 +896,7 @@ def _format_listener(self, listener, inject_protocol=False): return formatted_listener def _format_healthcheck_target(self): - """Compose target string from healthcheck parameters""" + """Compose target string from healthcheck parameters.""" protocol = self.health_check["ping_protocol"].upper() path = "" @@ -915,7 +915,7 @@ def _format_healthcheck(self): ) def ensure_ok(self): - """Create the ELB""" + """Create the ELB.""" if not self.elb: try: self._create_elb() @@ -963,7 +963,7 @@ def ensure_ok(self): # self._set_access_log() def ensure_gone(self): - """Destroy the ELB""" + """Destroy the ELB.""" if self.elb: try: self._delete_elb() @@ -1193,7 +1193,7 @@ def _wait_for_instance_state(self, waiter_name, instances): return True def _create_elb_listeners(self, listeners): - """Takes a list of listener definitions and creates them""" + """Takes a list of listener definitions and creates them.""" if not listeners: return False self.changed = True @@ -1208,7 +1208,7 @@ def _create_elb_listeners(self, listeners): return True def _delete_elb_listeners(self, ports): - """Takes a list of listener ports and deletes them from the ELB""" + """Takes a list of listener ports and deletes them from the ELB.""" if not ports: return False self.changed = True @@ -1225,9 +1225,8 @@ def _delete_elb_listeners(self, ports): def _set_elb_listeners(self): """ Creates listeners specified by self.listeners; overwrites existing - listeners on these ports; removes extraneous listeners + listeners on these ports; removes extraneous listeners. """ - if not self.listeners: return False @@ -1262,7 +1261,7 @@ def _set_elb_listeners(self): return changed def _api_listener_as_tuple(self, listener): - """Adds ssl_certificate_id to ELB API tuple if present""" + """Adds ssl_certificate_id to ELB API tuple if present.""" base_tuple = [ listener.get("LoadBalancerPort"), listener.get("InstancePort"), @@ -1292,7 +1291,7 @@ def _detach_subnets(self, subnets): return True def _set_subnets(self): - """Determine which subnets need to be attached or detached on the ELB""" + """Determine which subnets need to be attached or detached on the ELB.""" # Subnets parameter not set, nothing to change if self.subnets is None: return False @@ -1319,7 +1318,7 @@ def _set_subnets(self): return changed def _check_scheme(self): - """Determine if the current scheme is different than the scheme of the ELB""" + """Determine if the current scheme is different than the scheme of the ELB.""" if self.scheme: if self.elb["Scheme"] != self.scheme: return True @@ -1360,7 +1359,7 @@ def _disable_zones(self, zones): return True def _set_zones(self): - """Determine which zones need to be enabled or disabled on the ELB""" + """Determine which zones need to be enabled or disabled on the ELB.""" # zones parameter not set, nothing to changeA if self.zones is None: return False @@ -1510,7 +1509,7 @@ def _set_listener_policies(self, port, policies): return True def _get_stickiness_policies(self): - """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies""" + """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies.""" return list( p["PolicyName"] for p in self.elb_policies @@ -1518,17 +1517,17 @@ def _get_stickiness_policies(self): ) def _get_app_stickness_policy_map(self): - """Get a mapping of App Cookie Stickiness policy names to their definitions""" + """Get a mapping of App Cookie 
Stickiness policy names to their definitions.""" policies = self.elb.get("Policies", {}).get("AppCookieStickinessPolicies", []) return {p["PolicyName"]: p for p in policies} def _get_lb_stickness_policy_map(self): - """Get a mapping of LB Cookie Stickiness policy names to their definitions""" + """Get a mapping of LB Cookie Stickiness policy names to their definitions.""" policies = self.elb.get("Policies", {}).get("LBCookieStickinessPolicies", []) return {p["PolicyName"]: p for p in policies} def _purge_stickiness_policies(self): - """Removes all stickiness policies from all Load Balancers""" + """Removes all stickiness policies from all Load Balancers.""" # Used when purging stickiness policies or updating a policy (you can't # update a policy while it's connected to a Listener) stickiness_policies = set(self._get_stickiness_policies()) @@ -1659,7 +1658,7 @@ def _set_lb_stickiness_policy(self, listener, policy): return changed def _get_backend_policies(self): - """Get a list of backend policies mapped to the InstancePort""" + """Get a list of backend policies mapped to the InstancePort.""" if not self.elb: return {} server_descriptions = self.elb.get("BackendServerDescriptions", []) @@ -1667,7 +1666,7 @@ def _get_backend_policies(self): return policies def _get_proxy_protocol_policy(self): - """Returns the name of the name of the ProxyPolicy if created""" + """Returns the name of the ProxyPolicy if created.""" all_proxy_policies = self._get_proxy_policies() if not all_proxy_policies: return None @@ -1676,15 +1675,15 @@ def _get_proxy_protocol_policy(self): return all_proxy_policies def _get_proxy_policies(self): - """Get a list of ProxyProtocolPolicyType policies""" + """Get a list of ProxyProtocolPolicyType policies.""" return list(p["PolicyName"] for p in self.elb_policies if p["PolicyTypeName"] == "ProxyProtocolPolicyType") def _get_policy_map(self): - """Get a mapping of Policy names to their definitions""" + """Get a mapping of Policy names to their definitions.""" return {p["PolicyName"]: p for p in self.elb_policies} def _set_backend_policies(self): - """Sets policies for all backends""" + """Sets policies for all backends.""" # Currently only supports setting ProxyProtocol policies if not self.listeners: return False @@ -1754,7 +1753,7 @@ def _set_backend_policy(self, port, policies): return True def _set_proxy_protocol_policy(self, policy_name): - """Install a proxy protocol policy if needed""" + """Install a proxy protocol policy if needed.""" policy_map = self._get_policy_map() policy_attributes = [dict(AttributeName="ProxyProtocol", AttributeValue="true")] @@ -1791,7 +1790,7 @@ def _set_proxy_protocol_policy(self, policy_name): return True def _get_instance_ids(self): - """Get the current list of instance ids installed in the elb""" + """Get the current list of instance ids installed in the elb.""" elb = self.elb or {} return list(i["InstanceId"] for i in elb.get("Instances", [])) @@ -1817,7 +1816,7 @@ def _change_instances(self, method, instances): return True def _set_instance_ids(self): - """Register or deregister instances from an lb instance""" + """Register or deregister instances from an lb instance.""" new_instances = self.instance_ids or [] existing_instances = self._get_instance_ids() @@ -1869,7 +1868,7 @@ def _remove_tags(self, tags_to_unset): return True def _set_tags(self): - """Add/Delete tags""" + """Add/Delete tags.""" if self.tags is None: return False diff --git a/plugins/modules/iam_instance_profile.py b/plugins/modules/iam_instance_profile.py
index 52b7c9370d9..36c4ccba6eb 100644 --- a/plugins/modules/iam_instance_profile.py +++ b/plugins/modules/iam_instance_profile.py @@ -293,9 +293,7 @@ def ensure_absent( def main(): - """ - Module action handler - """ + """Module action handler.""" argument_spec = dict( name=dict(aliases=["instance_profile_name"], required=True), path=dict(aliases=["path_prefix", "prefix"]), diff --git a/plugins/modules/iam_instance_profile_info.py b/plugins/modules/iam_instance_profile_info.py index a26a069900a..606943acc16 100644 --- a/plugins/modules/iam_instance_profile_info.py +++ b/plugins/modules/iam_instance_profile_info.py @@ -105,9 +105,7 @@ def describe_iam_instance_profiles(module, client): def main(): - """ - Module action handler - """ + """Module action handler.""" argument_spec = dict( name=dict(aliases=["instance_profile_name"]), path_prefix=dict(aliases=["path", "prefix"]), diff --git a/plugins/modules/iam_role.py b/plugins/modules/iam_role.py index 3262a7a9226..23e7f3d0752 100644 --- a/plugins/modules/iam_role.py +++ b/plugins/modules/iam_role.py @@ -519,9 +519,8 @@ def create_instance_profiles(client, check_mode, role_name, path): def remove_instance_profiles(client, check_mode, role_name, delete_instance_profile): """Removes the role from instance profiles and deletes the instance profile if - delete_instance_profile is set + delete_instance_profile is set. """ - instance_profiles = list_iam_instance_profiles(client, role=role_name) if not instance_profiles: return False diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index fb4a06466e3..36a599a4dad 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -193,9 +193,7 @@ def describe_iam_roles(client, name, path_prefix): def main(): - """ - Module action handler - """ + """Module action handler.""" argument_spec = dict( name=dict(aliases=["role_name"]), path_prefix=dict(aliases=["path", "prefix"]), diff --git a/plugins/modules/kms_key.py b/plugins/modules/kms_key.py index 47e52978d78..72a62146260 100644 --- a/plugins/modules/kms_key.py +++ b/plugins/modules/kms_key.py @@ -510,7 +510,7 @@ def get_kms_policies(connection, module, key_id): def camel_to_snake_grant(grant): - """camel_to_snake_grant snakifies everything except the encryption context""" + """camel_to_snake_grant snakifies everything except the encryption context.""" constraints = grant.get("Constraints", {}) result = camel_dict_to_snake_dict(grant) if "EncryptionContextEquals" in constraints: diff --git a/plugins/modules/lambda_alias.py b/plugins/modules/lambda_alias.py index 5b16eebd3aa..be4374b3678 100644 --- a/plugins/modules/lambda_alias.py +++ b/plugins/modules/lambda_alias.py @@ -173,7 +173,6 @@ def set_api_params(module_params, param_names): :param param_names: :return: """ - api_params = dict() for param in param_names: @@ -191,7 +190,6 @@ def validate_params(module_params): :param module_params: AnsibleAWSModule Parameters :return: """ - function_name = module_params["function_name"] # validate function name @@ -224,7 +222,6 @@ def get_lambda_alias(module_params, client): :param client: (wrapped) boto3 lambda client :return: """ - # set API parameters api_params = set_api_params(module_params, ("function_name", "name")) diff --git a/plugins/modules/lambda_event.py b/plugins/modules/lambda_event.py index 424ad5abeca..8663ad749dc 100644 --- a/plugins/modules/lambda_event.py +++ b/plugins/modules/lambda_event.py @@ -182,7 +182,6 @@ def validate_params(module, client): :param client: The client used to 
perform requests to AWS :return: """ - function_name = module.params["lambda_function_arn"] qualifier = get_qualifier(module) @@ -224,7 +223,6 @@ def get_qualifier(module): :param module: :return: """ - qualifier = None if module.params["version"] > 0: qualifier = str(module.params["version"]) @@ -282,7 +280,6 @@ def lambda_event_stream(module, client): :param aws: :return: """ - facts = dict() changed = False current_state = "absent" diff --git a/plugins/modules/lambda_info.py b/plugins/modules/lambda_info.py index fbd443bb7cc..861571fc799 100644 --- a/plugins/modules/lambda_info.py +++ b/plugins/modules/lambda_info.py @@ -297,7 +297,6 @@ def alias_details(client, module, function_name): :param function_name (str): Name of Lambda function to query :return dict: """ - lambda_info = dict() try: @@ -331,7 +330,6 @@ def list_functions(client, module): :param client: AWS API client reference (boto3) :param module: Ansible module reference """ - function_name = module.params.get("function_name") if function_name: # Function name is specified - retrieve info on that function @@ -388,7 +386,6 @@ def config_details(client, module, function_name): :param function_name (str): Name of Lambda function to query :return dict: """ - lambda_info = dict() try: @@ -420,7 +417,6 @@ def mapping_details(client, module, function_name): :param function_name (str): Name of Lambda function to query :return dict: """ - lambda_info = dict() params = dict() @@ -451,7 +447,6 @@ def policy_details(client, module, function_name): :param function_name (str): Name of Lambda function to query :return dict: """ - lambda_info = dict() try: @@ -477,7 +472,6 @@ def version_details(client, module, function_name): :param function_name (str): Name of Lambda function to query :return dict: """ - lambda_info = dict() try: @@ -504,7 +498,6 @@ def tags_details(client, module, function_name): :param function_name (str): Name of Lambda function to query :return dict: """ - lambda_info = dict() try: diff --git a/plugins/modules/lambda_policy.py b/plugins/modules/lambda_policy.py index 3413d6e79da..fcefc72f6f2 100644 --- a/plugins/modules/lambda_policy.py +++ b/plugins/modules/lambda_policy.py @@ -148,7 +148,6 @@ def pc(key): :param key: :return: """ - return "".join([token.capitalize() for token in key.split("_")]) @@ -168,7 +167,6 @@ def set_api_params(module, module_params): :param module_params: :return: """ - api_params = dict() for param in module_params: @@ -186,7 +184,6 @@ def validate_params(module): :param module: :return: """ - function_name = module.params["function_name"] # validate function name @@ -213,7 +210,6 @@ def get_qualifier(module): :param module: :return: """ - if module.params.get("version") is not None: return to_native(module.params["version"]) elif module.params["alias"]: @@ -223,7 +219,7 @@ def get_qualifier(module): def extract_statement(policy, sid): - """return flattened single policy statement from a policy + """Return flattened single policy statement from a policy. If a policy statement is present in the policy extract it and return it in a flattened form. 
Otherwise return an empty @@ -304,7 +300,6 @@ def add_policy_permission(module, client): :param aws: :return: """ - changed = False # set API parameters @@ -340,7 +335,6 @@ def remove_policy_permission(module, client): :param aws: :return: """ - changed = False # set API parameters @@ -421,7 +415,6 @@ def main(): :return dict: ansible facts """ - module = setup_module_object() client = module.client("lambda") validate_params(module) diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index 0362df0ba2f..2f292b06ea4 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -1305,7 +1305,7 @@ def promote_replication_instance(client, module, instance, read_replica): def ensure_iam_roles(client, module, instance_id): """ - Ensure specified IAM roles are associated with DB instance + Ensure specified IAM roles are associated with DB instance. Parameters: client: RDS client diff --git a/plugins/modules/rds_instance_param_group.py b/plugins/modules/rds_instance_param_group.py index 82d0112fde4..98219599e41 100644 --- a/plugins/modules/rds_instance_param_group.py +++ b/plugins/modules/rds_instance_param_group.py @@ -140,9 +140,7 @@ def _describe_db_parameters(connection, **params): def convert_parameter(param, value): - """ - Allows setting parameters with 10M = 10* 1024 * 1024 and so on. - """ + """Allows setting parameters with 10M = 10* 1024 * 1024 and so on.""" converted_value = value if param["DataType"] == "integer": diff --git a/plugins/modules/rds_subnet_group.py b/plugins/modules/rds_subnet_group.py index 17fbdb00173..4cbb7ef805d 100644 --- a/plugins/modules/rds_subnet_group.py +++ b/plugins/modules/rds_subnet_group.py @@ -237,7 +237,7 @@ def create_subnet_list(subnets): subnets (list): A list of subnets definitions. @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups Returns: - (list): List of subnet ids (str) + (list): List of subnet ids (str). """ subnets_ids = [] for subnet in subnets: diff --git a/plugins/modules/route53.py b/plugins/modules/route53.py index 8a5ccb5a6c3..39404076696 100644 --- a/plugins/modules/route53.py +++ b/plugins/modules/route53.py @@ -460,7 +460,7 @@ def get_record(route53, zone_id, record_name, record_type, record_identifier): def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): - """Finds a zone by name or zone_id""" + """Finds a zone by name or zone_id.""" hosted_zones_results = _list_hosted_zones(route53) for zone in hosted_zones_results: @@ -484,7 +484,7 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): def format_record(record_in, zone_in, zone_id): """ Formats a record in a way that's consistent with the pre-boto3 migration values - as well as returning the 'normal' boto3 style values + as well as returning the 'normal' boto3 style values. 
""" if not record_in: return None diff --git a/plugins/modules/route53_health_check.py b/plugins/modules/route53_health_check.py index b2924145d24..bbd3850a9b9 100644 --- a/plugins/modules/route53_health_check.py +++ b/plugins/modules/route53_health_check.py @@ -316,8 +316,7 @@ def _list_health_checks(**params): def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): - """Searches for health checks that have the exact same set of immutable values""" - + """Searches for health checks that have the exact same set of immutable values.""" # In lieu of an Id we perform matches against the following values: # - ip_addr # - fqdn diff --git a/plugins/modules/s3_bucket_info.py b/plugins/modules/s3_bucket_info.py index b382e5eebd3..83462f9f51d 100644 --- a/plugins/modules/s3_bucket_info.py +++ b/plugins/modules/s3_bucket_info.py @@ -463,9 +463,7 @@ def get_bucket_list(module, connection, name="", name_filter=""): def get_buckets_facts(connection, buckets, requested_facts, transform_location): - """ - Retrieve additional information about S3 buckets - """ + """Retrieve additional information about S3 buckets.""" full_bucket_list = [] # Iterate over all buckets and append Retrieved facts to bucket for bucket in buckets: @@ -476,9 +474,7 @@ def get_buckets_facts(connection, buckets, requested_facts, transform_location): def get_bucket_details(connection, name, requested_facts, transform_location): - """ - Execute all enabled S3API get calls for selected bucket - """ + """Execute all enabled S3API get calls for selected bucket.""" all_facts = {} for key in requested_facts: @@ -510,9 +506,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location): @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_location(name, connection, transform_location=False): - """ - Get bucket location and optionally transform 'null' to 'us-east-1' - """ + """Get bucket location and optionally transform 'null' to 'us-east-1'.""" data = connection.get_bucket_location(Bucket=name) # Replace 'null' with 'us-east-1'? @@ -529,9 +523,7 @@ def get_bucket_location(name, connection, transform_location=False): @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_tagging(name, connection): - """ - Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function - """ + """Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function.""" data = connection.get_bucket_tagging(Bucket=name) try: @@ -545,9 +537,7 @@ def get_bucket_tagging(name, connection): @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_property(name, connection, get_api_name): - """ - Get bucket property - """ + """Get bucket property.""" api_call = "get_" + get_api_name api_function = getattr(connection, api_call) data = api_function(Bucket=name) diff --git a/plugins/plugin_utils/botocore.py b/plugins/plugin_utils/botocore.py index 2fe2ca0eb31..992b3661f32 100644 --- a/plugins/plugin_utils/botocore.py +++ b/plugins/plugin_utils/botocore.py @@ -23,9 +23,8 @@ def boto3_conn(plugin, conn_type=None, resource=None, region=None, endpoint=None ValueError, botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError, - botocore.exceptions.NoRegionError + botocore.exceptions.NoRegionError. 
""" - try: return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) except ValueError as e: diff --git a/plugins/plugin_utils/inventory.py b/plugins/plugin_utils/inventory.py index b0e47f7ef55..949b6e5d6bb 100644 --- a/plugins/plugin_utils/inventory.py +++ b/plugins/plugin_utils/inventory.py @@ -164,7 +164,7 @@ def _boto3_regions(self, service): def all_clients(self, service): """ - Generator that yields a boto3 client and the region + Generator that yields a boto3 client and the region. :param service: The boto3 service to connect to. diff --git a/pyproject.toml b/pyproject.toml index 5f6f4d55e65..1f70083ce09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,21 +21,135 @@ src = [ profile = "black" force_single_line = true line_length = 120 +src_paths = [ "plugins", "tests/unit", "tests/integration",] +sections = [ "FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "ANSIBLE_CORE", "ANSIBLE_AMAZON_AWS", "ANSIBLE_COMMUNITY_AWS", "LOCALFOLDER",] +known_third_party = [ "botocore", "boto3",] +known_ansible_core = [ "ansible",] +known_ansible_amazon_aws = [ "ansible_collections.amazon.aws",] +known_ansible_community_aws = [ "ansible_collections.community.aws",] -src_paths = [ - "plugins", - "tests/unit", - "tests/integration", +[tool.flynt] +transform-joins = true +exclude = [ "ec2_metadata_facts",] + +[tool.ruff] +builtins = [ "__",] +line-length = 120 +target-version = "py39" + +[tool.ruff.lint] +# See all rules at https://docs.astral.sh/ruff/rules/ +select = [ + "E", # pycodestyle + "W", # pycodestyle + "F", # Pyflakes + "Q", # flake8-quotes + "I", # isort + "TID", # flake8-tidy-imports + "TCH", # flake8-type-checking + "PL", # Pylint + "FLY", # Flynt + "COM", # flake8-commas + "N", # pep8-naming + "YTT", # flake8-2020 + "PT", # flake8-pytest-style + "ISC", # flake8-implicit-str-concat + "D", # pydocstyle + "C90", # mccabe + + # To be enabed + # "S", # flake8-bandit + # "C", # complexity + # "FBT001", # flake8-boolean-trap + # "UP", # pyupgrade + # "SIM", # flake8-simplify + # "C4", # flake8-comprehensions + # "RUF", # Ruff-specific rules + # "B", # flake8-bugbear ] -sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "ANSIBLE_CORE", "ANSIBLE_AMAZON_AWS", "ANSIBLE_COMMUNITY_AWS", "LOCALFOLDER"] -known_third_party = ["botocore", "boto3"] -known_ansible_core = ["ansible"] -known_ansible_amazon_aws = ["ansible_collections.amazon.aws"] -known_ansible_community_aws = ["ansible_collections.community.aws"] +ignore = [ + # Conflicts with the formatter + "COM812", "ISC001", + "E741", # Ambiguous variable name + "E501", # Never enforce `E501` (line length violations). 
+ "F401", # imported but unused + "F811", # redefinition of unnused + "F841", # Local variable is assigned to but never used + "PLR", # Design related pylint codes + "PLW", # Design related pylint codes + "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` + "N806", # Variable in function should be lowercase + "N802", # Function name should be lowercase + "N818", # Exception name should be named with an Error suffix + "N803", # Argument name `keyId` should be lowercase + "N801", # Class name should use CapWords convention + "UP009", # UTF-8 encoding declaration is unnecessary + "ISC003", # Explicitly concatenated string should be implicitly concatenated + "D401", # First line of docstring should be in imperative mood + "C901", # `_compare_condition` is too complex (15 > 10) + "D205", # 1 blank line required between summary line and description + "D400", # First line should end with a period +] -[tool.flynt] -transform-joins = true -exclude = [ - "ec2_metadata_facts", +# Disable fix for unused imports (`F401`). +fixable = ["ALL"] + +# Avoid trying to fix flake8-bugbear (`B`) violations. +unfixable = ["F401", "B"] + +# Allow unused variables when underscore-prefixed. +dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" + +[tool.ruff.lint.isort] +force-single-line = true # Force from .. import to be 1 per line +section-order = ["future", "standard-library", "third-party", "first-party", "ansible_core", "ansible_amazon_aws", "ansible_community_aws", "local-folder"] +known-local-folder = [ "plugins", "tests/unit", "tests/integration",] +known-third-party = [ "botocore", "boto3",] + +[tool.ruff.lint.isort.sections] +ansible_core = [ "ansible",] +ansible_amazon_aws = [ "ansible_collections.amazon.aws",] +ansible_community_aws = [ "ansible_collections.community.aws",] + +[tool.ruff.lint.pydocstyle] +convention = "pep257" + +[tool.ruff.lint.per-file-ignores] +# 402: Module level import not at top of file +"plugins/**/*.py" = [ "D1", "E402"] +"tests/**/*.py" = [ + "D1", + "E402", + "S101", # asserts allowed in tests... + "ARG", # Unused function args -> fixtures nevertheless are functionally relevant... + "FBT", # Don't care about booleans as positional arguments in tests, e.g. via @pytest.mark.parametrize() + "PT", # pytest error codes ] + +[tool.ruff.format] +# Like Black, use double quotes for strings. +quote-style = "double" + +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" + +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false + +# Like Black, automatically detect the appropriate line ending. +line-ending = "auto" + +# Enable auto-formatting of code examples in docstrings. Markdown, +# reStructuredText code/literal blocks and doctests are all supported. +# +# This is currently disabled by default, but it is planned for this +# to be opt-out in the future. +docstring-code-format = false + +# Set the line length limit used when formatting code snippets in +# docstrings. +# +# This only has an effect when the `docstring-code-format` setting is +# enabled. 
+docstring-code-line-length = "dynamic" diff --git a/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py index 04d2eb1ea54..5ca9f648248 100644 --- a/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py +++ b/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py @@ -1,14 +1,13 @@ #!/usr/bin/env python """ Reads an OpenSSH Public key and spits out the 'AWS' MD5 sum -The equivalent of +The equivalent of. ssh-keygen -f id_rsa.pub -e -m PKCS8 | openssl pkey -pubin -outform DER | openssl md5 -c | cut -f 2 -d ' ' (but without needing the OpenSSL CLI) """ - import hashlib import sys diff --git a/tests/unit/module_utils/cloud/test_cloud_retry.py b/tests/unit/module_utils/cloud/test_cloud_retry.py index 06119d7f66c..dc7250217bb 100644 --- a/tests/unit/module_utils/cloud/test_cloud_retry.py +++ b/tests/unit/module_utils/cloud/test_cloud_retry.py @@ -16,9 +16,7 @@ class TestCloudRetry: custom_error_codes = [100, 200, 300] class OurTestException(Exception): - """ - custom exception class for testing - """ + """custom exception class for testing.""" def __init__(self, status): self.status = status diff --git a/tests/unit/module_utils/cloud/test_decorator_generation.py b/tests/unit/module_utils/cloud/test_decorator_generation.py index ad389050376..e344adaef77 100644 --- a/tests/unit/module_utils/cloud/test_decorator_generation.py +++ b/tests/unit/module_utils/cloud/test_decorator_generation.py @@ -43,9 +43,8 @@ def check_common_side_effects(decorator_generator): number of keyword arguments. "found" should be CloudRetry.found "status_code_from_exception" should be CloudRetry.status_code_from_exception (this is replaced when the abstract class is realised) - "sleep_time_generator" should be an instance of CloudRetry.BackoffIterator + "sleep_time_generator" should be an instance of CloudRetry.BackoffIterator. """ - assert decorator_generator.called is True assert decorator_generator.call_count == 1 diff --git a/tests/unit/module_utils/policy/test_compare_policies.py b/tests/unit/module_utils/policy/test_compare_policies.py index 4f9d86ac36d..27879febc8b 100644 --- a/tests/unit/module_utils/policy/test_compare_policies.py +++ b/tests/unit/module_utils/policy/test_compare_policies.py @@ -240,7 +240,7 @@ def setup_method(self): def test_compare_small_policies_without_differences(self): """Testing two small policies which are identical except for: * The contents of the statement are in different orders - * The second policy contains a list of length one whereas in the first it is a string + * The second policy contains a list of length one whereas in the first it is a string. """ assert compare_policies(self.small_policy_one, self.small_policy_two) is False @@ -248,44 +248,44 @@ def test_compare_large_policies_without_differences(self): """Testing two larger policies which are identical except for: * The statements are in different orders * The contents of the statements are also in different orders - * The second contains a list of length one for the Principal whereas in the first it is a string + * The second contains a list of length one for the Principal whereas in the first it is a string. """ assert compare_policies(self.larger_policy_one, self.larger_policy_two) is False def test_compare_larger_policies_with_difference(self): """Testing two larger policies which are identical except for: - * one different principal + * one different principal. 
""" assert compare_policies(self.larger_policy_two, self.larger_policy_three) is True def test_compare_smaller_policy_with_larger(self): - """Testing two policies of different sizes""" + """Testing two policies of different sizes.""" assert compare_policies(self.larger_policy_one, self.small_policy_one) is True def test_compare_boolean_policy_bool_and_string_are_equal(self): - """Testing two policies one using a quoted boolean, the other a bool""" + """Testing two policies one using a quoted boolean, the other a bool.""" assert compare_policies(self.bool_policy_string, self.bool_policy_bool) is False def test_compare_numeric_policy_number_and_string_are_equal(self): - """Testing two policies one using a quoted number, the other an int""" + """Testing two policies one using a quoted number, the other an int.""" assert compare_policies(self.numeric_policy_string, self.numeric_policy_number) is False def test_compare_version_policies_defaults_old(self): """Testing that a policy without Version is considered identical to one - with the 'old' Version (by default) + with the 'old' Version (by default). """ assert compare_policies(self.version_policy_old, self.version_policy_missing) is False assert compare_policies(self.version_policy_new, self.version_policy_missing) is True def test_compare_version_policies_default_disabled(self): - """Testing that a policy without Version not considered identical when default_version=None""" + """Testing that a policy without Version not considered identical when default_version=None.""" assert compare_policies(self.version_policy_missing, self.version_policy_missing, default_version=None) is False assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version=None) is True assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version=None) is True def test_compare_version_policies_default_set(self): """Testing that a policy without Version is only considered identical - when default_version="2008-10-17" + when default_version="2008-10-17". """ assert ( compare_policies(self.version_policy_missing, self.version_policy_missing, default_version="2012-10-17") @@ -307,13 +307,13 @@ def test_compare_version_policies_default_set(self): ) def test_compare_version_policies_with_none(self): - """Testing that comparing with no policy works""" + """Testing that comparing with no policy works.""" assert compare_policies(self.small_policy_one, None) is True assert compare_policies(None, self.small_policy_one) is True assert compare_policies(None, None) is False def test_compare_wildcard_policies_without_differences(self): """Testing two small wildcard policies which are identical except for: - * Principal: "*" vs Principal: ["AWS": "*"] + * Principal: "*" vs Principal: ["AWS": "*"]. """ assert compare_policies(self.wildcard_policy_one, self.wildcard_policy_two) is False diff --git a/tests/unit/utils/amazon_placebo_fixtures.py b/tests/unit/utils/amazon_placebo_fixtures.py index afe91adad43..7fbc558c7dd 100644 --- a/tests/unit/utils/amazon_placebo_fixtures.py +++ b/tests/unit/utils/amazon_placebo_fixtures.py @@ -46,7 +46,7 @@ @pytest.fixture def placeboify(request, monkeypatch): - """This fixture puts a recording/replaying harness around `boto3_conn` + """This fixture puts a recording/replaying harness around `boto3_conn`. 
Placeboify patches the `boto3_conn` function in ec2 module_utils to return a boto3 session that in recording or replaying mode, depending on the @@ -64,7 +64,7 @@ def placeboify(request, monkeypatch): request.fspath.dirname, "placebo_recordings", request.fspath.basename.replace(".py", ""), - request.function.__name__ + request.function.__name__, # remove the test_ prefix from the function & file name ).replace("test_", "") @@ -106,7 +106,7 @@ def boto3_middleman_connection(module, conn_type, resource, region="us-west-2", @pytest.fixture(scope="module") def basic_launch_config(): - """Create an EC2 launch config whose creation *is not* recorded and return its name + """Create an EC2 launch config whose creation *is not* recorded and return its name. This fixture is module-scoped, since launch configs are immutable and this can be reused for many tests. @@ -209,7 +209,8 @@ def maybe_sleep(): AWS modules often perform polling or retries, but when using recorded sessions there's no reason to wait. We can still exercise retry and other - code paths without waiting for wall-clock time to pass.""" + code paths without waiting for wall-clock time to pass. + """ if not os.getenv("PLACEBO_RECORD"): p = mock.patch("time.sleep", return_value=None) p.start() diff --git a/tox.ini b/tox.ini index ef66e76eaeb..7d5f6a8e8a6 100644 --- a/tox.ini +++ b/tox.ini @@ -3,8 +3,8 @@ skipsdist = True envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints},linters # Tox4 supports labels which allow us to group the environments rather than dumping all commands into a single environment labels = - format = flynt, black, isort - lint = complexity-report, ansible-lint, black-lint, isort-lint, flake8-lint, flynt-lint + format = flynt, black, isort, ruff + lint = complexity-report, ansible-lint, black-lint, isort-lint, flake8-lint, flynt-lint, ruff-lint units = ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints} [common] @@ -100,11 +100,28 @@ commands = deps = {[testenv:black]deps} {[testenv:isort]deps} + {[testenv:ruff]deps} flake8 commands = black -v --check {toxinidir}/plugins {toxinidir}/tests isort --check-only --diff {toxinidir}/plugins {toxinidir}/tests flake8 {posargs} {toxinidir}/plugins {toxinidir}/tests + ruff check {toxinidir}/plugins {toxinidir}/tests + +[testenv:ruff] +description = lint source code +deps = + ruff +commands = + ruff check --fix {[common]format_dirs} + ruff format {[common]format_dirs} + +[testenv:ruff-lint] +description = lint source code +deps = + ruff +commands = + ruff check --diff {toxinidir}/plugins {toxinidir}/tests [flake8] # E123, E125 skipped as they are invalid PEP-8.
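For illustration, a minimal sketch of how a plugin's imports end up grouped under the isort/ruff section order configured in pyproject.toml above: standard library first, then third-party botocore/boto3, then ansible-core, then amazon.aws, then community.aws, with one import per line because force-single-line is enabled. The community.aws helper path below is hypothetical and is shown only to indicate where that section sorts:

import json

from botocore.exceptions import BotoCoreError

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule

from ansible_collections.community.aws.plugins.module_utils.example_helper import example_helper  # hypothetical path, for ordering only

With the tox environments added above, and assuming a tox 4 installation for label support, ``tox -e ruff`` runs the fix and format pass defined in [testenv:ruff], while ``tox -e ruff-lint`` (or the whole ``lint`` label via ``tox -m lint``) runs the check-only ``ruff check --diff`` pass alongside the other linters.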