From bca7380aebaaad2e0e676bc6358d634dd8ab2ab5 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 15 Feb 2019 20:52:00 +0530 Subject: [PATCH 001/193] Updated with redshift insecure sgs. Updated with redshift insecure sgs. --- hammer/library/aws/redshift.py | 230 ++++++++++++++++++ .../create_security_groups_tickets.py | 32 ++- 2 files changed, 260 insertions(+), 2 deletions(-) create mode 100644 hammer/library/aws/redshift.py diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py new file mode 100644 index 00000000..71ac4813 --- /dev/null +++ b/hammer/library/aws/redshift.py @@ -0,0 +1,230 @@ +import json +import logging +import mimetypes +import pathlib + + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup + + +# structure which describes EC2 instance +RedshiftCluster = namedtuple('RedshiftCluster', [ + # cluster_id + 'id', + # subnet_group_id + 'subnet_group_name' + ]) + +class RedshiftClusterOperations(object): + + @classmethod + @timeit + def get_redshift_vpc_security_groups(cls, redshift_client, group_id): + """ Retrieve redshift clusters meta data with security group attached + + :param redshift_client: boto3 redshift client + :param group_id: security group id + + :return: list with redshift clusters details + """ + # describe rds instances with security group attached + redshift_clusters = [] + + # this will include Clusters + clusters_res = redshift_client.describe_clusters() + for cluster in clusters_res["Clusters"]: + active_security_groups = [sg["VpcSecurityGroupId"] for sg in cluster['VpcSecurityGroups'] if + sg["Status"] == "active"] + if group_id in active_security_groups: + redshift_clusters.append(RedshiftCluster( + id=cluster["ClusterIdentifier"], + subnet_group_name=cluster["ClusterSubnetGroupName"] 
+ )) + + return redshift_clusters + + @staticmethod + def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): + """ + Sets the cluster encryption using Server side encryption. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to encrypt + :param kms_master_key_id: Redshift cluster encryption key. default value is none. + + :return: nothing + """ + + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + Encryption=True, + KmsKeyId=kms_master_key_id + ) + +class RedshiftCluster(object): + """ + Basic class for Redshift Cluster. + Encapsulates `Owner`/`Tags`. + """ + def __init__(self, account, name, tags, is_encrypted): + """ + :param account: `Account` instance where redshift cluster is present + + :param name: `Name` of cluster id + :param tags: tags if redshift cluster tags (as AWS returns) + :param is_encrypted: encrypted or not. + """ + self.account = account + self.name =name + self.tags = tags + self.is_encrypt = is_encrypted + + def encrypt_cluster(self, kms_key_id=None): + """ + Encrypt bucket with SSL encryption. + :return: nothing + """ + try: + RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id) + except Exception: + logging.exception(f"Failed to encrypt {self.name} cluster ") + return False + + return True + + +class RedshiftClusterChecker(object): + """ + Basic class for checking Redshift cluster in account. + Encapsulates discovered Redshift cluster. + """ + def __init__(self, account): + """ + :param account: `Account` instance with Redshift cluster to check + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + def check(self, clusters=None): + """ + Walk through Redshift clusters in the account and check them (encrypted or not). 
+ Put all gathered clusters to `self.clusters`. + + :param clusters: list with Redshift cluster names to check, if it is not supplied - all clusters must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + is_encrypted = cluster_details["Encrypted"] + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_encrypt=is_encrypted) + self.clusters.append(cluster) + return True + + +class RedshiftInsecureSGsChecker(object): + + """ + Basic class for checking security group in account/region. + Encapsulates check settings and discovered security groups. + """ + def __init__(self, + account, + restricted_ports): + """ + :param account: `Account` instance with security groups to check + :param restricted_ports: list with ports to consider `SecurityGroup` as not restricted + """ + self.account = account + self.restricted_ports = restricted_ports + self.groups = [] + + def get_security_group(self, id): + """ + :return: `SecurityGroup` by id + """ + for group in self.groups: + if group.id == id: + return group + return None + + def check(self, ids=None): + """ + Walk through security groups in the account/region and check them (restricted or not). + Put all gathered groups to `self.groups`. 
+ + :param ids: list with security group ids to check, if it is not supplied - all groups must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + args = {'DryRun': False} + if ids: + args['GroupIds'] = ids + try: + clusters = self.account.client("redshift").describe_clusters() + for cluster in clusters["Clusters"]: + for security_group in cluster["ClusterSecurityGroups"]: + sg_name = security_group["ClusterSecurityGroupName"] + status = security_group["Status"] + sg_details = self.account.client("redshift").describe_cluster_security_groups( + ClusterSecurityGroupName=sg_name) + + + + #describe_security_groups(**args)["SecurityGroups"] + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ec2:{err.operation_name})") + elif err.response['Error']['Code'] == "InvalidGroup.NotFound": + logging.error(err.response['Error']['Message']) + return False + else: + logging.exception(f"Failed to describe security groups in {self.account}") + return False + + for security_group in secgroups: + sg = SecurityGroup(self.account, + security_group) + sg.check(self.restricted_ports) + self.groups.append(sg) + return True \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 31b4c4ef..90dc7740 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -21,6 +21,7 @@ from library.aws.utility import Account from library.aws.security_groups import RestrictionStatus from library.aws.rds import RDSOperations +from library.aws.redshift import RedshiftClusterOperations from library.utility import SingletonInstance, SingletonInstanceException @@ -214,6 +215,23 @@ def 
build_elb_instances_table(elb_details): return elb_instance_details, in_use + @staticmethod + def build_redshift_clusters_table(redshift_clusters): + cluster_details = "" + in_use = False + + if len(redshift_clusters) > 0: + in_use = True + cluster_details += ( + f"\n*Redshift Clustes:*\n" + f"||Redshift Cluster ID||Subnet_Group_Name||\n") + for cluster in redshift_clusters: + cluster_details += ( + f"|{id}|{subnet_group_name}|\n" + ) + + return cluster_details, in_use + def create_tickets_securitygroups(self): """ Class function to create jira tickets """ table_name = self.config.sg.ddb_table_name @@ -315,7 +333,7 @@ def create_tickets_securitygroups(self): elbv2_client = account.client("elbv2") if account.session is not None else None iam_client = account.client("iam") if account.session is not None else None - + redshift_client = account.client("redshift") if account.session is not None else None rds_instance_details = elb_instance_details = None if ec2_client is not None: @@ -338,7 +356,15 @@ def create_tickets_securitygroups(self): except Exception: logging.exception(f"Failed to build RDS details for '{group_name} / {group_id}' in {account}") - sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds + if redshift_client is not None: + try: + redshift_clusters = RedshiftClusterOperations.get_redshift_vpc_security_groups(redshift_client, group_id) + sg_redshift_details, sg_in_use_redshift_clusters = self.build_redshift_clusters_table(redshift_clusters) + except Exception: + logging.exception( + f"Failed to build Redshift Cluster details for '{group_name} / {group_id}' in {account}") + + sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_redshift_clusters owner = group_owner if group_owner is not None else ec2_owner bu = group_bu if group_bu is not None else ec2_bu @@ -432,6 +458,8 @@ def create_tickets_securitygroups(self): issue_description += f"{instance_profile_details if instance_profile_details else ''}" + issue_description += 
f"{sg_redshift_details if sg_redshift_details else ''}" + issue_description += ( f"*Recommendation*: " f"Allow access only for a minimum set of required ip addresses/ranges from [RFC1918|https://tools.ietf.org/html/rfc1918]. " From 66c94ce98de29c826ddc7630d057b4fe5a2cd79b Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 19 Feb 2019 12:26:39 +0530 Subject: [PATCH 002/193] Updated with redshift insecure sgs code changes. Updated with redshift insecure sgs code changes. --- hammer/library/aws/redshift.py | 6 +++--- .../reporting/create_security_groups_tickets.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 71ac4813..621b4cf5 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -3,7 +3,6 @@ import mimetypes import pathlib - from datetime import datetime, timezone from io import BytesIO from copy import deepcopy @@ -11,10 +10,11 @@ from library.utility import jsonDumps from library.utility import timeit from library.aws.security_groups import SecurityGroup +from collections import namedtuple # structure which describes EC2 instance -RedshiftCluster = namedtuple('RedshiftCluster', [ +RedshiftCluster_Details = namedtuple('RedshiftCluster_Details', [ # cluster_id 'id', # subnet_group_id @@ -42,7 +42,7 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): active_security_groups = [sg["VpcSecurityGroupId"] for sg in cluster['VpcSecurityGroups'] if sg["Status"] == "active"] if group_id in active_security_groups: - redshift_clusters.append(RedshiftCluster( + redshift_clusters.append(RedshiftCluster_Details( id=cluster["ClusterIdentifier"], subnet_group_name=cluster["ClusterSubnetGroupName"] )) diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 90dc7740..35d20ae2 100755 --- 
a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -227,7 +227,7 @@ def build_redshift_clusters_table(redshift_clusters): f"||Redshift Cluster ID||Subnet_Group_Name||\n") for cluster in redshift_clusters: cluster_details += ( - f"|{id}|{subnet_group_name}|\n" + f"|{cluster.id}|{cluster.subnet_group_name}|\n" ) return cluster_details, in_use @@ -325,7 +325,7 @@ def create_tickets_securitygroups(self): ec2_client = account.client("ec2") if account.session is not None else None sg_instance_details = ec2_owner = ec2_bu = ec2_product = None - sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = None + sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = sg_in_use_redshift = None sg_public = sg_blind_public = False rds_client = account.client("rds") if account.session is not None else None @@ -359,12 +359,12 @@ def create_tickets_securitygroups(self): if redshift_client is not None: try: redshift_clusters = RedshiftClusterOperations.get_redshift_vpc_security_groups(redshift_client, group_id) - sg_redshift_details, sg_in_use_redshift_clusters = self.build_redshift_clusters_table(redshift_clusters) + sg_redshift_details, sg_in_use_redshift = self.build_redshift_clusters_table(redshift_clusters) except Exception: logging.exception( f"Failed to build Redshift Cluster details for '{group_name} / {group_id}' in {account}") - sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_redshift_clusters + sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_redshift owner = group_owner if group_owner is not None else ec2_owner bu = group_bu if group_bu is not None else ec2_bu From 4420b06c1eb93b883b5b6349e52f3c3297cee8bf Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 21 Feb 2019 21:36:56 +0530 Subject: [PATCH 003/193] Updated with redshift publicaccess issue changes. 
Updated with redshift publicaccess issue changes. --- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 32 +++ deployment/cf-templates/identification.json | 225 ++++++++++++++- deployment/configs/config.json | 7 + deployment/configs/whitelist.json | 3 + .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 7 +- ...describe_redshift_cluster_public_access.py | 85 ++++++ ..._to_desc_redshift_cluster_public_access.py | 36 +++ hammer/library/aws/redshift.py | 259 ++++++++++++++++++ hammer/library/config.py | 4 + hammer/library/ddb_issues.py | 5 + .../clean_redshift_public_access.py | 150 ++++++++++ ...te_redshift_public_access_issue_tickets.py | 160 +++++++++++ 14 files changed, 975 insertions(+), 4 deletions(-) create mode 100644 hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py create mode 100644 hammer/identification/lambdas/redshift-cluster-public-access-identification/initiate_to_desc_redshift_cluster_public_access.py create mode 100644 hammer/library/aws/redshift.py create mode 100644 hammer/reporting-remediation/remediation/clean_redshift_public_access.py create mode 100644 hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index f4219872..187e4b87 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification 
sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification redshift-cluster-public-access-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 2ab4843c..51c5c7d2 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -428,6 +428,38 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } + }, + "DynamoDBRedshiftClusterPublicAccess": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-public-access" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 431bf3f4..3f751682 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -26,7 +26,8 @@ "SourceIdentificationIAMUserInactiveKeys", 
"SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", - "SourceIdentificationRDSSnapshots" + "SourceIdentificationRDSSnapshots", + "SourceIdentificationRedshiftPublicAccess" ] }, { @@ -88,6 +89,9 @@ }, "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" + }, + "SourceIdentificationRedshiftPublicAccess":{ + "default": "Relative path to publicly accessible Redshift Cluster sources" } } } @@ -176,6 +180,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationRedshiftPublicAccess": { + "Type": "String", + "Default": "redshift-cluster-public-access-identification.zip" } }, "Conditions": { @@ -230,6 +238,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricRedshiftPublicAccessError": { + "value": "RedshiftPublicAccessError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -302,6 +313,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameRedshiftPublicAccess": { + "value": "describe-redshift-public-access-sns" + }, + "SNSTopicNameRedshiftPublicAccess": { + "value": "describe-redshift-public-access-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -379,6 +396,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateRedshiftPublicAccessLambdaFunctionName": { + "value": "initiate-redshift-public-access" + }, + "IdentifyRedshiftPublicAccessLambdaFunctionName": { + "value": "describe-redshift-public-access" } } }, @@ -1840,6 +1863,106 @@ } }, + "LambdaInitiateRedshiftPublicAccessEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftPublicAccess", "LogGroupLambdaInitiateRedshiftPublicAccess"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + 
"S3Key": { "Ref": "SourceIdentificationRedshiftPublicAccess" } + }, + "Environment": { + "Variables": { + "SNS_REDSHIFT_PUBLIC_ACCESS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRedshiftPublicAccess" } + } + }, + "Description": "Lambda function for initiate to identify publicly accessible Redshift clusters.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "initiate_to_desc_redshift_cluster_public_access.lambda_handler", + "MemorySize": 128, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateRedshiftPublicAccessEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "InitiateRedshiftPublicAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateRedshiftPublicAccessEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaInitiateRedshiftPublicAccessEvaluation"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRedshiftPublicAccessEvaluation" } + } + }, + "LambdaEvaluateRedshiftPublicAccess": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateRedshiftPublicAccess"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationRedshiftPublicAccess" } + }, + "Description": "Lambda 
function to describe publicly accessible Redshift clusters.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_redshift_cluster_public_access.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateRedshiftPublicAccess": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyRedshiftPublicAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateRedshiftPublicAccess": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateRedshiftPublicAccess"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftPublicAccess" } + } + }, + "EventBackupDDB": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaBackupDDB"], @@ -2005,6 +2128,22 @@ } }, + "EventInitiateEvaluationRedshiftPublicAccess": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRedshiftPublicAccessEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate publicly accessible Redshift cluster evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftPublicAccess"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] 
]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftPublicAccessEvaluation", "Arn"] }, + "Id": "LambdaInitiateRedshiftPublicAccessEvaluation" + } + ] + } + }, "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaLogsForwarder"], @@ -2148,6 +2287,16 @@ } }, + "PermissionToInvokeLambdaInitiateRedshiftPublicAccessEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateRedshiftPublicAccessEvaluation", "EventInitiateEvaluationRedshiftPublicAccess"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftPublicAccess", "Arn"] } + } + }, "SNSNotifyLambdaEvaluateSG": { "Type": "AWS::SNS::Topic", "DependsOn": ["LambdaEvaluateSG"], @@ -2365,6 +2514,24 @@ } }, + "SNSNotifyLambdaEvaluateRedshiftPublicAccess": { + "Type": "AWS::SNS::Topic", + "DependsOn": "LambdaEvaluateRedshiftPublicAccess", + "Properties": { + "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftPublicAccess", "value"] } ] + ]}, + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftPublicAccess", "value"] } ] + ]}, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluateRedshiftPublicAccess", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, "PermissionToInvokeLambdaEvaluateSgSNS": { "Type": "AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], @@ -2486,6 +2653,16 @@ } }, + "PermissionToInvokeLambdaEvaluateRedshiftPublicAccessSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftPublicAccess", "LambdaEvaluateRedshiftPublicAccess"], + "Properties": { + 
"Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRedshiftPublicAccess" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftPublicAccess", "Arn"] } + } + }, "SNSIdentificationErrors": { "Type": "AWS::SNS::Topic", "Properties": { @@ -3088,6 +3265,52 @@ "Threshold": 0, "TreatMissingData": "notBreaching" } + }, + "AlarmErrorsLambdaInitiateRedshiftPublicAccessEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftPublicAccessEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaRedshiftPublicAccessEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftPublicAccess"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftPublicAccess" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateRedshiftPublicAccess" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } } }, "Outputs": { diff --git 
a/deployment/configs/config.json b/deployment/configs/config.json index 74087148..74460cd9 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -139,5 +139,12 @@ "enabled": true, "ddb.table_name": "hammer-rds-unencrypted", "reporting": true + }, + "redshift_public_access": { + "enabled": true, + "ddb.table_name": "djif-hammer-redshift-public-access", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 6d31497a..19b39359 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -43,5 +43,8 @@ "s3_encryption": { }, "rds_encryption": { + }, + "redshift_public_access":{ + } } \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 654aaffa..b40bd3e9 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -14,7 +14,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ebs-public-snapshots-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.redshift-cluster-public-access-identification" ] tags = "${var.tags}" @@ -40,6 +41,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + 
SourceIdentificationRedshiftPublicAccess = "${aws_s3_bucket_object.redshift-cluster-public-access-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index d0791d6d..3bfd0963 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -86,4 +86,9 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" -} \ No newline at end of file +} +resource "aws_s3_bucket_object" "redshift-cluster-public-access-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("redshift-cluster-public-access-identification-%s.zip", "${md5(file("${path.module}/../../../packages/redshift-cluster-public-access-identification.zip"))}")}" + source = "${path.module}/../../../packages/redshift-cluster-public-access-identification.zip" +} diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py new file mode 100644 index 00000000..70f741dc --- /dev/null +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -0,0 +1,85 @@ +import json +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.redshift import RedshiftClusterPublicAccessChecker +from library.aws.utility import Account +from 
def lambda_handler(event, context):
    """ Lambda handler to evaluate whether Redshift clusters are publicly accessible.

    Expects an SNS-wrapped payload containing ``account_id``, ``account_name``,
    ``regions`` and ``sns_arn``. Processes one region per invocation and
    re-publishes the payload to SNS until the region list is empty.
    """
    set_logging(level=logging.DEBUG)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.redshift_public_access.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for publicly accessible Redshift clusters in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RedshiftPublicAccessIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"Redshift clusters in DDB:\n{open_issues.keys()}")

        checker = RedshiftClusterPublicAccessChecker(account=account)
        if checker.check():
            for cluster in checker.clusters:
                logging.debug(f"Checking {cluster.name}")
                if cluster.is_public:
                    issue = RedshiftPublicAccessIssue(account_id, cluster.name)
                    issue.issue_details.tags = cluster.tags
                    issue.issue_details.region = cluster.account.region
                    # consult the public-access whitelist (was: config.redshiftEncrypt,
                    # which belongs to the encryption check, not this one)
                    if config.redshift_public_access.in_whitelist(account_id, cluster.name):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {cluster.name} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(cluster.name, None)

        logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated clusters
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check Redshift clusters for '{account_id} ({account_name})'")
        return

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public access issues checking")

    logging.debug(f"Checked Redshift Clusters for '{account_id} ({account_name})'")
""" + set_logging(level=logging.INFO) + logging.debug("Initiating Redshift Cluster public access checking") + + try: + sns_arn = os.environ["SNS_REDSHIFT_PUBLIC_ACCESS_ARN"] + config = Config() + + if not config.redshift_public_access.enabled: + logging.debug("Redshift cluster public access checking disabled") + return + + logging.debug("Iterating over each account to initiate Redshift cluster public access check") + for account_id, account_name in config.redshift_public_access.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating Redshift cluster public access checking for '{account_name}'") + Sns.publish(sns_arn, payload) + + except Exception: + logging.exception("Error occurred while initiation of Redshift cluster public access checking") + return + + logging.debug("Redshift clusters public access checking initiation done") diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py new file mode 100644 index 00000000..3d9ce144 --- /dev/null +++ b/hammer/library/aws/redshift.py @@ -0,0 +1,259 @@ +import json +import logging +import mimetypes +import pathlib + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup +from collections import namedtuple + + +# structure which describes EC2 instance +RedshiftCluster_Details = namedtuple('RedshiftCluster_Details', [ + # cluster_id + 'id', + # subnet_group_id + 'subnet_group_name' + ]) + +class RedshiftClusterOperations(object): + + @classmethod + @timeit + def get_redshift_vpc_security_groups(cls, redshift_client, group_id): + """ Retrieve redshift clusters meta data with security group attached + + :param redshift_client: boto3 redshift client + :param group_id: security 
class RedshiftCluster(object):
    """
    Basic class for a Redshift cluster.
    Encapsulates the cluster identifier, its tags and the
    encryption / public-access state discovered by the checkers.
    """
    def __init__(self, account, name, tags, is_encrypted=None, is_public=None):
        """
        :param account: `Account` instance where the redshift cluster is present
        :param name: cluster identifier
        :param tags: redshift cluster tags (as AWS returns them)
        :param is_encrypted: True - cluster storage is encrypted, False - not, None - unknown
        :param is_public: True - cluster is publicly accessible, False - not, None - unknown
        """
        self.account = account
        self.name = name
        self.tags = tags
        # `is_encrypt` kept for backward compatibility with existing callers;
        # `is_encrypted` is the consistently named alias matching the parameter
        self.is_encrypt = is_encrypted
        self.is_encrypted = is_encrypted
        self.is_public = is_public

    def encrypt_cluster(self, kms_key_id=None):
        """
        Enable server-side encryption on the cluster.

        :param kms_key_id: KMS key to encrypt with; None - use the default key

        :return: True - if the modify call succeeded,
                 False - otherwise (failure is logged)
        """
        try:
            RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id)
        except Exception:
            logging.exception(f"Failed to encrypt {self.name} cluster ")
            return False

        return True

    def modify_cluster(self, public_access):
        """
        Modify the cluster's public accessibility.

        :param public_access: desired PubliclyAccessible state (True/False)

        :return: True - if the modify call succeeded,
                 False - otherwise (failure is logged)
        """
        try:
            RedshiftClusterOperations.set_cluster_access(self.account.client("redshift"), self.name, public_access)
        except Exception:
            # was "Failed to encrypt ..." - this method changes public access, not encryption
            logging.exception(f"Failed to modify public access for {self.name} cluster ")
            return False

        return True
class RedshiftClusterPublicAccessChecker(object):
    """
    Basic class for checking redshift clusters public access in account/region.
    Encapsulates check settings and discovered clusters.
    """
    def __init__(self, account):
        """
        :param account: `Account` whose clusters should be checked
        """
        self.account = account
        self.clusters = []

    def get_cluster(self, name):
        """
        :param name: cluster identifier to look up

        :return: discovered `RedshiftCluster` with matching name, or None
        """
        matches = (candidate for candidate in self.clusters if candidate.name == name)
        return next(matches, None)

    def check(self, clusters=None):
        """
        Walk through clusters in the account/region and check them.
        Put all gathered clusters to `self.clusters`.

        :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked

        :return: boolean. True - if check was successful,
                          False - otherwise
        """
        try:
            # AWS does not support filtering during list, so get all clusters for account
            response = self.account.client("redshift").describe_clusters()
        except ClientError as err:
            if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
                logging.error(f"Access denied in {self.account} "
                              f"(redshift:{err.operation_name})")
            else:
                logging.exception(f"Failed to list cluster in {self.account}")
            return False

        for details in response.get("Clusters", []):
            cluster_id = details["ClusterIdentifier"]

            # honor the optional cluster-id filter
            if clusters is not None and cluster_id not in clusters:
                continue

            self.clusters.append(RedshiftCluster(account=self.account,
                                                 name=cluster_id,
                                                 tags=details.get("Tags", {}),
                                                 is_public=details["PubliclyAccessible"]))

        return True
class CleanRedshiftPublicAccess:
    """ Class to remediate Redshift cluster public access issues """
    def __init__(self, config):
        self.config = config

    def clean_redshift_public_access(self, batch=False):
        """ Class method to clean Redshift clusters which are violating aws best practices

        :param batch: True - remediate without asking confirmation for each cluster
        """
        # use the injected config (was reading the module-level `config` global,
        # which only exists when run as a script)
        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(self.config.redshift_public_access.ddb_table_name)

        retention_period = self.config.redshift_public_access.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RedshiftPublicAccessIssue)
            for issue in issues:
                cluster_id = issue.issue_id

                in_whitelist = self.config.redshift_public_access.in_whitelist(account_id, cluster_id)
                # NOTE(review): fixlist support is a placeholder - always True for now
                in_fixlist = True

                if in_whitelist:
                    logging.debug(f"Skipping {cluster_id} (in whitelist)")
                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(
                        ticket_id=issue.jira_details.ticket,
                        labels=IssueStatus.Whitelisted
                    )
                    continue
                if not in_fixlist:
                    logging.debug(f"Skipping {cluster_id} (not in fixlist)")
                    continue

                if issue.timestamps.reported is None:
                    logging.debug(f"Skipping '{cluster_id}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(f"Skipping {cluster_id} (has been already remediated)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now - updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        if not batch and \
                           not confirm(f"Do you want to remediate '{cluster_id}' Redshift cluster public access", False):
                            continue

                        account = Account(id=account_id,
                                          name=account_name,
                                          role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        checker = RedshiftClusterPublicAccessChecker(account=account)
                        checker.check(clusters=[cluster_id])
                        cluster_details = checker.get_cluster(cluster_id)

                        # was `if cluster_id is None` - cluster_id can never be None here;
                        # it is the lookup result that may be missing
                        if cluster_details is None:
                            logging.debug(f"Redshift Cluster {cluster_id} was removed by user")
                        elif not cluster_details.is_public:
                            # was inverted: a cluster that is NO LONGER public is the
                            # one remediated by the user
                            logging.debug(f"Cluster {cluster_details.name} public access issue was remediated by user")
                        else:
                            logging.debug(f"Remediating '{cluster_details.name}' public access")
                            remediation_succeed = True
                            # disable public access (was modify_cluster(True), which
                            # would have kept the cluster publicly accessible)
                            if cluster_details.modify_cluster(False):
                                comment = (f"Cluster '{cluster_details.name}' public access issue "
                                           f"in '{account_name} / {account_id}' account "
                                           f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (f"Failed to remediate cluster '{cluster_details.name}' public access issue "
                                           f"in '{account_name} / {account_id}' account "
                                           f"due to some limitations. Please, check manually")

                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                    f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu, product=product,
                            )
                            IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        # cluster_details may be unbound if the failure happened early -
                        # log the issue id instead
                        logging.exception(f"Error occurred while updating cluster '{cluster_id}' public access "
                                          f"in '{account_name} / {account_id}'")
                else:
                    # cluster_details is never defined on this path - use the issue id
                    logging.debug(f"Skipping '{cluster_id}' "
                                  f"({retention_period - no_of_days_issue_created} days before remediation)")
class CreateRedshiftPublicAccessTickets(object):
    """ Class to create redshift publicly accessible cluster issue tickets """
    def __init__(self, config):
        self.config = config

    def create_tickets_redshift_public_access(self):
        """ Class method to create jira tickets """
        table_name = self.config.redshift_public_access.ddb_table_name

        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(table_name)
        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, RedshiftPublicAccessIssue)
            for issue in issues:
                cluster_id = issue.issue_id
                region = issue.issue_details.region
                tags = issue.issue_details.tags
                # issue has been already reported
                if issue.timestamps.reported is not None:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                        logging.debug(f"Closing {issue.status.value} Redshift publicly accessible cluster '{cluster_id}' issue")

                        comment = (f"Closing {issue.status.value} Redshift publicly accessible cluster '{cluster_id}' issue "
                                   f"in '{account_name} / {account_id}' account, '{region}' region")
                        if issue.status == IssueStatus.Whitelisted:
                            # Adding label with "whitelisted" to jira ticket.
                            jira.add_label(
                                ticket_id=issue.jira_details.ticket,
                                labels=IssueStatus.Whitelisted
                            )
                        jira.close_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_closed(ddb_table, issue)
                    # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                    elif issue.timestamps.updated > issue.timestamps.reported:
                        logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {cluster_id}")
                        slack.report_issue(
                            msg=f"Redshift publicly accessible cluster '{cluster_id}' issue is changed "
                                f"in '{account_name} / {account_id}' account, '{region}' region"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_updated(ddb_table, issue)
                    else:
                        logging.debug(f"No changes for '{cluster_id}'")
                # issue has not been reported yet
                else:
                    logging.debug(f"Reporting Redshift publicly accessible cluster '{cluster_id}' issue")

                    owner = tags.get("owner", None)
                    bu = tags.get("bu", None)
                    product = tags.get("product", None)

                    # note trailing space after the cluster id - without it the summary
                    # rendered as "...cluster 'x'in 'account...'"
                    issue_summary = (f"Redshift publicly accessible cluster '{cluster_id}' "
                                     f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                    issue_description = (
                        f"The Redshift Cluster is publicly accessible.\n\n"
                        f"*Risk*: High\n\n"
                        f"*Account Name*: {account_name}\n"
                        f"*Account ID*: {account_id}\n"
                        f"*Region*: {region}\n"
                        f"*Redshift Cluster ID*: {cluster_id}\n")

                    auto_remediation_date = (self.config.now + self.config.redshift_public_access.issue_retention_date).date()
                    issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                    issue_description += JiraOperations.build_tags_table(tags)

                    issue_description += "\n"
                    issue_description += (
                        f"*Recommendation*: "
                        f"Disable public access of the Redshift cluster.")

                    try:
                        response = jira.add_issue(
                            issue_summary=issue_summary, issue_description=issue_description,
                            priority="Major", labels=["redshift-public-access"],
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                    except Exception:
                        logging.exception("Failed to create jira ticket")
                        continue

                    if response is not None:
                        issue.jira_details.ticket = response.ticket_id
                        issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                    issue.jira_details.owner = owner
                    issue.jira_details.business_unit = bu
                    issue.jira_details.product = product

                    slack.report_issue(
                        msg=f"Discovered {issue_summary}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )

                    IssueOperations.set_status_reported(ddb_table, issue)
From: vigneswararaomacharla Date: Fri, 1 Mar 2019 18:52:04 +0530 Subject: [PATCH 004/193] Added redshift logging code changes. Added redshift logging code changes. --- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 32 + deployment/cf-templates/identification.json | 1160 +++++++++++++---- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 7 +- .../describe_redshift_logging_issues.py | 85 ++ ...nitiate_to_desc_redshift_logging_issues.py | 36 + hammer/library/aws/redshift.py | 361 +++++ hammer/library/config.py | 1 + hammer/library/ddb_issues.py | 5 + .../create_redshift_logging_issue_tickets.py | 160 +++ 11 files changed, 1600 insertions(+), 253 deletions(-) create mode 100644 hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py create mode 100644 hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py create mode 100644 hammer/library/aws/redshift.py create mode 100644 hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index f4219872..8ad0f9cc 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup 
sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification redshift-audit-logging-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 2ab4843c..f81207d9 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -428,6 +428,38 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } + }, + "DynamoDBRedshiftLogging": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-logging" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 431bf3f4..15b63c1f 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -26,7 +26,8 @@ "SourceIdentificationIAMUserInactiveKeys", "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", - "SourceIdentificationRDSSnapshots" + "SourceIdentificationRDSSnapshots", + "SourceIdentificationRedshiftLogging" ] }, { 
@@ -88,6 +89,9 @@ }, "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" + }, + "SourceIdentificationRedshiftLogging":{ + "default": "Relative path to disabled logging Redshift Cluster sources" } } } @@ -176,6 +180,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationRedshiftLogging": { + "Type": "String", + "Default": "redshift-audit-logging-issues-identification.zip" } }, "Conditions": { @@ -230,6 +238,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricRedshiftLoggingError": { + "value": "RedshiftLoggingError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -302,6 +313,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameRedshiftLogging": { + "value": "describe-redshift-logging-sns" + }, + "SNSTopicNameRedshiftLogging": { + "value": "describe-redshift-logging-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -379,6 +396,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateRedshiftLoggingLambdaFunctionName": { + "value": "initiate-redshift-logging" + }, + "IdentifyRedshiftLoggingLambdaFunctionName": { + "value": "describe-redshift-logging" } } }, @@ -1840,255 +1863,809 @@ } }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": 
["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - 
"Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, 
"InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, 
":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, 
- "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] - } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "LambdaInitiateRedshiftLoggingEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftLogging", "LogGroupLambdaInitiateRedshiftLoggingEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationRedshiftLogging" } + }, + "Environment": { + "Variables": { + "SNS_REDSHIFT_LOGGING_ARN": { "Ref": "SNSNotifyLambdaEvaluateRedshiftLogging" } + } + }, + "Description": "Lambda function for initiate to identify disabled audit logging Redshift clusters.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftLoggingLambdaFunctionName", "value"] } ] + ]}, + "Handler": "initiate_to_desc_redshift_logging_issues.lambda_handler", + "MemorySize": 128, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateRedshiftLoggingEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "InitiateRedshiftLoggingLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateRedshiftLoggingEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaInitiateRedshiftLoggingEvaluation"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRedshiftLoggingEvaluation" } + } + }, + 
"LambdaEvaluateRedshiftLogging": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateRedshiftLogging"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationRedshiftLogging" } + }, + "Description": "Lambda function to describe disabled audit logging Redshift clusters.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftLoggingLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_redshift_logging_issues.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateRedshiftLogging": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyRedshiftLoggingLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateRedshiftLogging": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateRedshiftLogging"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftLogging" } + } + }, + + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], + "Properties": { + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { 
"Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] + } + }, + "EventInitiateEvaluationS3IAM": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", + "LambdaInitiateIAMUserInactiveKeysEvaluation", + "LambdaInitiateS3EncryptionEvaluation", + "LambdaInitiateS3ACLEvaluation", + "LambdaInitiateS3PolicyEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, + "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, + "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, + "Id": "LambdaInitiateS3EncryptionEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, + "Id": "LambdaInitiateS3ACLEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, + "Id": "LambdaInitiateS3PolicyEvaluation" + } + ] + } + }, + "EventInitiateEvaluationCloudTrails": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, + "Id": 
"LambdaInitiateCloudTrailsEvaluation" + } + ] + } + }, + "EventInitiateEvaluationEBSVolumes": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, + "Id": "LambdaInitiateEBSVolumesEvaluation" + } + ] + } + }, + "EventInitiateEvaluationEBSSnapshots": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, + "Id": "LambdaInitiateEBSSnapshotsEvaluation" + } + ] + } + }, + "EventInitiateEvaluationRDSSnapshots": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, + "Id": "LambdaInitiateRDSSnapshotsEvaluation" + } + ] + } + }, + "EventInitiateEvaluationSG": { + "Type": "AWS::Events::Rule", + "DependsOn": 
["LambdaInitiateSGEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, + "Id": "LambdaInitiateSGEvaluation" + } + ] + } + }, + "EventInitiateEvaluationSQSPublicPolicy": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, + "Id": "LambdaInitiateSQSPublicPolicyEvaluation" + } + ] + } + }, + "EventInitiateEvaluationRDSEncryption": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, + "Id": "LambdaInitiateRDSEncryptionEvaluation" + } + ] + } + }, + + "EventInitiateEvaluationRedshiftLogging": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate audit logging 
issue Redshift cluster evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftLogging"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftLoggingEvaluation", "Arn"] }, + "Id": "LambdaInitiateRedshiftLoggingEvaluation" + } + ] + } + }, + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} + } + }, + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, + "Action": 
"lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { + "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] + } + } + }, + "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } + } + }, + 
"PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], "Properties": { "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, @@ -2148,6 +2725,16 @@ } }, + "PermissionToInvokeLambdaInitiateRedshiftLoggingEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluation", "EventInitiateEvaluationRedshiftLogging"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftLogging", "Arn"] } + } + }, "SNSNotifyLambdaEvaluateSG": { "Type": "AWS::SNS::Topic", "DependsOn": ["LambdaEvaluateSG"], @@ -2364,7 +2951,24 @@ }] } }, - + "SNSNotifyLambdaEvaluateRedshiftLogging": { + "Type": "AWS::SNS::Topic", + "DependsOn": "LambdaEvaluateRedshiftLogging", + "Properties": { + "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftLogging", "value"] } ] + ]}, + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftLogging", "value"] } ] + ]}, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluateRedshiftLogging", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, "PermissionToInvokeLambdaEvaluateSgSNS": { "Type": "AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], @@ -2486,6 +3090,16 @@ } }, + "PermissionToInvokeLambdaEvaluateRedshiftLoggingSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftLogging", "LambdaEvaluateRedshiftLogging"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": 
"SNSNotifyLambdaEvaluateRedshiftLogging" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftLogging", "Arn"] } + } + }, "SNSIdentificationErrors": { "Type": "AWS::SNS::Topic", "Properties": { @@ -3088,6 +3702,52 @@ "Threshold": 0, "TreatMissingData": "notBreaching" } + }, + "AlarmErrorsLambdaInitiateRedshiftLoggingEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftLoggingEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaRedshiftLoggingEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftLogging"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftLogging" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateRedshiftLogging" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } } }, "Outputs": { diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 654aaffa..5f45e14d 100755 
--- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -14,7 +14,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ebs-public-snapshots-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.redshift-audit-logging-issues-identification" ] tags = "${var.tags}" @@ -40,6 +41,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationRedshiftLogging = "${aws_s3_bucket_object.redshift-audit-logging-issues-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index d0791d6d..b78097a9 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -86,4 +86,9 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" -} \ No newline at end of file +} +resource "aws_s3_bucket_object" "redshift-audit-logging-issues-identification" { 
+ bucket = "${var.s3bucket}" + key = "lambda/${format("redshift-audit-logging-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/redshift-audit-logging-issues-identification.zip"))}")}" + source = "${path.module}/../../../packages/redshift-audit-logging-issues-identification.zip" +} diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py new file mode 100644 index 00000000..ea2180c9 --- /dev/null +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py @@ -0,0 +1,85 @@ +import json +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.redshift import RedshiftLoggingChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, RedshiftLoggingIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate Redshift logging enabled or not. 
""" + set_logging(level=logging.DEBUG) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # region = payload['region'] + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.redshift_logging.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking logging enabled or not for Redshift clusters in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RedshiftLoggingIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"Redshift clusters in DDB:\n{open_issues.keys()}") + + checker = RedshiftLoggingChecker(account=account) + if checker.check(): + for cluster in checker.clusters: + logging.debug(f"Checking {cluster.name}") + if not cluster.is_logging: + issue = RedshiftLoggingIssue(account_id, cluster.name) + issue.issue_details.tags = cluster.tags + issue.issue_details.region = cluster.account.region + if config.redshift_logging.in_whitelist(account_id, cluster.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {cluster.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(cluster.name, None) + + logging.debug(f"Redshift Clusters in 
DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated clusters + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + except Exception: + logging.exception(f"Failed to check Redshift clusters for '{account_id} ({account_name})'") + return + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain audit logging checking") + + logging.debug(f"Checked Redshift Clusters for '{account_id} ({account_name})'") + + diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py new file mode 100644 index 00000000..29358f2d --- /dev/null +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py @@ -0,0 +1,36 @@ +import os +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find clusters logging enabled or not. 
""" + set_logging(level=logging.INFO) + logging.debug("Initiating Redshift Cluster logging checking") + + try: + sns_arn = os.environ["SNS_REDSHIFT_LOGGING_ARN"] + config = Config() + + if not config.redshift_logging.enabled: + logging.debug("Redshift cluster logging checking disabled") + return + + logging.debug("Iterating over each account to initiate Redshift cluster logging check") + for account_id, account_name in config.redshift_logging.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating Redshift cluster logging checking for '{account_name}'") + Sns.publish(sns_arn, payload) + + except Exception: + logging.exception("Error occurred while initiation of Redshift cluster logging checking") + return + + logging.debug("Redshift clusters logging checking initiation done") diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py new file mode 100644 index 00000000..6d068096 --- /dev/null +++ b/hammer/library/aws/redshift.py @@ -0,0 +1,361 @@ +import json +import logging +import mimetypes +import pathlib + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup +from collections import namedtuple +from library.aws.utility import convert_tags + + +# structure which describes EC2 instance +RedshiftCluster_Details = namedtuple('RedshiftCluster_Details', [ + # cluster_id + 'id', + # subnet_group_id + 'subnet_group_name' + ]) + +class RedshiftClusterOperations(object): + + @classmethod + @timeit + def get_redshift_vpc_security_groups(cls, redshift_client, group_id): + """ Retrieve redshift clusters meta data with security group attached + + :param redshift_client: boto3 redshift client + :param group_id: security group id + 
+ :return: list with redshift clusters details + """ + # describe rds instances with security group attached + redshift_clusters = [] + + # this will include Clusters + clusters_res = redshift_client.describe_clusters() + for cluster in clusters_res["Clusters"]: + active_security_groups = [sg["VpcSecurityGroupId"] for sg in cluster['VpcSecurityGroups'] if + sg["Status"] == "active"] + if group_id in active_security_groups: + redshift_clusters.append(RedshiftCluster_Details( + id=cluster["ClusterIdentifier"], + subnet_group_name=cluster["ClusterSubnetGroupName"] + )) + + return redshift_clusters + + @staticmethod + def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): + """ + Sets the cluster encryption using Server side encryption. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to encrypt + :param kms_master_key_id: Redshift cluster encryption key. default value is none. + + :return: nothing + """ + + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + Encrypted=True + ) + + @staticmethod + def set_cluster_access(redshift_client, cluster_id, public_access): + """ + Sets the cluster access as private. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to make as private + :param public_access: Redshift cluster public access True or False. + + :return: nothing + """ + + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + PubliclyAccessible=public_access + ) + + @staticmethod + def enable_logging(redshift_client, cluster_id, s3_bucket): + """ + Enable cluster audit logging. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to make as private + :param s3_bucket: S3 bucket to store audit logs. 
+ + :return: nothing + """ + + redshift_client.enable_logging( + ClusterIdentifier=cluster_id, + BucketName=s3_bucket + ) + + +class RedshiftCluster(object): + """ + Basic class for Redshift Cluster. + Encapsulates `Owner`/`Tags`. + """ + def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_logging=None): + """ + :param account: `Account` instance where redshift cluster is present + + :param name: `Name` of cluster id + :param tags: tags if redshift cluster tags (as AWS returns) + :param is_encrypted: encrypted or not. + """ + self.account = account + self.name =name + self.tags = convert_tags(tags) + self.is_encrypt = is_encrypted + self.is_public = is_public + self.is_logging = is_logging + + def encrypt_cluster(self, kms_key_id=None): + """ + Encrypt bucket with SSL encryption. + :return: nothing + """ + try: + RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id) + except Exception: + logging.exception(f"Failed to encrypt {self.name} cluster ") + return False + + return True + + def modify_cluster(self, public_access): + """ + Modify cluster as private. + :return: nothing + """ + try: + RedshiftClusterOperations.set_cluster_access(self.account.client("redshift"), self.name, public_access) + except Exception: + logging.exception(f"Failed to modify {self.name} cluster ") + return False + + return True + + def enable_cluster_logging(self, s3_bucket): + """ + Enable audit logging for cluster. + + @:param s3_bucket: s3 bucket to store audit logs. + :return: nothing + """ + try: + RedshiftClusterOperations.enable_logging(self.account.client("redshift"), self.name, s3_bucket) + except Exception: + logging.exception(f"Failed to enable logging for {self.name} cluster ") + return False + + return True + + +class RedshiftEncryptionChecker(object): + """ + Basic class for checking Redshift cluster in account. + Encapsulates discovered Redshift cluster. 
+ """ + def __init__(self, account): + """ + :param account: `Account` instance with Redshift cluster to check + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + def check(self, clusters=None): + """ + Walk through Redshift clusters in the account and check them (encrypted or not). + Put all gathered clusters to `self.clusters`. + + :param clusters: list with Redshift cluster names to check, if it is not supplied - all clusters must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + is_encrypted = cluster_details["Encrypted"] + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_encrypted=is_encrypted) + self.clusters.append(cluster) + return True + + +class RedshiftClusterPublicAccessChecker(object): + + """ + Basic class for checking redshift clusters public access in account/region. + Encapsulates check settings and discovered clusters. 
+ """ + def __init__(self, account): + """ + :param account: `Account` clusters to check + + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + + def check(self, clusters=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + is_public = cluster_details["PubliclyAccessible"] + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_public=is_public) + self.clusters.append(cluster) + + return True + + +class RedshiftLoggingChecker(object): + """ + Basic class for checking redshift cluster's logging enabled or not in account/region. + Encapsulates check settings and discovered clusters. 
+ """ + + def __init__(self, account): + """ + :param account: `Account` clusters to check + + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + def check(self, clusters=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + logging_enabled = True + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_logging=logging_enabled) + self.clusters.append(cluster) + + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 3e3ba1cc..0e5a8334 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -63,6 +63,7 @@ def __init__(self, # RDS 
encryption issue config self.rdsEncrypt = ModuleConfig(self._config, "rds_encryption") + self.redshift_logging = ModuleConfig(self._config, "redshift_logging") self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 433b9b5a..b2a6e2fe 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -223,6 +223,11 @@ def __init__(self, *args): super().__init__(*args) +class RedshiftLoggingIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + + class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py new file mode 100644 index 00000000..aaa53e64 --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -0,0 +1,160 @@ +""" +Class to create redshift cluster logging issue tickets. 
+""" +import sys +import logging + + +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.ddb_issues import IssueStatus, RedshiftLoggingIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateRedshiftLoggingIssueTickets(object): + """ Class to create redshift cluster logging issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_redshift_logging(self): + """ Class method to create jira tickets """ + table_name = self.config.redshift_logging.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.aws.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, RedshiftLoggingIssue) + for issue in issues: + cluster_id = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + # issue has been already reported + if issue.timestamps.reported is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Redshift logging enabled '{cluster_id}' issue") + + comment = (f"Closing {issue.status.value} Redshift cluster logging enabled '{cluster_id}' issue " + f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status == IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira 
ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + labels=IssueStatus.Whitelisted + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {cluster_id}") + slack.report_issue( + msg=f"Redshift cluster logging '{cluster_id}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + logging.debug(f"No changes for '{cluster_id}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting Redshift cluster logging '{cluster_id}' issue") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_summary = (f"Redshift logging is not enabled for '{cluster_id}'" + f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + issue_description = ( + f"The Redshift Cluster audit logging is not enabled.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*Redshift Cluster ID*: {cluster_id}\n") + + auto_remediation_date = (self.config.now + self.config.redshift_logging.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += 
JiraOperations.build_tags_table(tags) + + issue_description += "\n" + issue_description += ( + f"*Recommendation*: " + f"Enable logging for Redshift cluster.") + + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["redshift-logging"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + + issue.jira_details.owner = owner + issue.jira_details.business_unit = bu + issue.jira_details.product = product + + slack.report_issue( + msg=f"Discovered {issue_summary}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + IssueOperations.set_status_reported(ddb_table, issue) + + +if __name__ == '__main__': + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + try: + obj = CreateRedshiftLoggingIssueTickets(config) + obj.create_tickets_redshift_logging() + except Exception: + logging.exception("Failed to create redshift cluster logging tickets") From a4a8f44d887f145424d2e40b91a67a0ca61fef48 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 1 Mar 2019 18:59:06 +0530 Subject: [PATCH 005/193] Updated with redshift public access changes. Updated with redshift public access changes. 
--- ...describe_redshift_cluster_public_access.py | 2 +- hammer/library/aws/redshift.py | 120 ++++++++++++++++-- .../clean_redshift_public_access.py | 15 ++- 3 files changed, 120 insertions(+), 17 deletions(-) diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py index 70f741dc..a02a7d22 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -55,7 +55,7 @@ def lambda_handler(event, context): issue = RedshiftPublicAccessIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if config.redshiftEncrypt.in_whitelist(account_id, cluster.name): + if config.redshift_public_access.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 3d9ce144..6d068096 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -11,6 +11,7 @@ from library.utility import timeit from library.aws.security_groups import SecurityGroup from collections import namedtuple +from library.aws.utility import convert_tags # structure which describes EC2 instance @@ -63,17 +64,16 @@ def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): redshift_client.modify_cluster( ClusterIdentifier=cluster_id, - Encryption=True, - KmsKeyId=kms_master_key_id + Encrypted=True ) @staticmethod def set_cluster_access(redshift_client, cluster_id, public_access): """ - Sets the cluster encryption using Server side encryption. + Sets the cluster access as private. 
:param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to encrypt + :param cluster_id: Redshift cluster name which to make as private :param public_access: Redshift cluster public access True or False. :return: nothing @@ -84,13 +84,30 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) + @staticmethod + def enable_logging(redshift_client, cluster_id, s3_bucket): + """ + Enable cluster audit logging. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to make as private + :param s3_bucket: S3 bucket to store audit logs. + + :return: nothing + """ + + redshift_client.enable_logging( + ClusterIdentifier=cluster_id, + BucketName=s3_bucket + ) + class RedshiftCluster(object): """ Basic class for Redshift Cluster. Encapsulates `Owner`/`Tags`. """ - def __init__(self, account, name, tags, is_encrypted=None, is_public=None): + def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_logging=None): """ :param account: `Account` instance where redshift cluster is present @@ -100,9 +117,10 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None): """ self.account = account self.name =name - self.tags = tags + self.tags = convert_tags(tags) self.is_encrypt = is_encrypted self.is_public = is_public + self.is_logging = is_logging def encrypt_cluster(self, kms_key_id=None): """ @@ -119,19 +137,34 @@ def encrypt_cluster(self, kms_key_id=None): def modify_cluster(self, public_access): """ - Encrypt bucket with SSL encryption. + Modify cluster as private. 
:return: nothing """ try: RedshiftClusterOperations.set_cluster_access(self.account.client("redshift"), self.name, public_access) except Exception: - logging.exception(f"Failed to encrypt {self.name} cluster ") + logging.exception(f"Failed to modify {self.name} cluster ") + return False + + return True + + def enable_cluster_logging(self, s3_bucket): + """ + Enable audit logging for cluster. + + @:param s3_bucket: s3 bucket to store audit logs. + :return: nothing + """ + try: + RedshiftClusterOperations.enable_logging(self.account.client("redshift"), self.name, s3_bucket) + except Exception: + logging.exception(f"Failed to enable logging for {self.name} cluster ") return False return True -class RedshiftClusterChecker(object): +class RedshiftEncryptionChecker(object): """ Basic class for checking Redshift cluster in account. Encapsulates discovered Redshift cluster. @@ -256,4 +289,73 @@ def check(self, clusters=None): is_public=is_public) self.clusters.append(cluster) + return True + + +class RedshiftLoggingChecker(object): + """ + Basic class for checking redshift cluster's logging enabled or not in account/region. + Encapsulates check settings and discovered clusters. + """ + + def __init__(self, account): + """ + :param account: `Account` clusters to check + + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + def check(self, clusters=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked + + :return: boolean. 
True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + logging_enabled = True + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_logging=logging_enabled) + self.clusters.append(cluster) + return True \ No newline at end of file diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index 8dc2e514..4029a8fc 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -77,6 +77,7 @@ def clean_redshift_public_access(self, batch=False): account = Account(id=account_id, name=account_name, + region= issue.issue_details.region, role_name=self.config.aws.role_name_reporting) if account.session is None: continue @@ -87,20 +88,20 @@ def clean_redshift_public_access(self, batch=False): if cluster_id is None: logging.debug(f"Redshift Cluster {cluster_details.name} was removed by user") - elif cluster_details.is_public: 
+ elif not cluster_details.is_public: logging.debug(f"Cluster {cluster_details.name} public access issue was remediated by user") else: logging.debug(f"Remediating '{cluster_details.name}' public access") - # kms_key_id = None + remediation_succeed = True - if cluster_details.modify_cluster(True): + if cluster_details.modify_cluster(False): comment = (f"Cluster '{cluster_details.name}' public access issue " - f"in '{account_name} / {account_id}' account " + f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region " f"was remediated by hammer") else: remediation_succeed = False comment = (f"Failed to remediate cluster '{cluster_details.name}' public access issue " - f"in '{account_name} / {account_id}' account " + f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region " f"due to some limitations. Please, check manually") jira.remediate_issue( @@ -117,10 +118,10 @@ def clean_redshift_public_access(self, batch=False): ) IssueOperations.set_status_remediated(ddb_table, issue) except Exception: - logging.exception(f"Error occurred while updating cluster '{cluster_details.name}' public access " + logging.exception(f"Error occurred while updating cluster '{cluster_id}' public access " f"in '{account_name} / {account_id}'") else: - logging.debug(f"Skipping '{cluster_details.name}' " + logging.debug(f"Skipping '{cluster_id}' " f"({retention_period - no_of_days_issue_created} days before remediation)") From a6aba8f123ffd2bbbdfa43289a14ef1b1e5040a2 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 12 Mar 2019 14:01:05 +0530 Subject: [PATCH 006/193] Updated with ECS insecure sgs. Updated with ECS insecure sgs. 
--- hammer/library/aws/ecs.py | 65 +++++++++++++++++++ .../create_security_groups_tickets.py | 34 +++++++++- 2 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 hammer/library/aws/ecs.py diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py new file mode 100644 index 00000000..15294420 --- /dev/null +++ b/hammer/library/aws/ecs.py @@ -0,0 +1,65 @@ +import json +import logging +import mimetypes +import pathlib + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup +from collections import namedtuple +from library.aws.utility import convert_tags + + +# structure which describes EC2 instance +ECSCluster_Details = namedtuple('ECSCluster_Details', [ + # cluster_id + 'cluster_arn', + # subnet_group_id + 'cluster_instance_arn' + ]) + + +class ECSClusterOperations(object): + @classmethod + @timeit + def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): + """ Retrieve ecs clusters meta data with security group attached + + :param ec2_client: boto3 ec2 client + :param ecs_client: boto3 ECS client + :param group_id: security group id + + :return: list with ecs clusters details + """ + # describe ecs instances with security group attached + ecs_instances = [] + + # this will include Clusters + clusters_res = ecs_client.list_clusters() + for cluster_arn in clusters_res["clusterArns"]: + list_container_instances = ecs_client.list_container_instances( + cluster=cluster_arn + ) + + for instance_arn in list_container_instances["containerInstanceArns"]: + container_instance = ecs_client.describe_container_instances( + cluster=cluster_arn, + containerInstances=[ + instance_arn, + ] + ) + + ec2_instance_id = container_instance[0]["ec2InstanceId"] + ec2_instance = 
ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + + if group_id in str(ec2_instance["SecurityGroups"]): + ecs_instances.append(ECSCluster_Details( + cluster_arn=cluster_arn, + cluster_instance_arn=instance_arn + )) + + return ecs_instances diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 31b4c4ef..d17a7c9b 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -21,6 +21,7 @@ from library.aws.utility import Account from library.aws.security_groups import RestrictionStatus from library.aws.rds import RDSOperations +from library.aws.ecs import ECSClusterOperations from library.utility import SingletonInstance, SingletonInstanceException @@ -214,6 +215,23 @@ def build_elb_instances_table(elb_details): return elb_instance_details, in_use + @staticmethod + def build_ecs_clusters_table(ecs_clusters): + cluster_details = "" + in_use = False + + if len(ecs_clusters) > 0: + in_use = True + cluster_details += ( + f"\n*ECS Clustes:*\n" + f"||ECS Cluster ID||ECS Instance ARN||\n") + for cluster in ecs_clusters: + cluster_details += ( + f"|{cluster.cluster_arn}|{cluster.cluster_instance_arn}|\n" + ) + + return cluster_details, in_use + def create_tickets_securitygroups(self): """ Class function to create jira tickets """ table_name = self.config.sg.ddb_table_name @@ -307,7 +325,7 @@ def create_tickets_securitygroups(self): ec2_client = account.client("ec2") if account.session is not None else None sg_instance_details = ec2_owner = ec2_bu = ec2_product = None - sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = None + sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = sg_in_use_ecs = None sg_public = sg_blind_public = False rds_client = account.client("rds") if account.session is not None 
else None @@ -316,6 +334,7 @@ def create_tickets_securitygroups(self): iam_client = account.client("iam") if account.session is not None else None + ecs_client = account.client("ecs") if account.session is not None else None rds_instance_details = elb_instance_details = None if ec2_client is not None: @@ -338,7 +357,16 @@ def create_tickets_securitygroups(self): except Exception: logging.exception(f"Failed to build RDS details for '{group_name} / {group_id}' in {account}") - sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds + if ecs_client is not None: + try: + ecs_instances = ECSClusterOperations.get_ecs_instance_security_groups(ec2_client, ecs_client, group_id) + sg_ecs_details, sg_in_use_ecs = self.build_ecs_clusters_table(ecs_instances) + + except Exception: + logging.exception( + f"Failed to build ECS Cluster details for '{group_name} / {group_id}' in {account}") + + sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_ecs owner = group_owner if group_owner is not None else ec2_owner bu = group_bu if group_bu is not None else ec2_bu @@ -432,6 +460,8 @@ def create_tickets_securitygroups(self): issue_description += f"{instance_profile_details if instance_profile_details else ''}" + issue_description += f"{sg_ecs_details if sg_ecs_details else ''}" + issue_description += ( f"*Recommendation*: " f"Allow access only for a minimum set of required ip addresses/ranges from [RFC1918|https://tools.ietf.org/html/rfc1918]. " From 72e1bfd59da4ea1394fba876c7ae635936db16f3 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 14 Mar 2019 14:34:50 +0530 Subject: [PATCH 007/193] Updated with ecs logging issue changes. Updated with ecs logging changes. 
--- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 32 + deployment/cf-templates/identification.json | 1220 +++++++++++++---- deployment/configs/config.json | 7 + deployment/configs/whitelist.json | 5 +- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 8 +- .../describe_ecs_logging_issues.py | 84 ++ .../initiate_to_desc_ecs_logging_issues.py | 36 + hammer/library/aws/ecs.py | 154 +++ hammer/library/config.py | 3 + hammer/library/ddb_issues.py | 5 + .../create_ecs_logging_issue_tickets.py | 160 +++ 13 files changed, 1464 insertions(+), 256 deletions(-) create mode 100644 hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py create mode 100644 hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py create mode 100644 hammer/library/aws/ecs.py create mode 100644 hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index f4219872..9c323470 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification 
iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ecs-logging-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 2ab4843c..a8fbe71c 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -428,6 +428,38 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } + }, + "DynamoDBECSLogging": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ecs-logging" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 431bf3f4..423b3211 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -26,7 +26,8 @@ "SourceIdentificationIAMUserInactiveKeys", "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", - "SourceIdentificationRDSSnapshots" + "SourceIdentificationRDSSnapshots", + "SourceIdentificationECSLogging" ] }, { @@ -88,6 +89,9 @@ }, "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" + }, + 
"SourceIdentificationECSLogging":{ + "default": "Relative path to disabled logging ECS sources" } } } @@ -176,6 +180,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationECSLogging": { + "Type": "String", + "Default": "ecs-logging-issues-identification.zip" } }, "Conditions": { @@ -230,6 +238,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricECSLoggingError": { + "value": "ECSLoggingError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -302,6 +313,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameECSLogging": { + "value": "describe-ecs-logging-sns" + }, + "SNSTopicNameECSLogging": { + "value": "describe-ecs-logging-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -379,6 +396,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateECSLoggingLambdaFunctionName": { + "value": "initiate-ecs-logging" + }, + "IdentifyECSLoggingLambdaFunctionName": { + "value": "describe-ecs-logging" } } }, @@ -419,7 +442,6 @@ "RetentionInDays": "7" } }, - "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -467,7 +489,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - "LambdaInitiateSGEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], @@ -520,7 +541,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } } }, - "LambdaEvaluateSG": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateSG"], @@ -584,7 +604,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } } }, - "LambdaInitiateCloudTrailsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", 
"LogGroupLambdaInitiateCloudTrailsEvaluation"], @@ -818,7 +837,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } } }, - "LambdaInitiateS3PolicyEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], @@ -871,7 +889,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } } }, - "LambdaEvaluateS3Policy": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], @@ -935,7 +952,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } } }, - "LambdaInitiateIAMUserKeysRotationEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], @@ -1286,7 +1302,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } } }, - "LambdaInitiateEBSSnapshotsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], @@ -1339,7 +1354,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } } }, - "LambdaEvaluateEBSSnapshots": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], @@ -1403,7 +1417,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } } }, - "LambdaInitiateRDSSnapshotsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], @@ -1456,7 +1469,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } } }, - "LambdaEvaluateRDSSnapshots": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], @@ -1520,7 +1532,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } } }, - "LambdaInitiateSQSPublicPolicyEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": 
["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], @@ -1573,7 +1584,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } } }, - "LambdaEvaluateSQSPublicPolicy": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], @@ -1637,7 +1647,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } } }, - "LambdaInitiateS3EncryptionEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], @@ -1690,7 +1699,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3EncryptionEvaluation" } } }, - "LambdaEvaluateS3Encryption": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], @@ -1738,7 +1746,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } } }, - "LambdaInitiateRDSEncryptionEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], @@ -1791,7 +1798,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } } }, - "LambdaEvaluateRDSEncryption": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], @@ -1840,238 +1846,890 @@ } }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", 
- "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, 
"InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] 
]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": 
"AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": 
["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "LambdaInitiateECSLoggingEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LogGroupLambdaInitiateECSLoggingEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationECSLogging" } + }, + "Environment": { + "Variables": { + "SNS_ECS_LOGGING_ARN": { "Ref": "SNSNotifyLambdaEvaluateECSLogging" } + } + }, + "Description": "Lambda function for initiate to identify 
disabled logging of ECS task definitions.",
"Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateECSLogging": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyECSLoggingLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateECSLogging": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateECSLogging"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSLogging" } + } + }, + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], + "Properties": { + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] + } + }, + "EventInitiateEvaluationS3IAM": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", + "LambdaInitiateIAMUserInactiveKeysEvaluation", + "LambdaInitiateS3EncryptionEvaluation", + "LambdaInitiateS3ACLEvaluation", + "LambdaInitiateS3PolicyEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + 
"Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, + "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, + "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, + "Id": "LambdaInitiateS3EncryptionEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, + "Id": "LambdaInitiateS3ACLEvaluation" + }, + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, + "Id": "LambdaInitiateS3PolicyEvaluation" + } + ] + } + }, + "EventInitiateEvaluationCloudTrails": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, + "Id": "LambdaInitiateCloudTrailsEvaluation" + } + ] + } + }, + "EventInitiateEvaluationEBSVolumes": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, + "Id": "LambdaInitiateEBSVolumesEvaluation" + } + ] + } + }, + "EventInitiateEvaluationEBSSnapshots": { + "Type": "AWS::Events::Rule", + 
"DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, + "Id": "LambdaInitiateEBSSnapshotsEvaluation" + } + ] + } + }, + "EventInitiateEvaluationRDSSnapshots": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, + "Id": "LambdaInitiateRDSSnapshotsEvaluation" + } + ] + } + }, + "EventInitiateEvaluationSG": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateSGEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, + "Id": "LambdaInitiateSGEvaluation" + } + ] + } + }, + "EventInitiateEvaluationSQSPublicPolicy": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", + "Name": 
{"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, + "Id": "LambdaInitiateSQSPublicPolicyEvaluation" + } + ] + } + }, + "EventInitiateEvaluationRDSEncryption": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, + "Id": "LambdaInitiateRDSEncryptionEvaluation" + } + ] + } + }, + + "EventInitiateEvaluationECSLogging": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateECSLoggingEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate logging issue ECS task definition evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSLogging"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateECSLoggingEvaluation", "Arn"] }, + "Id": "LambdaInitiateECSLoggingEvaluation" + } + ] + } + }, + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, 
".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} + } + }, + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateS3PolicyEvaluation", 
"EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } + } + }, + "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } @@ -2148,6 +2806,16 @@ } }, + "PermissionToInvokeLambdaInitiateECSLoggingEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateECSLoggingEvaluation", "EventInitiateEvaluationECSLogging"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateECSLoggingEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSLogging", "Arn"] } + } + }, "SNSNotifyLambdaEvaluateSG": { "Type": "AWS::SNS::Topic", "DependsOn": ["LambdaEvaluateSG"], @@ -2364,7 +3032,24 @@ }] } }, - + "SNSNotifyLambdaEvaluateECSLogging": { + "Type": "AWS::SNS::Topic", + "DependsOn": "LambdaEvaluateECSLogging", + "Properties": { + "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECsLogging", "value"] } ] + ]}, + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSLogging", "value"] } ] + ]}, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, "PermissionToInvokeLambdaEvaluateSgSNS": { "Type": "AWS::Lambda::Permission", 
"DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], @@ -2485,7 +3170,16 @@ "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } } }, - + "PermissionToInvokeLambdaEvaluateECSLoggingSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LambdaEvaluateECSLogging"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSLogging" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] } + } + }, "SNSIdentificationErrors": { "Type": "AWS::SNS::Topic", "Properties": { @@ -2513,7 +3207,6 @@ "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - "AlarmErrorsLambdaBackupDDB": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], @@ -3088,6 +3781,29 @@ "Threshold": 0, "TreatMissingData": "notBreaching" } + }, + "AlarmErrorsLambdaECSLoggingEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSLogging"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSLogging" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateECSLogging" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } } }, "Outputs": { diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 74087148..b9aa29d1 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -140,4 +140,11 @@ "ddb.table_name": "hammer-rds-unencrypted", "reporting": true } + "ecs_logging": { + "enabled": true, + 
"ddb.table_name": "djif-hammer-ecs-logging", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 6d31497a..de566d8d 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -43,5 +43,8 @@ "s3_encryption": { }, "rds_encryption": { - } + }, + "ecs_logging":{ + + } } \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 654aaffa..c3e10bef 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -14,7 +14,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ebs-public-snapshots-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.ecs-logging-issues-identification" ] tags = "${var.tags}" @@ -40,6 +41,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationECSLogging = "${aws_s3_bucket_object.ecs-logging-issues-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index d0791d6d..177e98e7 100755 --- 
a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -86,4 +86,10 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" -} \ No newline at end of file +} + +resource "aws_s3_bucket_object" "ecs-logging-issues-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("ecs-logging-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/ecs-logging-issues-identification.zip"))}")}" + source = "${path.module}/../../../packages/ecs-logging-issues-identification.zip" +} diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py new file mode 100644 index 00000000..fdb440d8 --- /dev/null +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -0,0 +1,84 @@ +import json +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.ecs import ECSLoggingChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, ECSLoggingIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate ECS logging enabled or not. 
""" + set_logging(level=logging.DEBUG) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # region = payload['region'] + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.ecs_logging.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking logging enabled or not for ecs task definitions in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ECSLoggingIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") + + checker = ECSLoggingChecker(account=account) + if checker.check(): + for task_definition in checker.task_definitions: + logging.debug(f"Checking {task_definition.name}") + if not task_definition.is_logging: + issue = ECSLoggingIssue(account_id, task_definition.name) + issue.issue_details.region = task_definition.account.region + if config.ecs_logging.in_whitelist(account_id, task_definition.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {task_definition.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(task_definition.name, None) + + logging.debug(f"ECS task definitions 
in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated task definitions + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + except Exception: + logging.exception(f"Failed to check ECS task definitions for '{account_id} ({account_name})'") + return + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain audit logging checking") + + logging.debug(f"Checked ECS task definitions for '{account_id} ({account_name})'") + + diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py new file mode 100644 index 00000000..166ce9a6 --- /dev/null +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py @@ -0,0 +1,36 @@ +import os +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find clusters logging enabled or not. 
""" + set_logging(level=logging.INFO) + logging.debug("Initiating ECS Cluster logging checking") + + try: + sns_arn = os.environ["SNS_ECS_LOGGING_ARN"] + config = Config() + + if not config.ecs_logging.enabled: + logging.debug("ECS cluster logging checking disabled") + return + + logging.debug("Iterating over each account to initiate ECS cluster logging check") + for account_id, account_name in config.ecs_logging.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating ECS cluster logging checking for '{account_name}'") + Sns.publish(sns_arn, payload) + + except Exception: + logging.exception("Error occurred while initiation of ECS cluster logging checking") + return + + logging.debug("ECS clusters logging checking initiation done") diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py new file mode 100644 index 00000000..545ea7b2 --- /dev/null +++ b/hammer/library/aws/ecs.py @@ -0,0 +1,154 @@ +import json +import logging +import mimetypes +import pathlib + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup +from collections import namedtuple + + +# structure which describes EC2 instance +ECSCluster_Details = namedtuple('ECSCluster_Details', [ + # cluster_id + 'cluster_arn', + # subnet_group_id + 'cluster_instance_arn' + ]) + + +class ECSClusterOperations(object): + @classmethod + @timeit + def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): + """ Retrieve ecs clusters meta data with security group attached + + :param ec2_client: boto3 ec2 client + :param ecs_client: boto3 ECS client + :param group_id: security group id + + :return: list with ecs clusters details + """ + # describe ecs instances 
with security group attached + ecs_instances = [] + + # this will include Clusters + clusters_res = ecs_client.list_clusters() + for cluster_arn in clusters_res["clusterArns"]: + list_container_instances = ecs_client.list_container_instances( + cluster=cluster_arn + ) + + for instance_arn in list_container_instances["containerInstanceArns"]: + container_instance = ecs_client.describe_container_instances( + cluster=cluster_arn, + containerInstances=[ + instance_arn, + ] + ) + + ec2_instance_id = container_instance[0]["ec2InstanceId"] + ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + + if group_id in str(ec2_instance["SecurityGroups"]): + ecs_instances.append(ECSCluster_Details( + cluster_arn=cluster_arn, + cluster_instance_arn=instance_arn + )) + + return ecs_instances + + +class ECSTaskDefinitions(object): + """ + Basic class for ECS task definitions. + + """ + def __init__(self, account, name, arn, is_logging=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param is_logging: logging enabled or not. + """ + self.account = account + self.name = name + self.arn = arn + self.is_logging = is_logging + + +class ECSLoggingChecker(object): + """ + Basic class for checking ecs task definition's logging enabled or not in account/region. + Encapsulates check settings and discovered task definitions. + """ + + def __init__(self, account): + """ + :param account: `Account` task definitions to check + + """ + self.account = account + self.task_definitions = [] + + def task_definition_arns(self, name): + """ + :return: `ECS task definition' by arn + """ + for task_definition in self.task_definitions: + if task_definition.name == name: + return task_definition + return None + + def check(self, task_definitions=None): + """ + Walk through clusters in the account/region and check them. 
+ Put all gathered clusters to `self.clusters`. + + :param task_definitions: list with task definitions to check, if it is not supplied - all taks definitions must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering, so get all task definition family details for account + response = self.account.client("ecs").list_task_definition_families() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ecs:{err.operation_name})") + else: + logging.exception(f"Failed to list task definitions in {self.account}") + return False + + if "families" in response: + for task_definition_name in response["families"]: + if task_definitions is not None and task_definition_name not in task_definitions: + continue + + logging_enabled = False + task_definition = self.account.client("ecs").describe_task_definition( + taskDefinition=task_definition_name + )['taskDefinition'] + task_definition_arn = task_definition["taskDefinitionArn"] + if "containerDefinitions" in task_definition: + for container_definition in task_definition['containerDefinitions']: + if container_definition.get('logConfiguration') is None: + logging_enabled = False + else: + logging_enabled = True + break + + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + is_logging=logging_enabled) + self.task_definitions.append(task_definition_details) + + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 3e3ba1cc..30a458fd 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -63,6 +63,9 @@ def __init__(self, # RDS encryption issue config self.rdsEncrypt = ModuleConfig(self._config, "rds_encryption") + # ECS logging issue config + self.ecs_logging = ModuleConfig(self._config, "ecs_logging") 
+ self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 433b9b5a..15cc9420 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -223,6 +223,11 @@ def __init__(self, *args): super().__init__(*args) +class ECSLoggingIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + + class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py new file mode 100644 index 00000000..05b2011c --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -0,0 +1,160 @@ +""" +Class to create ecs task definition logging issue tickets. +""" +import sys +import logging + + +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.ddb_issues import IssueStatus, ECSLoggingIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateECSLoggingIssueTickets(object): + """ Class to create ECS task definition logging issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_ecs_logging(self): + """ Class method to create jira tickets """ + table_name = self.config.ecs_logging.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.aws.accounts.items(): 
+ logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSLoggingIssue) + for issue in issues: + task_definition_arn = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + # issue has been already reported + if issue.timestamps.reported is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} ECS logging enabled '{task_definition_arn}' issue") + + comment = (f"Closing {issue.status.value} ECS logging enabled '{task_definition_arn}' issue " + f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status == IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + labels=IssueStatus.Whitelisted + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_arn}") + slack.report_issue( + msg=f"ECS logging '{task_definition_arn}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + 
logging.debug(f"No changes for '{task_definition_arn}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting ECS logging '{task_definition_arn}' issue") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_summary = (f"ECS logging is not enabled for '{task_definition_arn}'" + f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + issue_description = ( + f"The ECS logging is not enabled.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*ECS Task Definition*: {task_definition_arn}\n") + + auto_remediation_date = (self.config.now + self.config.ecs_logging.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += JiraOperations.build_tags_table(tags) + + issue_description += "\n" + issue_description += ( + f"*Recommendation*: " + f"Enable logging for ECS task definition.") + + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["ecs-logging"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + + issue.jira_details.owner = owner + issue.jira_details.business_unit = bu + issue.jira_details.product = product + + slack.report_issue( + msg=f"Discovered {issue_summary}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + IssueOperations.set_status_reported(ddb_table, issue) + + +if __name__ == '__main__': + module_name = 
sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + try: + obj = CreateECSLoggingIssueTickets(config) + obj.create_tickets_ecs_logging() + except Exception: + logging.exception("Failed to create ecs logging tickets") From 55f104c287f264d972bcd80a17cdae20c9880bf7 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 26 Mar 2019 13:54:11 +0530 Subject: [PATCH 008/193] Updated with ECS changes. Updated with ECS changes. --- deployment/build_packages.sh | 6 +- deployment/cf-templates/ddb.json | 41 +- deployment/cf-templates/identification.json | 799 ++++-------------- deployment/configs/whitelist.json | 10 +- .../modules/identification/sources.tf | 6 +- hammer/library/aws/ecs.py | 37 +- hammer/library/config.py | 7 +- hammer/library/ddb_issues.py | 8 +- .../create_ecs_logging_issue_tickets.py | 2 +- 9 files changed, 226 insertions(+), 690 deletions(-) diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 0652c197..98c61d54 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,11 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -<<<<<<< HEAD -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification 
s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ecs-logging-issues-identification" -======= -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" ->>>>>>> refs/remotes/origin/dev +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api ecs-logging-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index b85c7076..1c6cf8b1 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -330,6 +330,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-public-snapshots" ] ]} } }, + "DynamoDBSQSPublicPolicy": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -428,11 +429,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } }, -<<<<<<< HEAD - "DynamoDBECSLogging": { -======= "DynamoDBAMIPublicAccess": { ->>>>>>> refs/remotes/origin/dev "Type": "AWS::DynamoDB::Table", "DeletionPolicy": 
"Retain", "DependsOn": ["DynamoDBCredentials"], @@ -461,9 +458,6 @@ "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, -<<<<<<< HEAD - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ecs-logging" ] ]} -======= "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} } }, @@ -488,7 +482,38 @@ "WriteCapacityUnits": "2" }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} ->>>>>>> refs/remotes/origin/dev + } + }, + "DynamoDBECSLogging": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ecs-logging" ] ]} } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index c82990fe..39cf2bf4 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,11 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", -<<<<<<< HEAD + "SourceIdentificationAMIPublicAccess", "SourceIdentificationECSLogging" -======= - "SourceIdentificationAMIPublicAccess" ->>>>>>> refs/remotes/origin/dev ] }, { @@ -94,13 +91,11 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, -<<<<<<< HEAD - "SourceIdentificationECSLogging":{ - "default": "Relative path to disabled logging ECS sources" -======= "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI 
sources" ->>>>>>> refs/remotes/origin/dev + }, + "SourceIdentificationECSLogging":{ + "default": "Relative path to disabled logging ECS sources" } } } @@ -1925,545 +1920,53 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } } }, - -<<<<<<< HEAD - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + "LambdaEvaluateAMIPublicAccess": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } + }, + "Description": "Lambda function to describe public AMI issues.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_public_ami_issues.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ 
"arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateAMIPublicAccess": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyAMIPublicAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateAMIPublicAccess"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } + } + }, "LambdaInitiateECSLoggingEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LogGroupLambdaInitiateECSLoggingEvaluation"], @@ -2529,21 +2032,6 @@ { "Fn::FindInMap": ["NamingStandards", "IdentifyECSLoggingLambdaFunctionName", "value"] } ] ]}, "Handler": "describe_ecs_logging_issues.lambda_handler", -======= - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", ->>>>>>> refs/remotes/origin/dev "MemorySize": 256, "Timeout": "300", "Role": 
{"Fn::Join" : ["", [ "arn:aws:iam::", @@ -2555,27 +2043,18 @@ "Runtime": "python3.6" } }, -<<<<<<< HEAD "LogGroupLambdaEvaluateECSLogging": { -======= - "LogGroupLambdaEvaluateAMIPublicAccess": { ->>>>>>> refs/remotes/origin/dev "Type" : "AWS::Logs::LogGroup", "Properties" : { "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", -<<<<<<< HEAD "IdentifyECSLoggingLambdaFunctionName", -======= - "IdentifyAMIPublicAccessLambdaFunctionName", ->>>>>>> refs/remotes/origin/dev "value"] } ] ] }, "RetentionInDays": "7" } }, -<<<<<<< HEAD "SubscriptionFilterLambdaEvaluateECSLogging": { "Type" : "AWS::Logs::SubscriptionFilter", "DependsOn": ["LambdaLogsForwarder", @@ -2585,17 +2064,6 @@ "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSLogging" } -======= - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } ->>>>>>> refs/remotes/origin/dev } }, "EventBackupDDB": { @@ -2762,21 +2230,6 @@ ] } }, -<<<<<<< HEAD - - "EventInitiateEvaluationECSLogging": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateECSLoggingEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate logging issue ECS task definition evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSLogging"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": 
"IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateECSLoggingEvaluation", "Arn"] }, - "Id": "LambdaInitiateECSLoggingEvaluation" -======= "EventInitiateEvaluationAMIPublicAccess": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], @@ -2789,7 +2242,22 @@ { "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, "Id": "LambdaInitiateAMIPublicAccessEvaluation" ->>>>>>> refs/remotes/origin/dev + } + ] + } + }, + "EventInitiateEvaluationECSLogging": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateECSLoggingEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate logging issue ECS task definition evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSLogging"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateECSLoggingEvaluation", "Arn"] }, + "Id": "LambdaInitiateECSLoggingEvaluation" } ] } @@ -2936,17 +2404,6 @@ "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } } }, -<<<<<<< HEAD - - "PermissionToInvokeLambdaInitiateECSLoggingEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateECSLoggingEvaluation", "EventInitiateEvaluationECSLogging"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateECSLoggingEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSLogging", "Arn"] } -======= "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], @@ -2955,7 +2412,16 @@ "Action": 
"lambda:InvokeFunction", "Principal": "events.amazonaws.com", "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } ->>>>>>> refs/remotes/origin/dev + } + }, + "PermissionToInvokeLambdaInitiateECSLoggingEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateECSLoggingEvaluation", "EventInitiateEvaluationECSLogging"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateECSLoggingEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSLogging", "Arn"] } } }, "SNSNotifyLambdaEvaluateSG": { @@ -3174,35 +2640,38 @@ }] } }, -<<<<<<< HEAD - "SNSNotifyLambdaEvaluateECSLogging": { + "SNSNotifyLambdaEvaluateAMIPublicAccess": { "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateECSLogging", + "DependsOn": "LambdaEvaluateAMIPublicAccess", "Properties": { "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECsLogging", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] ]}, "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSLogging", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] ]}, "Subscription": [{ "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] -======= - "SNSNotifyLambdaEvaluateAMIPublicAccess": { + "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] + + }, + "Protocol": "lambda" + }] + } + }, + "SNSNotifyLambdaEvaluateECSLogging": { "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", + "DependsOn": "LambdaEvaluateECSLogging", "Properties": { "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + { "Fn::FindInMap": 
["NamingStandards", "SNSDisplayNameECSLogging", "value"] } ] ]}, "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSLogging", "value"] } ] ]}, "Subscription": [{ "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] ->>>>>>> refs/remotes/origin/dev + "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] }, "Protocol": "lambda" }] @@ -3328,16 +2797,6 @@ "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } } }, -<<<<<<< HEAD - "PermissionToInvokeLambdaEvaluateECSLoggingSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LambdaEvaluateECSLogging"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSLogging" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] } -======= "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { "Type": "AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], @@ -3346,7 +2805,16 @@ "Principal": "sns.amazonaws.com", "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } ->>>>>>> refs/remotes/origin/dev + } + }, + "PermissionToInvokeLambdaEvaluateECSLoggingSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LambdaEvaluateECSLogging"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSLogging" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] } } }, "SNSIdentificationErrors": { @@ -3951,15 +3419,6 @@ "TreatMissingData": "notBreaching" } }, -<<<<<<< HEAD - "AlarmErrorsLambdaECSLoggingEvaluation": { - 
"Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSLogging"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSLogging" }, "LambdaError" ] ]}, -======= "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], @@ -3967,16 +3426,12 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, ->>>>>>> refs/remotes/origin/dev "EvaluationPeriods": 1, "Namespace": "AWS/Lambda", "MetricName": "Errors", "Dimensions": [ { "Name": "FunctionName", -<<<<<<< HEAD - "Value": { "Ref": "LambdaEvaluateECSLogging" } -======= "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } } ], @@ -4001,7 +3456,52 @@ { "Name": "FunctionName", "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } ->>>>>>> refs/remotes/origin/dev + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaInitiateECSLoggingEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateECSLoggingEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateECSLoggingEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateECSLoggingEvaluation" } + } + ], + "Period": 3600, + "Statistic": 
"Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaECSLoggingEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSLogging"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSLogging" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateECSLogging" } } ], "Period": 3600, @@ -4012,7 +3512,6 @@ } } }, - "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index b563c1e4..7ef55627 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -43,15 +43,13 @@ "123456789012": [""] }, "s3_encryption": { + }, "rds_encryption": { -<<<<<<< HEAD + }, "ecs_logging":{ - + "__comment__": "Detects ECS task definitions which are not enabled logging - task definitions ARNs.", + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/dev-admin:2993"] } } -======= - } -} ->>>>>>> refs/remotes/origin/dev diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index a1ffc86b..b30d552a 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -79,6 +79,7 @@ resource "aws_s3_bucket_object" "sqs-public-policy-identification" { key = "lambda/${format("sqs-public-policy-identification-%s.zip", "${md5(file("${path.module}/../../../packages/sqs-public-policy-identification.zip"))}")}" source = 
"${path.module}/../../../packages/sqs-public-policy-identification.zip" } + resource "aws_s3_bucket_object" "s3-unencrypted-bucket-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("s3-unencrypted-bucket-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/s3-unencrypted-bucket-issues-identification.zip"))}")}" @@ -91,11 +92,8 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } -<<<<<<< HEAD resource "aws_s3_bucket_object" "ecs-logging-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("ecs-logging-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/ecs-logging-issues-identification.zip"))}")}" source = "${path.module}/../../../packages/ecs-logging-issues-identification.zip" -} -======= ->>>>>>> refs/remotes/origin/dev +} \ No newline at end of file diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 545ea7b2..0dfdf058 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -11,7 +11,7 @@ from library.utility import timeit from library.aws.security_groups import SecurityGroup from collections import namedtuple - +from library.aws.utility import convert_tags # structure which describes EC2 instance ECSCluster_Details = namedtuple('ECSCluster_Details', [ @@ -69,21 +69,25 @@ class ECSTaskDefinitions(object): Basic class for ECS task definitions. """ - def __init__(self, account, name, arn, is_logging=None): + def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): """ :param account: `Account` instance where ECS task definition is present :param name: name of the task definition :param arn: arn of the task definition + :param arn: tags of task definition. :param is_logging: logging enabled or not. 
""" self.account = account self.name = name self.arn = arn + self.tags = convert_tags(tags) self.is_logging = is_logging + self.is_privileged = is_privileged + self.external_image = external_image -class ECSLoggingChecker(object): +class ECSChecker(object): """ Basic class for checking ecs task definition's logging enabled or not in account/region. Encapsulates check settings and discovered task definitions. @@ -128,11 +132,14 @@ def check(self, task_definitions=None): return False if "families" in response: + tags = {} for task_definition_name in response["families"]: if task_definitions is not None and task_definition_name not in task_definitions: continue logging_enabled = False + external_image = False + is_privileged = False task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name )['taskDefinition'] @@ -143,12 +150,28 @@ def check(self, task_definitions=None): logging_enabled = False else: logging_enabled = True - break + if container_definition['privileged']: + is_privileged = True + else: + is_privileged = False + + image = container_definition['image'] + if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = True + else: + external_image = False + + if "Tags" in task_definition: + tags = task_definition["Tags"] task_definition_details = ECSTaskDefinitions(account=self.account, - name=task_definition_name, - arn=task_definition_arn, - is_logging=logging_enabled) + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) self.task_definitions.append(task_definition_details) return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index aec37852..603dac82 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -63,13 +63,10 @@ def __init__(self, # RDS encryption issue config self.rdsEncrypt = 
ModuleConfig(self._config, "rds_encryption") -<<<<<<< HEAD - # ECS logging issue config - self.ecs_logging = ModuleConfig(self._config, "ecs_logging") -======= # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") ->>>>>>> refs/remotes/origin/dev + # ECS logging issue config + self.ecs_logging = ModuleConfig(self._config, "ecs_logging") self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 70b31f46..e9e7a9ec 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -233,11 +233,11 @@ def __init__(self, *args): super().__init__(*args) -<<<<<<< HEAD -class ECSLoggingIssue(Issue): -======= class PublicAMIIssue(Issue): ->>>>>>> refs/remotes/origin/dev + def __init__(self, *args): + super().__init__(*args) + +class ECSLoggingIssue(Issue): def __init__(self, *args): super().__init__(*args) diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 05b2011c..8097a3c2 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -29,7 +29,7 @@ def create_tickets_ecs_logging(self): jira = JiraReporting(self.config) slack = SlackNotification(self.config) - for account_id, account_name in self.config.aws.accounts.items(): + for account_id, account_name in self.config.ecs_logging.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSLoggingIssue) for issue in issues: From 2e9409395d9ad2e13976bb08d763f815d508e06a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 26 Mar 2019 15:36:36 +0530 Subject: [PATCH 009/193] Updated with 
ECS privileged-access changes. Updated with ECS privileged-access changes. --- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 33 +++ deployment/cf-templates/identification.json | 243 ++++++++++++++++-- .../modules/identification/identification.tf | 3 +- .../modules/identification/sources.tf | 7 + .../describe_ecs_privileged_access_issues.py | 86 +++++++ ...te_to_desc_ecs_privileged_access_issues.py | 36 +++ hammer/library/aws/ecs.py | 169 ++++++++++++ hammer/library/config.py | 3 + hammer/library/ddb_issues.py | 5 + ...ate_ecs_privileged_access_issue_tickets.py | 160 ++++++++++++ 11 files changed, 727 insertions(+), 20 deletions(-) create mode 100644 hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py create mode 100644 hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py create mode 100644 hammer/library/aws/ecs.py create mode 100644 hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 2e00c69c..7c9ed3b8 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup 
sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api ecs-privileged-access-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 4ec7b653..ab703cef 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -330,6 +330,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-public-snapshots" ] ]} } }, + "DynamoDBSQSPublicPolicy": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -482,6 +483,38 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } + }, + "DynamoDBECSPrivilegedAccess": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ecs-privileged-access" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 3618c393..a9fcf63f 100755 --- a/deployment/cf-templates/identification.json +++ 
b/deployment/cf-templates/identification.json @@ -27,7 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationECSPrivilegedAccess" ] }, { @@ -92,6 +93,9 @@ }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" + }, + "SourceIdentificationECSPrivilegedAccess":{ + "default": "Relative path to privileged access issue ECS sources" } } } @@ -184,6 +188,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationECSPrivilegedAccess": { + "Type": "String", + "Default": "ecs-privileged-access-issues-identification.zip" } }, "Conditions": { @@ -241,6 +249,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricECSPrivilegedAccessError": { + "value": "ECSPrivilegedAccessError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -319,6 +330,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameECSPrivilegedAccess": { + "value": "describe-ecs-privileged-access-sns" + }, + "SNSTopicNameECSPrivilegedAccess": { + "value": "describe-ecs-privileged-access-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -402,6 +419,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateECSPrivilegedAccessLambdaFunctionName": { + "value": "initiate-ecs-privileged-access" + }, + "IdentifyECSPrivilegedAccessLambdaFunctionName": { + "value": "describe-ecs-privileged-access" } } }, @@ -442,7 +465,6 @@ "RetentionInDays": "7" } }, - "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -490,7 +512,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - 
"LambdaInitiateSGEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], @@ -543,7 +564,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } } }, - "LambdaEvaluateSG": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateSG"], @@ -607,7 +627,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } } }, - "LambdaInitiateCloudTrailsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], @@ -841,7 +860,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } } }, - "LambdaInitiateS3PolicyEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], @@ -894,7 +912,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } } }, - "LambdaEvaluateS3Policy": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], @@ -958,7 +975,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } } }, - "LambdaInitiateIAMUserKeysRotationEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], @@ -1309,7 +1325,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } } }, - "LambdaInitiateEBSSnapshotsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], @@ -1362,7 +1377,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } } }, - "LambdaEvaluateEBSSnapshots": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], @@ -1426,7 +1440,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } } }, - "LambdaInitiateRDSSnapshotsEvaluation": { "Type": "AWS::Lambda::Function", 
"DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], @@ -1479,7 +1492,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } } }, - "LambdaEvaluateRDSSnapshots": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], @@ -1595,7 +1607,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } } }, - "LambdaEvaluateSQSPublicPolicy": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], @@ -1659,7 +1670,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } } }, - "LambdaInitiateS3EncryptionEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], @@ -1759,7 +1769,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } } }, - "LambdaInitiateRDSEncryptionEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], @@ -1812,7 +1821,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } } }, - "LambdaEvaluateRDSEncryption": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], @@ -1912,7 +1920,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } } }, - "LambdaEvaluateAMIPublicAccess": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], @@ -1960,6 +1967,105 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } } }, + "LambdaInitiateECSPrivilegedAccessEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluateECSPrivilegedAccess", "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationECSPrivilegedAccess" } + 
}, + "Environment": { + "Variables": { + "SNS_ECS_PRIVILEGED_ACCESS_ARN": { "Ref": "SNSNotifyLambdaEvaluateECSPrivilegedAccess" } + } + }, + "Description": "Lambda function for initiate to identify privileged access enabled of ECS task definition.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateECSPrivilegedAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "initiate_to_desc_ecs_privileged_access_issues.lambda_handler", + "MemorySize": 128, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "InitiateECSPrivilegedAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateECSPrivilegedAccessEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation" } + } + }, + "LambdaEvaluateECSPrivilegedAccess": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateECSPrivilegedAccess"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationECSPrivilegedAccess" } + }, + "Description": "Lambda function to describe priviled access enabled ECS task difinitions.", 
+ "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyECSPrivilegedAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_ecs_privileged_access_issues.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateECSPrivilegedAccess": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyECSPrivilegedAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateECSPrivilegedAccess": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateECSPrivilegedAccess"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSPrivilegedAccess" } + } + }, "EventBackupDDB": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaBackupDDB"], @@ -2140,6 +2246,22 @@ ] } }, + "EventInitiateEvaluationECSPrivilegedAccess": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateECSPrivilegedAccessEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate privileged access issue ECS task definition evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSPrivilegedAccess"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { 
"Fn::GetAtt": ["LambdaInitiateECSPrivilegedAccessEvaluation", "Arn"] }, + "Id": "LambdaInitiateECSPrivilegedAccessEvaluation" + } + ] + } + }, "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaLogsForwarder"], @@ -2292,6 +2414,16 @@ "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } } }, + "PermissionToInvokeLambdaInitiateECSPrivilegedAccessEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateECSPrivilegedAccessEvaluation", "EventInitiateEvaluationECSPrivilegedAccess"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateECSPrivilegedAccessEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSPrivilegedAccess", "Arn"] } + } + }, "SNSNotifyLambdaEvaluateSG": { "Type": "AWS::SNS::Topic", "DependsOn": ["LambdaEvaluateSG"], @@ -2521,6 +2653,25 @@ "Subscription": [{ "Endpoint": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] + + }, + "Protocol": "lambda" + }] + } + }, + "SNSNotifyLambdaEvaluateECSPrivilegedAccess": { + "Type": "AWS::SNS::Topic", + "DependsOn": "LambdaEvaluateECSPrivilegedAccess", + "Properties": { + "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSPrivilegedAccess", "value"] } ] + ]}, + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSPrivilegedAccess", "value"] } ] + ]}, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluateECSPrivilegedAccess", "Arn"] }, "Protocol": "lambda" }] @@ -2654,6 +2805,17 @@ "Principal": "sns.amazonaws.com", "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } + + } + }, + "PermissionToInvokeLambdaEvaluateECSPrivilegedAccessSNS": { 
+ "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateECSPrivilegedAccess", "LambdaEvaluateECSPrivilegedAccess"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSPrivilegedAccess" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSPrivilegedAccess", "Arn"] } } }, "SNSIdentificationErrors": { @@ -3303,9 +3465,54 @@ "Threshold": 0, "TreatMissingData": "notBreaching" } + }, + "AlarmErrorsLambdaInitiateECSPrivilegedAccessEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateECSPrivilegedAccessEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateECSPrivilegedAccessEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateECSPrivilegedAccessEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaECSPrivilegedAccessEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSPrivilegedAccess"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSPrivilegedAccess" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateECSPrivilegedAccess" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + 
"Threshold": 0, + "TreatMissingData": "notBreaching" + } } }, - "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 38c7f93e..d846d4fe 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -15,7 +15,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ami-public-access-issues-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.ecs-privileged-access-issues-identification" ] tags = "${var.tags}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index c839c312..e07b2acb 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -79,6 +79,7 @@ resource "aws_s3_bucket_object" "sqs-public-policy-identification" { key = "lambda/${format("sqs-public-policy-identification-%s.zip", "${md5(file("${path.module}/../../../packages/sqs-public-policy-identification.zip"))}")}" source = "${path.module}/../../../packages/sqs-public-policy-identification.zip" } + resource "aws_s3_bucket_object" "s3-unencrypted-bucket-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("s3-unencrypted-bucket-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/s3-unencrypted-bucket-issues-identification.zip"))}")}" @@ -91,3 +92,9 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { source = 
"${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } +resource "aws_s3_bucket_object" "ecs-privileged-access-issues-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("ecs-privileged-access-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/ecs-privileged-access-issues-identification.zip"))}")}" + source = "${path.module}/../../../packages/ecs-privileged-access-issues-identification.zip" +} + diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py new file mode 100644 index 00000000..d2be8714 --- /dev/null +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py @@ -0,0 +1,86 @@ +import json +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.ecs import ECSChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, ECSPrivilegedAccessIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate ECS privileged access enabled or not. 
""" + set_logging(level=logging.DEBUG) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # region = payload['region'] + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.ecs_privileged_access.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking privileged access enabled or not for ecs task definitions in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ECSPrivilegedAccessIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"ECS privileged access enabled issues in DDB:\n{open_issues.keys()}") + + checker = ECSChecker(account=account) + if checker.check(): + for task_definition in checker.task_definitions: + logging.debug(f"Checking {task_definition.name}") + if task_definition.is_privileged: + issue = ECSPrivilegedAccessIssue(account_id, task_definition.name) + issue.issue_details.arn = task_definition.arn + issue.issue_details.tags = task_definition.tags + issue.issue_details.region = task_definition.account.region + if config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {task_definition.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id 
from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(task_definition.name, None) + + logging.debug(f"ECS privileged access issues in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated task definitions + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + except Exception: + logging.exception(f"Failed to check ECS privileged access issues for '{account_id} ({account_name})'") + return + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain privileged access checking") + + logging.debug(f"Checked ECS privileged access issues for '{account_id} ({account_name})'") + + diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py new file mode 100644 index 00000000..983334d3 --- /dev/null +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py @@ -0,0 +1,36 @@ +import os +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find privileged access enabled or not. 
""" + set_logging(level=logging.INFO) + logging.debug("Initiating ECS privileged access checking") + + try: + sns_arn = os.environ["SNS_ECS_PRIVILEGED_ACCESS_ARN"] + config = Config() + + if not config.ecs_privileged_access.enabled: + logging.debug("ECS privileged access checking disabled") + return + + logging.debug("Iterating over each account to initiate ECS privileged access check") + for account_id, account_name in config.ecs_privileged_access.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating ECS privileged access checking for '{account_name}'") + Sns.publish(sns_arn, payload) + + except Exception: + logging.exception("Error occurred while initiation of ECS privileged access checking") + return + + logging.debug("ECS privileged access checking initiation done") diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py new file mode 100644 index 00000000..58ce8c01 --- /dev/null +++ b/hammer/library/aws/ecs.py @@ -0,0 +1,169 @@ +import logging + +from botocore.exceptions import ClientError +from library.utility import timeit +from collections import namedtuple +from library.aws.utility import convert_tags + +# structure which describes EC2 instance +ECSCluster_Details = namedtuple('ECSCluster_Details', [ + # cluster_id + 'cluster_arn', + # subnet_group_id + 'cluster_instance_arn' + ]) + + +class ECSClusterOperations(object): + @classmethod + @timeit + def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): + """ Retrieve ecs clusters meta data with security group attached + + :param ec2_client: boto3 ec2 client + :param ecs_client: boto3 ECS client + :param group_id: security group id + + :return: list with ecs clusters details + """ + # describe ecs instances with security group attached + ecs_instances = [] + + # this will include Clusters + clusters_res = ecs_client.list_clusters() + for cluster_arn in 
clusters_res["clusterArns"]: + list_container_instances = ecs_client.list_container_instances( + cluster=cluster_arn + ) + + for instance_arn in list_container_instances["containerInstanceArns"]: + container_instance = ecs_client.describe_container_instances( + cluster=cluster_arn, + containerInstances=[ + instance_arn, + ] + ) + + ec2_instance_id = container_instance[0]["ec2InstanceId"] + ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + + if group_id in str(ec2_instance["SecurityGroups"]): + ecs_instances.append(ECSCluster_Details( + cluster_arn=cluster_arn, + cluster_instance_arn=instance_arn + )) + + return ecs_instances + + +class ECSTaskDefinitions(object): + """ + Basic class for ECS task definitions. + + """ + def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param arn: tags of task definition. + :param is_logging: logging enabled or not. + """ + self.account = account + self.name = name + self.arn = arn + self.tags = convert_tags(tags) + self.is_logging = is_logging + self.is_privileged = is_privileged + self.external_image = external_image + + +class ECSChecker(object): + """ + Basic class for checking ecs task definition's logging enabled or not in account/region. + Encapsulates check settings and discovered task definitions. 
+ """ + + def __init__(self, account): + """ + :param account: `Account` task definitions to check + + """ + self.account = account + self.task_definitions = [] + + def task_definition_arns(self, name): + """ + :return: `ECS task definition' by arn + """ + for task_definition in self.task_definitions: + if task_definition.name == name: + return task_definition + return None + + def check(self, task_definitions=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param task_definitions: list with task definitions to check, if it is not supplied - all taks definitions must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering, so get all task definition family details for account + response = self.account.client("ecs").list_task_definition_families() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ecs:{err.operation_name})") + else: + logging.exception(f"Failed to list task definitions in {self.account}") + return False + + if "families" in response: + tags = {} + for task_definition_name in response["families"]: + if task_definitions is not None and task_definition_name not in task_definitions: + continue + + logging_enabled = False + external_image = False + is_privileged = False + task_definition = self.account.client("ecs").describe_task_definition( + taskDefinition=task_definition_name + )['taskDefinition'] + task_definition_arn = task_definition["taskDefinitionArn"] + if "containerDefinitions" in task_definition: + for container_definition in task_definition['containerDefinitions']: + if container_definition.get('logConfiguration') is None: + logging_enabled = False + else: + logging_enabled = True + + if container_definition['privileged']: + is_privileged = True + else: + is_privileged = 
False + + image = container_definition['image'] + if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = True + else: + external_image = False + + if "Tags" in task_definition: + tags = task_definition["Tags"] + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) + self.task_definitions.append(task_definition_details) + + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 504f1a1d..36c6c11e 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -66,6 +66,9 @@ def __init__(self, # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") + # ECS access issue config + self.ecs_privileged_access = ModuleConfig(self._config, "ecs_privileged_access") + self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index d9ae7de2..156fe8c3 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -238,6 +238,11 @@ def __init__(self, *args): super().__init__(*args) +class ECSPrivilegedAccessIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + + class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py new file mode 100644 index 00000000..dcce2e88 --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -0,0 +1,160 @@ +""" +Class to create ecs privileged access issue tickets. 
+""" +import sys +import logging + + +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.ddb_issues import IssueStatus, ECSPrivilegedAccessIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateECSPrivilegedAccessIssueTickets(object): + """ Class to create ECS privileged access issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_ecs_privileged(self): + """ Class method to create jira tickets """ + table_name = self.config.ecs_privileged_access.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.ecs_privileged_access.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSPrivilegedAccessIssue) + for issue in issues: + task_definition_arn = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + # issue has been already reported + if issue.timestamps.reported is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_arn}' issue") + + comment = (f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_arn}' issue " + f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status == 
IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + labels=IssueStatus.Whitelisted + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_arn}") + slack.report_issue( + msg=f"ECS privileged access disabled '{task_definition_arn}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + logging.debug(f"No changes for '{task_definition_arn}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting ECS privileged access issue for '{task_definition_arn}'") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_summary = (f"ECS privileged access is enabled for '{task_definition_arn}'" + f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + issue_description = ( + f"The ECS privileged access is enabled.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*ECS Task Definition*: {task_definition_arn}\n") + + auto_remediation_date = (self.config.now + self.config.ecs_privileged_access.issue_retention_date).date() + issue_description += 
f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += JiraOperations.build_tags_table(tags) + + issue_description += "\n" + issue_description += ( + f"*Recommendation*: " + f"By default, containers are unprivileged and cannot. Disable ECS privileged access.") + + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["ecs-privileged-access"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + + issue.jira_details.owner = owner + issue.jira_details.business_unit = bu + issue.jira_details.product = product + + slack.report_issue( + msg=f"Discovered {issue_summary}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + IssueOperations.set_status_reported(ddb_table, issue) + + +if __name__ == '__main__': + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + try: + obj = CreateECSPrivilegedAccessIssueTickets(config) + obj.create_tickets_ecs_privileged() + except Exception: + logging.exception("Failed to create ECS privileged access tickets") From 1b502a1398fc1f1e64136ea92e46c1f0d817f5ea Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 26 Mar 2019 15:56:06 +0530 Subject: 
[PATCH 010/193] Updated with documentation. Updated with documentation. --- docs/_data/sidebars/mydoc_sidebar.yml | 3 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 14 +- docs/pages/features.md | 1 + .../pages/playbook19_ecs_privileged_access.md | 178 ++++++++++++++++++ docs/pages/remediation_backup_rollback.md | 1 + 6 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 docs/pages/playbook19_ecs_privileged_access.md diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..2456fdcb 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,6 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + - title: ECS Privileged Access + url: /playbook19_ecs_privileged_access.html + output: web, pdf diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index c7331eb7..483c66f4 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. +* **SourceIdentificationECSPrivilegedAccess**: the relative path to the Lambda package that identifies ECS privileged access issues. The default value is **ecs-privileged-access-issues-identification.zip**. 
**VPC config (optional)**:
* **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in.
diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md
index 23ff0938..5c789165 100644
--- a/docs/pages/editconfig.md
+++ b/docs/pages/editconfig.md
@@ -386,4 +386,16 @@ Parameters:
 * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`.
 * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings);
 * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings);
-* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`;
\ No newline at end of file
+* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`;
+### 2.16. ECS Privileged Access issues.
+
+This section describes how to detect whether you have ECS privileged access enabled or not. Refer to [issue-specific playbook](playbook19_ecs_privileged_access.html) for further details.
+
+Edit the **ecs_privileged_access** section of the `config.json` file to configure the handling of this issue.
+
+Parameters:
+* **enabled**: enables/disables issue identification. The default value is `true`;
+* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-ecs-privileged-access`.
+* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. 
The default value is `false`;
+* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`;
+* **remediation_retention_period**: the amount of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`.
\ No newline at end of file
diff --git a/docs/pages/features.md b/docs/pages/features.md
index 3b830f91..5fb96c8f 100644
--- a/docs/pages/features.md
+++ b/docs/pages/features.md
@@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues:
 |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy |
 |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest |
 |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset |
+|[ECS Privileged Access](playbook19_ecs_privileged_access.html) |Detects ECS task definition's privileged access issues |Any ECS task definition has privileged access enabled |
 Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. 
\ No newline at end of file
diff --git a/docs/pages/playbook19_ecs_privileged_access.md b/docs/pages/playbook19_ecs_privileged_access.md
new file mode 100644
index 00000000..35e65b6d
--- /dev/null
+++ b/docs/pages/playbook19_ecs_privileged_access.md
@@ -0,0 +1,178 @@
+---
+title: ECS Privileged Access issues
+keywords: playbook19
+sidebar: mydoc_sidebar
+permalink: playbook19_ecs_privileged_access.html
+---
+
+# Playbook 19: ECS Privileged Access issues
+
+## Introduction
+
+This playbook describes how to configure Dow Jones Hammer to detect ECS privileged access issues.
+
+## 1. Issue Identification
+
+Dow Jones Hammer identifies ECS task definitions that have privileged access enabled.
+
+When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table.
+
+According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions.
+The table lists the Python modules that implement this functionality:
+
+|Designation |Path |
+|--------------|:--------------------:|
+|Initialization|`hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py`|
+|Identification|`hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py`|
+
+## 2. Issue Reporting
+
+You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/).
+These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. 
+
+Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can:
+* raise a JIRA ticket and assign it to a specific person in your organization;
+* send the issue notification to the Slack channel or directly to a Slack user.
+
+Additionally Dow Jones Hammer tries to detect the person to report the issue to by examining the task definition's `owner` tag. In case the tag is present and contains a **valid JIRA/Slack user**:
+* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee;
+* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file).
+
+This Python module implements the issue reporting functionality:
+```
+hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py
+```
+
+
+## 3. Setup Instructions For This Issue
+
+To configure the detection and reporting of this issue, you should edit the following sections of the Dow Jones Hammer configuration files:
+
+### 3.1. The config.json File
+
+The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. 
+To identify and report issues of this type, you should add the following parameters in the **ecs_privileged_access** section of the **config.json** file:
+
+|Parameter Name |Description | Default Value|
+|------------------------------|---------------------------------------|:------------:|
+|`enabled` |Toggles issue detection for this issue |`true`|
+|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-ecs-privileged-access` |
+|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`true`|
+
+Sample **config.json** section:
+```
+
+"ecs_privileged_access": {
+    "enabled": true,
+    "ddb.table_name": "hammer-ecs-privileged-access",
+    "reporting": true,
+    "remediation": false,
+    "remediation_retention_period": 21
+    }
+```
+
+### 3.2. The whitelist.json File
+
+You can define exceptions to the general automatic remediation settings for specific ECS task definitions. To configure such exceptions, you should edit the **ecs_privileged_access** section of the **whitelist.json** configuration file as follows:
+
+|Parameter Key | Parameter Value(s)|
+|:------------:|:-----------------:|
+|AWS Account ID|ECS task definition id(s)|
+
+Sample **whitelist.json** section:
+```
+"ecs_privileged_access": {
+    "123456789012": ["task_definition_arn1", "task_definition_arn2"]
+}
+```
+
+### 3.3. The ticket_owners.json File
+
+You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes.
+
+You can configure these parameters for specific AWS accounts and globally. Account-specific settings precede the global settings in the **ticket_owners.json** configuration file.
+ +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+
+You can see the logs for each of these Lambda functions in the following Log Groups:
+
+|Lambda Function|CloudWatch Log Group Name |
+|---------------|--------------------------------------------|
+|Initialization |`/aws/lambda/initiate-ecs-privileged-access`|
+|Identification |`/aws/lambda/describe-ecs-privileged-access`|
+
+### 4.2. Issue Reporting Logging
+
+Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows:
+
+|Designation|CloudWatch Log Stream Name |
+|-----------|---------------------------------------------------------|
+|Reporting |`reporting.create_ecs_privileged_access_issue_tickets`|
+
+
+### 4.3. Slack Reports
+
+In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s).
+
+Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance.
+
+### 4.4. Using CloudWatch Logs for Dow Jones Hammer
+
+To access Dow Jones Hammer logs, proceed as follows:
+
+1. Open **AWS Management Console**.
+2. Select **CloudWatch** service.
+3. Select **Logs** from the CloudWatch sidebar.
+4. Select the log group you want to explore. The log group will open.
+5. Select the log stream you want to explore.
+
+Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance.
+
+## 5. Issue specific details in DynamoDB
+
+Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules.
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`id` |string|ecs task definition id |`ecs-task_definition-id` | +|`tags` |map |Tags associated with ECS task definition id |`{"Name": "TestKey", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..1579b5b6 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[ECS Privileged Access issues](playbook19_ecs_privileged_access.html#3-issue-remediation) | `No` | `No` | ## 2. How Remediation Backup Works From ce03e7ad0d478bddedaccc8f087202a6a33668ae Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 26 Mar 2019 17:11:20 +0530 Subject: [PATCH 011/193] Updated with ECS image source issue code changes. Updated with ECS image source issue code changes. 
--- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 33 +++ deployment/cf-templates/identification.json | 241 ++++++++++++++++-- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 7 + docs/_data/sidebars/mydoc_sidebar.yml | 5 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 14 +- docs/pages/features.md | 1 + .../playbook20_ecs_external_image_source.md | 178 +++++++++++++ ...scribe_ecs_external_image_source_issues.py | 86 +++++++ ...o_desc_ecs_external_image_source_issues.py | 36 +++ hammer/library/aws/ecs.py | 177 +++++++++++++ hammer/library/config.py | 3 + hammer/library/ddb_issues.py | 5 + ...ecs_external_image_source_issue_tickets.py | 160 ++++++++++++ 16 files changed, 932 insertions(+), 21 deletions(-) create mode 100644 docs/pages/playbook20_ecs_external_image_source.md create mode 100644 hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py create mode 100644 hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py create mode 100644 hammer/library/aws/ecs.py create mode 100644 hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 2e00c69c..55bd92d6 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification 
rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api ecs-external-image-source-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 4ec7b653..de1739a1 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -330,6 +330,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-public-snapshots" ] ]} } }, + "DynamoDBSQSPublicPolicy": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -482,6 +483,38 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } + }, + "DynamoDBECSExternalImageSource": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, 
"ecs-external-image-source" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 3618c393..0650ee75 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,7 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationECSExternalImageSource" ] }, { @@ -92,6 +93,9 @@ }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" + }, + "SourceIdentificationECSExternalImageSource":{ + "default": "Relative path to external image issue ECS sources" } } } @@ -184,6 +188,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationECSExternalImageSource": { + "Type": "String", + "Default": "ecs-image-source-issues-identification.zip" } }, "Conditions": { @@ -241,6 +249,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricECSExternalImageSourceError": { + "value": "ECSExternalImageSourceError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -319,6 +330,11 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameECSExternalImageSource": { + "value": "describe-ecs-external-image-source-sns"}, + "SNSTopicNameECSExternalImageSource": { + "value": "describe-ecs-external-image-source-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -402,6 +418,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateECSExternalImageSourceLambdaFunctionName": { + "value": "initiate-ecs-external-image-source" + }, + "IdentifyECSExternalImageSourceLambdaFunctionName": { + "value": 
"describe-ecs-external-image-source" } } }, @@ -442,7 +464,6 @@ "RetentionInDays": "7" } }, - "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -490,7 +511,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - "LambdaInitiateSGEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], @@ -543,7 +563,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } } }, - "LambdaEvaluateSG": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateSG"], @@ -607,7 +626,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } } }, - "LambdaInitiateCloudTrailsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], @@ -841,7 +859,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } } }, - "LambdaInitiateS3PolicyEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], @@ -894,7 +911,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } } }, - "LambdaEvaluateS3Policy": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], @@ -958,7 +974,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } } }, - "LambdaInitiateIAMUserKeysRotationEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], @@ -1309,7 +1324,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } } }, - "LambdaInitiateEBSSnapshotsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], @@ -1362,7 +1376,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } } }, - 
"LambdaEvaluateEBSSnapshots": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], @@ -1426,7 +1439,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } } }, - "LambdaInitiateRDSSnapshotsEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], @@ -1479,7 +1491,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } } }, - "LambdaEvaluateRDSSnapshots": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], @@ -1595,7 +1606,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } } }, - "LambdaEvaluateSQSPublicPolicy": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], @@ -1659,7 +1669,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } } }, - "LambdaInitiateS3EncryptionEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], @@ -1759,7 +1768,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } } }, - "LambdaInitiateRDSEncryptionEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], @@ -1812,7 +1820,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } } }, - "LambdaEvaluateRDSEncryption": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], @@ -1912,7 +1919,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } } }, - "LambdaEvaluateAMIPublicAccess": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], @@ -1960,6 +1966,105 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } } }, + "LambdaInitiateECSExternalImageSourceEvaluation": { + 
"Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluateECSExternalImageSource", "LogGroupLambdaInitiateECSExternalImageSourceEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationECSExternalImageSource" } + }, + "Environment": { + "Variables": { + "SNS_ECS_EXTERNAL_IMAGE_ARN": { "Ref": "SNSNotifyLambdaEvaluateECSExternalImageSource" } + } + }, + "Description": "Lambda function for initiate to identify ECS task definition image source.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateECSExternalImageSourceLambdaFunctionName", "value"] } ] + ]}, + "Handler": "initiate_to_desc_ecs_external_image_source_issues.lambda_handler", + "MemorySize": 128, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateECSExternalImageSourceEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "InitiateECSExternalImageSourceLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateECSExternalImageSourceEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaInitiateECSExternalImageSourceEvaluation"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateECSExternalImageSourceEvaluation" } + } + }, + "LambdaEvaluateECSExternalImageSource": { + "Type": 
"AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateECSExternalImageSource"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationECSExternalImageSource" } + }, + "Description": "Lambda function to describe ECS task definitions image source ", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyECSExternalImageSourceLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_ecs_external_image_source_issues.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateECSExternalImageSource": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyECSExternalImageSourceLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateECSExternalImageSource": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateECSExternalImageSource"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSExternalImageSource" } + } + }, "EventBackupDDB": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaBackupDDB"], @@ -2140,6 +2245,22 @@ ] } }, + "EventInitiateEvaluationECSExternalImageSource": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateECSExternalImageSourceEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to 
initiate ECS task definition image source evaluations", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSExternalImageSource"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateECSExternalImageSourceEvaluation", "Arn"] }, + "Id": "LambdaInitiateECSExternalImageSourceEvaluation" + } + ] + } + }, "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaLogsForwarder"], @@ -2292,6 +2413,16 @@ "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } } }, + "PermissionToInvokeLambdaInitiateECSExternalImageSourceEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateECSExternalImageSourceEvaluation", "EventInitiateEvaluationECSExternalImageSource"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateECSExternalImageSourceEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSExternalImageSource", "Arn"] } + } + }, "SNSNotifyLambdaEvaluateSG": { "Type": "AWS::SNS::Topic", "DependsOn": ["LambdaEvaluateSG"], @@ -2526,6 +2657,24 @@ }] } }, + "SNSNotifyLambdaEvaluateECSExternalImageSource": { + "Type": "AWS::SNS::Topic", + "DependsOn": "LambdaEvaluateECSExternalImageSource", + "Properties": { + "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSExternalImageSource", "value"] } ] + ]}, + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] + ]}, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluateECSExternalImageSource", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, 
"PermissionToInvokeLambdaEvaluateSgSNS": { "Type": "AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], @@ -2654,6 +2803,17 @@ "Principal": "sns.amazonaws.com", "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } + + } + }, + "PermissionToInvokeLambdaEvaluateECSExternalImageSourceSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateECSExternalImageSource", "LambdaEvaluateECSExternalImageSource"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSExternalImageSource" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSExternalImageSource", "Arn"] } } }, "SNSIdentificationErrors": { @@ -3303,9 +3463,54 @@ "Threshold": 0, "TreatMissingData": "notBreaching" } + }, + "AlarmErrorsLambdaInitiateECSExternalImageSourceEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateECSExternalImageSourceEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateECSExternalImageSourceEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateECSExternalImageSourceEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaECSExternalImageSourceEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSExternalImageSource"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + 
"OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSExternalImageSource" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateECSExternalImageSource" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } } }, - "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 38c7f93e..9ec1cb9e 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -15,7 +15,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ami-public-access-issues-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.ecs-external-image-source-issues-identification" ] tags = "${var.tags}" @@ -42,6 +43,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationECSExternalImageSource = "${aws_s3_bucket_object.ecs-external-image-source-issues-identification.id}" } template_url = 
"https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index c839c312..e4539e91 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -79,6 +79,7 @@ resource "aws_s3_bucket_object" "sqs-public-policy-identification" { key = "lambda/${format("sqs-public-policy-identification-%s.zip", "${md5(file("${path.module}/../../../packages/sqs-public-policy-identification.zip"))}")}" source = "${path.module}/../../../packages/sqs-public-policy-identification.zip" } + resource "aws_s3_bucket_object" "s3-unencrypted-bucket-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("s3-unencrypted-bucket-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/s3-unencrypted-bucket-issues-identification.zip"))}")}" @@ -91,3 +92,9 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } + +resource "aws_s3_bucket_object" "ecs-external-image-source-issues-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("ecs-external-image-source-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/ecs-external-image-source-issues-identification.zip"))}")}" + source = "${path.module}/../../../packages/ecs-external-image-source-issues-identification.zip" +} \ No newline at end of file diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..2db8c3d2 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,8 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + + - title: ECS External Image Source + url: 
/playbook20_ecs_external_image_source.html + output: web, pdf + diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index c7331eb7..cd7e71a7 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. +* **SourceIdentificationECSExternalImageSource**: the relative path to the Lambda package that identifies ECS external image source issues. The default value is **ecs-external-image-source-issues-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..41505a0b 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,16 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. 
Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +### 2.17. ECS external image source issues + +This section describes how to detect whether you have ECS image source is external or internal. Refer to [issue-specific playbook](playbook20_ecs_external_image_source.html) for further details. + +Edit the **ecs_external_image_source** section of the `config.json` file to configure the handling of this issue. + +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-ecs-external-image-source`. +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`; +* **remediation_retention_period**: the amount of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`. 
\ No newline at end of file diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..77d3c572 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[ECS External Image Source](playbook20_ecs_external_image_source.html) |Detects ECS task definitions image source issues |Any one of ECS image source is external or internal | Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. \ No newline at end of file diff --git a/docs/pages/playbook20_ecs_external_image_source.md b/docs/pages/playbook20_ecs_external_image_source.md new file mode 100644 index 00000000..b3ebe9f1 --- /dev/null +++ b/docs/pages/playbook20_ecs_external_image_source.md @@ -0,0 +1,178 @@ +--- +title: ECS Image Source issues +keywords: playbook20 +sidebar: mydoc_sidebar +permalink: playbook20_ecs_external_image_source.html +--- + +# Playbook 20: ECS Image Source issues + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect ECS image source is external or internal. + +## 1. Issue Identification + +Dow Jones Hammer identifies those ECS image source is external or internal. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. 
+ +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py`| +|Identification|`hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py`| +
+## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. + +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect the person to report the issue to by examining the ECS task definition tags. In case the tags contain a **valid JIRA/Slack user**: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). + +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +``` + + +## 3. 
Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. +To identify and report issues of this type, you should add the following parameters in the **ecs_external_image_source** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-ecs-external-image-source` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`true`| + +Sample **config.json** section: +``` +""" +"ecs_external_image_source": { + "enabled": true, + "ddb.table_name": "hammer-ecs-external-image-source", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } +``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific ECS task definitions. To configure such exceptions, you should edit the **ecs_external_image_source** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Task definition arn(s)| + +Sample **whitelist.json** section: +``` +"ecs_external_image_source": { + "123456789012": ["task_definition_id1", "task_definition2"] +} +``` + +### 3.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. 
+ +You can configure these parameters for specific AWS accounts and globally. Account-specific settings precede the global settings in the **ticket_owners.json** configuration file. + +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+ +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/initiate-ecs-external-image-source`| +|Identification |`/aws/lambda/describe-ecs-external-image-source`| + +### 4.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_ecs_external_image_source_issue_tickets`| + + +### 4.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 4.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 5. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`id` |string|ecs task definition id |`task-definition-id` | +|`tags` |map |Tags associated with ECS task |`{"Name": "TestKey", "service": "archive"}`| \ No newline at end of file diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py new file mode 100644 index 00000000..6ad4bc2e --- /dev/null +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -0,0 +1,86 @@ +import json +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.ecs import ECSChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, ECSExternalImageSourceIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate ECS task definition using external or internal image source. 
""" + set_logging(level=logging.DEBUG) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # region = payload['region'] + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.ecs_external_image_source.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking Image source is external or internal for ecs task definitions in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ECSExternalImageSourceIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") + + checker = ECSChecker(account=account) + if checker.check(): + for task_definition in checker.task_definitions: + logging.debug(f"Checking {task_definition.name}") + if task_definition.external_image: + issue = ECSLoggingIssue(account_id, task_definition.name) + issue.issue_details.arn = task_definition.arn + issue.issue_details.tags = task_definition.tags + issue.issue_details.region = task_definition.account.region + if config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {task_definition.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from 
issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(task_definition.name, None) + + logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated task definitions + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + except Exception: + logging.exception(f"Failed to check ECS task definitions for '{account_id} ({account_name})'") + return + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to identify ECS task definitions external image source checking") + + logging.debug(f"Checked ECS task definitions for '{account_id} ({account_name})'") + + diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py new file mode 100644 index 00000000..34e15859 --- /dev/null +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py @@ -0,0 +1,36 @@ +import os +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find ecs task definitions' image source external or internal. 
""" + set_logging(level=logging.INFO) + logging.debug("Initiating ECS task definitions' image source checking") + + try: + sns_arn = os.environ["SNS_ECS_EXTERNAL_IMAGE_ARN"] + config = Config() + + if not config.ecs_external_image_source.enabled: + logging.debug("ECS task definitions' image source checking disabled") + return + + logging.debug("Iterating over each account to initiate ECS task definitions' image source check") + for account_id, account_name in config.ecs_external_image_source.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating ECS task definitions' image source checking for '{account_name}'") + Sns.publish(sns_arn, payload) + + except Exception: + logging.exception("Error occurred while initiation of ECS task definitions' image source checking") + return + + logging.debug("ECS task definitions' image source checking initiation done") diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py new file mode 100644 index 00000000..0dfdf058 --- /dev/null +++ b/hammer/library/aws/ecs.py @@ -0,0 +1,177 @@ +import json +import logging +import mimetypes +import pathlib + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup +from collections import namedtuple +from library.aws.utility import convert_tags + +# structure which describes EC2 instance +ECSCluster_Details = namedtuple('ECSCluster_Details', [ + # cluster_id + 'cluster_arn', + # subnet_group_id + 'cluster_instance_arn' + ]) + + +class ECSClusterOperations(object): + @classmethod + @timeit + def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): + """ Retrieve ecs clusters meta data with security group attached + + :param ec2_client: 
boto3 ec2 client + :param ecs_client: boto3 ECS client + :param group_id: security group id + + :return: list with ecs clusters details + """ + # describe ecs instances with security group attached + ecs_instances = [] + + # this will include Clusters + clusters_res = ecs_client.list_clusters() + for cluster_arn in clusters_res["clusterArns"]: + list_container_instances = ecs_client.list_container_instances( + cluster=cluster_arn + ) + + for instance_arn in list_container_instances["containerInstanceArns"]: + container_instance = ecs_client.describe_container_instances( + cluster=cluster_arn, + containerInstances=[ + instance_arn, + ] + ) + + ec2_instance_id = container_instance[0]["ec2InstanceId"] + ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + + if group_id in str(ec2_instance["SecurityGroups"]): + ecs_instances.append(ECSCluster_Details( + cluster_arn=cluster_arn, + cluster_instance_arn=instance_arn + )) + + return ecs_instances + + +class ECSTaskDefinitions(object): + """ + Basic class for ECS task definitions. + + """ + def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param arn: tags of task definition. + :param is_logging: logging enabled or not. + """ + self.account = account + self.name = name + self.arn = arn + self.tags = convert_tags(tags) + self.is_logging = is_logging + self.is_privileged = is_privileged + self.external_image = external_image + + +class ECSChecker(object): + """ + Basic class for checking ecs task definition's logging enabled or not in account/region. + Encapsulates check settings and discovered task definitions. 
+ """ + + def __init__(self, account): + """ + :param account: `Account` task definitions to check + + """ + self.account = account + self.task_definitions = [] + + def task_definition_arns(self, name): + """ + :return: `ECS task definition' by arn + """ + for task_definition in self.task_definitions: + if task_definition.name == name: + return task_definition + return None + + def check(self, task_definitions=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param task_definitions: list with task definitions to check, if it is not supplied - all taks definitions must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering, so get all task definition family details for account + response = self.account.client("ecs").list_task_definition_families() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ecs:{err.operation_name})") + else: + logging.exception(f"Failed to list task definitions in {self.account}") + return False + + if "families" in response: + tags = {} + for task_definition_name in response["families"]: + if task_definitions is not None and task_definition_name not in task_definitions: + continue + + logging_enabled = False + external_image = False + is_privileged = False + task_definition = self.account.client("ecs").describe_task_definition( + taskDefinition=task_definition_name + )['taskDefinition'] + task_definition_arn = task_definition["taskDefinitionArn"] + if "containerDefinitions" in task_definition: + for container_definition in task_definition['containerDefinitions']: + if container_definition.get('logConfiguration') is None: + logging_enabled = False + else: + logging_enabled = True + + if container_definition['privileged']: + is_privileged = True + else: + is_privileged = 
False + + image = container_definition['image'] + if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = True + else: + external_image = False + + if "Tags" in task_definition: + tags = task_definition["Tags"] + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) + self.task_definitions.append(task_definition_details) + + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 504f1a1d..2676a0ce 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -66,6 +66,9 @@ def __init__(self, # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") + # ECS image source issue config + self.ecs_external_image_source = ModuleConfig(self._config, "ecs_external_image_source") + self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index d9ae7de2..f94715b0 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -238,6 +238,11 @@ def __init__(self, *args): super().__init__(*args) +class ECSExternalImageSourceIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + + class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py new file mode 100644 index 00000000..f0b780c7 --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -0,0 +1,160 @@ +""" +Class to create ecs external image source issue tickets. 
+""" +import sys +import logging + + +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.ddb_issues import IssueStatus, ECSExternalImageSourceIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateECSExternalImageSourceIssueTickets(object): + """ Class to create ECS external image source issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_ecs_external_images(self): + """ Class method to create jira tickets """ + table_name = self.config.ecs_external_image_source.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.ecs_external_image_source.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSExternalImageSourceIssue) + for issue in issues: + task_definition_arn = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + # issue has been already reported + if issue.timestamps.reported is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} ECS external image source '{task_definition_arn}' issue") + + comment = (f"Closing {issue.status.value} ECS external image source '{task_definition_arn}' issue " + f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status 
== IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + labels=IssueStatus.Whitelisted + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_arn}") + slack.report_issue( + msg=f"ECS external image source '{task_definition_arn}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + logging.debug(f"No changes for '{task_definition_arn}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting ECS external image source issue for '{task_definition_arn}'") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_summary = (f"ECS external image source '{task_definition_arn}'" + f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + issue_description = ( + f"The ECS image source taken from external.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*ECS Task Definition*: {task_definition_arn}\n") + + auto_remediation_date = (self.config.now + self.config.ecs_external_image_source.issue_retention_date).date() + issue_description += 
f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += JiraOperations.build_tags_table(tags) + + issue_description += "\n" + issue_description += ( + f"*Recommendation*: " + f"For both security and reliability, it would be better to use ECS container registry and maintain all required container images within ECS.") + + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["ecs-external-image"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + + issue.jira_details.owner = owner + issue.jira_details.business_unit = bu + issue.jira_details.product = product + + slack.report_issue( + msg=f"Discovered {issue_summary}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + IssueOperations.set_status_reported(ddb_table, issue) + + +if __name__ == '__main__': + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + try: + obj = CreateECSExternalImageSourceIssueTickets(config) + obj.create_tickets_ecs_external_images() + except Exception: + logging.exception("Failed to create ECS external image issue tickets") From 0468e81ae06edb8e306a3a0df42670dd4b6971cf Mon Sep 17 00:00:00 2001 From: 
vigneswararaomacharla Date: Tue, 26 Mar 2019 20:06:15 +0530 Subject: [PATCH 012/193] Updated with deployment issue changes. Updated with deployment issue changes. --- deployment/cf-templates/identification-crossaccount-role.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..e0badae3 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -47,7 +47,7 @@ "ec2:DescribeInstances", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", - "ec2:DescribeImages", + "ec2:DescribeImages" ], "Resource": "*" }, From 37799e3b1fed9db865468487874d21e0a21afb54 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla <30701892+vigneswararaomacharla@users.noreply.github.com> Date: Thu, 28 Mar 2019 22:27:07 +0530 Subject: [PATCH 013/193] Updated with deployment issue fixes. Updated with deployment issue fixes. 
--- deployment/terraform/modules/identification/identification.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index d846d4fe..8d977168 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -43,7 +43,8 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationECSPrivilegedAccess = "${aws_s3_bucket_object.ecs-privileged-access-issues-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" -} \ No newline at end of file +} From 78804d12d0cde65b616b5b421cb8493d18fbbb56 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 29 Mar 2019 19:29:47 +0530 Subject: [PATCH 014/193] Updated with ecs changes. Updated with ecs changes. 
--- hammer/library/aws/ecs.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 58ce8c01..504e24ba 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -132,9 +132,14 @@ def check(self, task_definitions=None): logging_enabled = False external_image = False is_privileged = False - task_definition = self.account.client("ecs").describe_task_definition( - taskDefinition=task_definition_name - )['taskDefinition'] + try: + task_definition = self.account.client("ecs").describe_task_definition( + taskDefinition=task_definition_name + )['taskDefinition'] + except ClientError as err: + logging.exception(f"Failed to describe task definitions in {self.account} ") + continue + task_definition_arn = task_definition["taskDefinitionArn"] if "containerDefinitions" in task_definition: for container_definition in task_definition['containerDefinitions']: @@ -143,7 +148,7 @@ def check(self, task_definitions=None): else: logging_enabled = True - if container_definition['privileged']: + if "privileged" in str(container_definition) and container_definition['privileged']: is_privileged = True else: is_privileged = False From b53ac34995aa81ffd51c5001a81547729173ed34 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 1 Apr 2019 15:37:43 +0530 Subject: [PATCH 015/193] Updated with ecs image source changes. Updated with ecs image source changes. 
--- ...scribe_ecs_external_image_source_issues.py | 2 +- hammer/library/aws/ecs.py | 33 +++++++++---------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py index 6ad4bc2e..58d2b045 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -52,7 +52,7 @@ def lambda_handler(event, context): for task_definition in checker.task_definitions: logging.debug(f"Checking {task_definition.name}") if task_definition.external_image: - issue = ECSLoggingIssue(account_id, task_definition.name) + issue = ECSExternalImageSourceIssue(account_id, task_definition.name) issue.issue_details.arn = task_definition.arn issue.issue_details.tags = task_definition.tags issue.issue_details.region = task_definition.account.region diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 0dfdf058..22c4d75f 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,15 +1,7 @@ -import json import logging -import mimetypes -import pathlib -from datetime import datetime, timezone -from io import BytesIO -from copy import deepcopy from botocore.exceptions import ClientError -from library.utility import jsonDumps from library.utility import timeit -from library.aws.security_groups import SecurityGroup from collections import namedtuple from library.aws.utility import convert_tags @@ -19,7 +11,7 @@ 'cluster_arn', # subnet_group_id 'cluster_instance_arn' - ]) +]) class ECSClusterOperations(object): @@ -27,7 +19,7 @@ class ECSClusterOperations(object): @timeit def get_ecs_instance_security_groups(cls, 
ec2_client, ecs_client, group_id): """ Retrieve ecs clusters meta data with security group attached - + :param ec2_client: boto3 ec2 client :param ecs_client: boto3 ECS client :param group_id: security group id @@ -53,7 +45,8 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ) ec2_instance_id = container_instance[0]["ec2InstanceId"] - ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + ec2_instance = \ + ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( @@ -67,12 +60,13 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): class ECSTaskDefinitions(object): """ Basic class for ECS task definitions. - + """ + def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): """ :param account: `Account` instance where ECS task definition is present - + :param name: name of the task definition :param arn: arn of the task definition :param arn: tags of task definition. 
@@ -140,9 +134,14 @@ def check(self, task_definitions=None): logging_enabled = False external_image = False is_privileged = False - task_definition = self.account.client("ecs").describe_task_definition( - taskDefinition=task_definition_name - )['taskDefinition'] + try: + task_definition = self.account.client("ecs").describe_task_definition( + taskDefinition=task_definition_name + )['taskDefinition'] + except ClientError as err: + logging.exception(f"Failed to describe task definitions in {self.account} ") + continue + task_definition_arn = task_definition["taskDefinitionArn"] if "containerDefinitions" in task_definition: for container_definition in task_definition['containerDefinitions']: @@ -151,7 +150,7 @@ def check(self, task_definitions=None): else: logging_enabled = True - if container_definition['privileged']: + if "privileged" in str(container_definition) and container_definition['privileged']: is_privileged = True else: is_privileged = False From 1b04d4239c97ab81231bb6f8e497a184035969ff Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 2 Apr 2019 16:39:27 +0530 Subject: [PATCH 016/193] Updated with redshift changes. Updated with redshift changes. 
--- hammer/library/aws/redshift.py | 193 +++++++++++++++++++++++---------- 1 file changed, 134 insertions(+), 59 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 621b4cf5..20d66360 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -11,6 +11,7 @@ from library.utility import timeit from library.aws.security_groups import SecurityGroup from collections import namedtuple +from library.aws.utility import convert_tags # structure which describes EC2 instance @@ -49,30 +50,66 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): return redshift_clusters - @staticmethod + """@staticmethod def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): - """ + Sets the cluster encryption using Server side encryption. :param redshift_client: Redshift boto3 client :param cluster_id: Redshift cluster name which to encrypt :param kms_master_key_id: Redshift cluster encryption key. default value is none. + :return: nothing + + + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + Encrypted=True + ) + """ + + @staticmethod + def set_cluster_access(redshift_client, cluster_id, public_access): + """ + Sets the cluster access as private. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to make as private + :param public_access: Redshift cluster public access True or False. + :return: nothing """ redshift_client.modify_cluster( ClusterIdentifier=cluster_id, - Encryption=True, - KmsKeyId=kms_master_key_id + PubliclyAccessible=public_access ) + """@staticmethod + def enable_logging(redshift_client, cluster_id, s3_bucket): + + Enable cluster audit logging. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to make as private + :param s3_bucket: S3 bucket to store audit logs. 
+ + :return: nothing + + + redshift_client.enable_logging( + ClusterIdentifier=cluster_id, + BucketName=s3_bucket + ) + """ + + class RedshiftCluster(object): """ Basic class for Redshift Cluster. Encapsulates `Owner`/`Tags`. """ - def __init__(self, account, name, tags, is_encrypted): + def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_logging=None): """ :param account: `Account` instance where redshift cluster is present @@ -82,31 +119,65 @@ def __init__(self, account, name, tags, is_encrypted): """ self.account = account self.name =name - self.tags = tags + self.tags = convert_tags(tags) self.is_encrypt = is_encrypted + self.is_public = is_public + self.is_logging = is_logging - def encrypt_cluster(self, kms_key_id=None): - """ + """def encrypt_cluster(self, kms_key_id=None): + Encrypt bucket with SSL encryption. :return: nothing - """ + try: RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id) except Exception: logging.exception(f"Failed to encrypt {self.name} cluster ") return False + return True""" + + def modify_cluster(self, public_access): + """ + Modify cluster as private. + :return: nothing + """ + try: + RedshiftClusterOperations.set_cluster_access(self.account.client("redshift"), self.name, public_access) + except Exception: + logging.exception(f"Failed to modify {self.name} cluster ") + return False + return True + """ + def enable_cluster_logging(self, s3_bucket): + + Enable audit logging for cluster. + + @:param s3_bucket: s3 bucket to store audit logs. + :return: nothing + + try: + RedshiftClusterOperations.enable_logging(self.account.client("redshift"), self.name, s3_bucket) + except Exception: + logging.exception(f"Failed to enable logging for {self.name} cluster ") + return False + + return True + """ + class RedshiftClusterChecker(object): + """ - Basic class for checking Redshift cluster in account. - Encapsulates discovered Redshift cluster. 
+ Basic class for checking redshift clusters public access and encryption in account/region. + Encapsulates check settings and discovered clusters. """ def __init__(self, account): """ - :param account: `Account` instance with Redshift cluster to check + :param account: `Account` clusters to check + """ self.account = account self.clusters = [] @@ -120,12 +191,13 @@ def get_cluster(self, name): return cluster return None + def check(self, clusters=None): """ - Walk through Redshift clusters in the account and check them (encrypted or not). + Walk through clusters in the account/region and check them. Put all gathered clusters to `self.clusters`. - :param clusters: list with Redshift cluster names to check, if it is not supplied - all clusters must be checked + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked :return: boolean. True - if check was successful, False - otherwise @@ -143,11 +215,13 @@ def check(self, clusters=None): if "Clusters" in response: for cluster_details in response["Clusters"]: + tags = {} cluster_id = cluster_details["ClusterIdentifier"] if clusters is not None and cluster_id not in clusters: continue + is_public = cluster_details["PubliclyAccessible"] is_encrypted = cluster_details["Encrypted"] if "Tags" in cluster_details: tags = cluster_details["Tags"] @@ -155,76 +229,77 @@ def check(self, clusters=None): cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, - is_encrypt=is_encrypted) + is_encrypted = is_encrypted, + is_public=is_public) self.clusters.append(cluster) - return True + return True -class RedshiftInsecureSGsChecker(object): +class RedshiftLoggingChecker(object): """ - Basic class for checking security group in account/region. - Encapsulates check settings and discovered security groups. + Basic class for checking redshift cluster's logging enabled or not in account/region. + Encapsulates check settings and discovered clusters. 
""" - def __init__(self, - account, - restricted_ports): + + def __init__(self, account): """ - :param account: `Account` instance with security groups to check - :param restricted_ports: list with ports to consider `SecurityGroup` as not restricted + :param account: `Account` clusters to check + """ self.account = account - self.restricted_ports = restricted_ports - self.groups = [] + self.clusters = [] - def get_security_group(self, id): + def get_cluster(self, name): """ - :return: `SecurityGroup` by id + :return: `Redshift cluster` by name """ - for group in self.groups: - if group.id == id: - return group + for cluster in self.clusters: + if cluster.name == name: + return cluster return None - def check(self, ids=None): + def check(self, clusters=None): """ - Walk through security groups in the account/region and check them (restricted or not). - Put all gathered groups to `self.groups`. + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. - :param ids: list with security group ids to check, if it is not supplied - all groups must be checked + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked :return: boolean. 
True - if check was successful, False - otherwise """ - args = {'DryRun': False} - if ids: - args['GroupIds'] = ids try: - clusters = self.account.client("redshift").describe_clusters() - for cluster in clusters["Clusters"]: - for security_group in cluster["ClusterSecurityGroups"]: - sg_name = security_group["ClusterSecurityGroupName"] - status = security_group["Status"] - sg_details = self.account.client("redshift").describe_cluster_security_groups( - ClusterSecurityGroupName=sg_name) - - - - #describe_security_groups(**args)["SecurityGroups"] + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() except ClientError as err: if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: logging.error(f"Access denied in {self.account} " - f"(ec2:{err.operation_name})") - elif err.response['Error']['Code'] == "InvalidGroup.NotFound": - logging.error(err.response['Error']['Message']) - return False + f"(redshift:{err.operation_name})") else: - logging.exception(f"Failed to describe security groups in {self.account}") + logging.exception(f"Failed to list cluster in {self.account}") return False - for security_group in secgroups: - sg = SecurityGroup(self.account, - security_group) - sg.check(self.restricted_ports) - self.groups.append(sg) + if "Clusters" in response: + for cluster_details in response["Clusters"]: + logging_enabled = True + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_logging=logging_enabled) + 
self.clusters.append(cluster) + return True \ No newline at end of file From 512466ede5d654198367566a0e70db7ef0a18bbf Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 2 Apr 2019 20:03:36 +0530 Subject: [PATCH 017/193] Updated with redshift changes. Updated with redshift changes. --- hammer/library/aws/redshift.py | 65 ---------------------------------- 1 file changed, 65 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 20d66360..271592bd 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -50,24 +50,6 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): return redshift_clusters - """@staticmethod - def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): - - Sets the cluster encryption using Server side encryption. - - :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to encrypt - :param kms_master_key_id: Redshift cluster encryption key. default value is none. - - :return: nothing - - - redshift_client.modify_cluster( - ClusterIdentifier=cluster_id, - Encrypted=True - ) - """ - @staticmethod def set_cluster_access(redshift_client, cluster_id, public_access): """ @@ -85,24 +67,6 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) - """@staticmethod - def enable_logging(redshift_client, cluster_id, s3_bucket): - - Enable cluster audit logging. - - :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to make as private - :param s3_bucket: S3 bucket to store audit logs. 
- - :return: nothing - - - redshift_client.enable_logging( - ClusterIdentifier=cluster_id, - BucketName=s3_bucket - ) - """ - class RedshiftCluster(object): """ @@ -124,18 +88,6 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo self.is_public = is_public self.is_logging = is_logging - """def encrypt_cluster(self, kms_key_id=None): - - Encrypt bucket with SSL encryption. - :return: nothing - - try: - RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id) - except Exception: - logging.exception(f"Failed to encrypt {self.name} cluster ") - return False - - return True""" def modify_cluster(self, public_access): """ @@ -150,23 +102,6 @@ def modify_cluster(self, public_access): return True - """ - def enable_cluster_logging(self, s3_bucket): - - Enable audit logging for cluster. - - @:param s3_bucket: s3 bucket to store audit logs. - :return: nothing - - try: - RedshiftClusterOperations.enable_logging(self.account.client("redshift"), self.name, s3_bucket) - except Exception: - logging.exception(f"Failed to enable logging for {self.name} cluster ") - return False - - return True - """ - class RedshiftClusterChecker(object): From 1a70d675d395cf3bc2549df7085049b89b6e20eb Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 3 Apr 2019 13:03:53 +0530 Subject: [PATCH 018/193] Updated with redshift public access issue changes. Updated with redshift public access issue changes. 
--- docs/_data/sidebars/mydoc_sidebar.yml | 4 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 14 +- docs/pages/features.md | 1 + .../playbook16_redshift_public_clusters.md | 178 ++++++++++++++++++ docs/pages/remediation_backup_rollback.md | 1 + ...describe_redshift_cluster_public_access.py | 4 +- hammer/library/aws/redshift.py | 129 +------------ .../clean_redshift_public_access.py | 2 +- 9 files changed, 205 insertions(+), 129 deletions(-) create mode 100644 docs/pages/playbook16_redshift_public_clusters.md diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index 9b0b67ba..ff8f8da7 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -115,3 +115,7 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + + - title: Redshift Publicly Accessible clusters + url: /playbook16_redshift_public_clusters.html + output: web, pdf diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index ce1bb23f..dc5b6edf 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -93,6 +93,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. 
+* **SourceIdentificationRedshiftPublicAccess**: the relative path to the Lambda package that identifies publicly accessible redshift cluster issues. The default value is **redshift-cluster-public-access-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..209f9a4c 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,16 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +### 2.16. Redshift publicly accessible cluster issues. + +This section describes how to detect whether you have publicly accessible redshift cluster issues or not. Refer to [issue-specific playbook](playbook16_redshift_public_clusters.html) for further details. + +Edit the **redshift_public_access** section of the `config.json` file to configure the handling of this issue. + +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. 
The default value is `hammer-redshift-public-access`. +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`; +* **remediation_retention_period**: the number of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`. diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..78b1cefa 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[Redshift Public Access Clusters](playbook16_redshift_public_clusters.html) |Detects Redshift publicly accessible cluster issues |Any one of Redshift clusters is publicly accessible | Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. 
\ No newline at end of file diff --git a/docs/pages/playbook16_redshift_public_clusters.md b/docs/pages/playbook16_redshift_public_clusters.md new file mode 100644 index 00000000..f6c7e4c3 --- /dev/null +++ b/docs/pages/playbook16_redshift_public_clusters.md @@ -0,0 +1,178 @@ +--- +title: Redshift publicly accessible cluster issues +keywords: playbook16 +sidebar: mydoc_sidebar +permalink: playbook16_redshift_public_clusters.html +--- + +# Playbook 16: Redshift publicly accessible cluster issues + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect Redshift publicly accessible cluster issues. + +## 1. Issue Identification + +Dow Jones Hammer identifies Redshift clusters whose ```PubliclyAccessible``` parameter is enabled. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. + +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/redshift-cluster-public-access-identification/initiate_to_desc_redshift_cluster_public_access.py`| +|Identification|`hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py`| + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. 
+ +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect person to report issue to by examining Redshift cluster publicly accessible status. In case the cluster is publicly accessible (true) **valid JIRA/Slack user**: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). + +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +``` + + +## 3. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. 
+To identify and report issues of this type, you should add the following parameters in the **redshift_public_access** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-redshift-public-access` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| + +Sample **config.json** section: +``` +""" +"redshift_public_access": { + "enabled": true, + "ddb.table_name": "hammer-redshift-public-access", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } + ``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific Redshift clusters. To configure such exceptions, you should edit the **redshift_public_access** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Redshift cluster ids(s)| + +Sample **whitelist.json** section: +``` +"redshift_public_access": { + "123456789012": ["redshift_id1", "redshift_id2"] +} +``` + +### 3.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. Account-specific settings precede the global settings in the **ticket_owners.json** configuration file. 
+ +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+ +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/initiate-redshift-public-access`| +|Identification |`/aws/lambda/describe-redshift-public-access`| + +### 4.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_redshift_public_access_issue_tickets`| + + +### 4.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 4.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 5. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`id` |string|redshift id |`redshift-id` | +|`tags` |map |Tags associated with Redshift id |`{"Name": "TestKey", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..9c833126 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[Redshift Public Access issues](playbook16_redshift_public_clusters.html#3-issue-remediation) | `No` | `No` | ## 2. 
How Remediation Backup Works diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py index a02a7d22..2412a219 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -3,7 +3,7 @@ from library.logger import set_logging from library.config import Config -from library.aws.redshift import RedshiftClusterPublicAccessChecker +from library.aws.redshift import RedshiftClusterChecker from library.aws.utility import Account from library.ddb_issues import IssueStatus, RedshiftPublicAccessIssue from library.ddb_issues import Operations as IssueOperations @@ -47,7 +47,7 @@ def lambda_handler(event, context): open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} logging.debug(f"Redshift clusters in DDB:\n{open_issues.keys()}") - checker = RedshiftClusterPublicAccessChecker(account=account) + checker = RedshiftClusterChecker(account=account) if checker.check(): for cluster in checker.clusters: logging.debug(f"Checking {cluster.name}") diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 6d068096..271592bd 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -50,23 +50,6 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): return redshift_clusters - @staticmethod - def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): - """ - Sets the cluster encryption using Server side encryption. 
- - :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to encrypt - :param kms_master_key_id: Redshift cluster encryption key. default value is none. - - :return: nothing - """ - - redshift_client.modify_cluster( - ClusterIdentifier=cluster_id, - Encrypted=True - ) - @staticmethod def set_cluster_access(redshift_client, cluster_id, public_access): """ @@ -84,23 +67,6 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) - @staticmethod - def enable_logging(redshift_client, cluster_id, s3_bucket): - """ - Enable cluster audit logging. - - :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to make as private - :param s3_bucket: S3 bucket to store audit logs. - - :return: nothing - """ - - redshift_client.enable_logging( - ClusterIdentifier=cluster_id, - BucketName=s3_bucket - ) - class RedshiftCluster(object): """ @@ -122,18 +88,6 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo self.is_public = is_public self.is_logging = is_logging - def encrypt_cluster(self, kms_key_id=None): - """ - Encrypt bucket with SSL encryption. - :return: nothing - """ - try: - RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id) - except Exception: - logging.exception(f"Failed to encrypt {self.name} cluster ") - return False - - return True def modify_cluster(self, public_access): """ @@ -148,88 +102,11 @@ def modify_cluster(self, public_access): return True - def enable_cluster_logging(self, s3_bucket): - """ - Enable audit logging for cluster. - - @:param s3_bucket: s3 bucket to store audit logs. 
- :return: nothing - """ - try: - RedshiftClusterOperations.enable_logging(self.account.client("redshift"), self.name, s3_bucket) - except Exception: - logging.exception(f"Failed to enable logging for {self.name} cluster ") - return False - - return True - - -class RedshiftEncryptionChecker(object): - """ - Basic class for checking Redshift cluster in account. - Encapsulates discovered Redshift cluster. - """ - def __init__(self, account): - """ - :param account: `Account` instance with Redshift cluster to check - """ - self.account = account - self.clusters = [] - - def get_cluster(self, name): - """ - :return: `Redshift cluster` by name - """ - for cluster in self.clusters: - if cluster.name == name: - return cluster - return None - - def check(self, clusters=None): - """ - Walk through Redshift clusters in the account and check them (encrypted or not). - Put all gathered clusters to `self.clusters`. - - :param clusters: list with Redshift cluster names to check, if it is not supplied - all clusters must be checked - - :return: boolean. 
True - if check was successful, - False - otherwise - """ - try: - # AWS does not support filtering dirung list, so get all clusters for account - response = self.account.client("redshift").describe_clusters() - except ClientError as err: - if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: - logging.error(f"Access denied in {self.account} " - f"(redshift:{err.operation_name})") - else: - logging.exception(f"Failed to list cluster in {self.account}") - return False - - if "Clusters" in response: - for cluster_details in response["Clusters"]: - tags = {} - cluster_id = cluster_details["ClusterIdentifier"] - - if clusters is not None and cluster_id not in clusters: - continue - - is_encrypted = cluster_details["Encrypted"] - if "Tags" in cluster_details: - tags = cluster_details["Tags"] - - cluster = RedshiftCluster(account=self.account, - name=cluster_id, - tags=tags, - is_encrypted=is_encrypted) - self.clusters.append(cluster) - return True - -class RedshiftClusterPublicAccessChecker(object): +class RedshiftClusterChecker(object): """ - Basic class for checking redshift clusters public access in account/region. + Basic class for checking redshift clusters public access and encryption in account/region. Encapsulates check settings and discovered clusters. 
""" def __init__(self, account): @@ -280,12 +157,14 @@ def check(self, clusters=None): continue is_public = cluster_details["PubliclyAccessible"] + is_encrypted = cluster_details["Encrypted"] if "Tags" in cluster_details: tags = cluster_details["Tags"] cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, + is_encrypted = is_encrypted, is_public=is_public) self.clusters.append(cluster) diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index 4029a8fc..91d310ea 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -47,7 +47,7 @@ def clean_redshift_public_access(self, batch=False): # Adding label with "whitelisted" to jira ticket. jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + label=IssueStatus.Whitelisted.value ) continue if not in_fixlist: From 394c24fdf49f575d0b39d1ceb0af740ec2fa5b6e Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Apr 2019 15:20:54 +0530 Subject: [PATCH 019/193] resolved conflicts. resolved conflicts. 
--- deployment/build_packages.sh | 6 +- deployment/cf-templates/ddb.json | 39 +++- deployment/cf-templates/identification.json | 170 ++++++++++-------- .../modules/identification/sources.tf | 5 +- hammer/library/config.py | 5 - hammer/library/ddb_issues.py | 6 +- 6 files changed, 132 insertions(+), 99 deletions(-) diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index e147a4fe..77652b88 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,11 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -<<<<<<< HEAD -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification redshift-cluster-public-access-identification" -======= -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" ->>>>>>> refs/remotes/origin/dev +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification 
ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification redshift-cluster-public-access-identification ami-public-access-issues-identification api" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index dbe196d5..cf91f971 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -426,11 +426,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } }, -<<<<<<< HEAD "DynamoDBRedshiftClusterPublicAccess": { -======= - "DynamoDBAMIPublicAccess": { ->>>>>>> refs/remotes/origin/dev "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", "DependsOn": ["DynamoDBCredentials"], @@ -459,9 +455,39 @@ "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, -<<<<<<< HEAD "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-public-access" ] ]} -======= + } + }, + + "DynamoDBAMIPublicAccess": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} } }, @@ -486,7 +512,6 @@ "WriteCapacityUnits": "2" }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} ->>>>>>> refs/remotes/origin/dev } } } diff --git 
a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 3a423c14..04e2139e 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,11 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", -<<<<<<< HEAD - "SourceIdentificationRedshiftPublicAccess" -======= + "SourceIdentificationRedshiftPublicAccess", "SourceIdentificationAMIPublicAccess" ->>>>>>> refs/remotes/origin/dev ] }, { @@ -94,13 +91,11 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, -<<<<<<< HEAD "SourceIdentificationRedshiftPublicAccess":{ "default": "Relative path to publicly accessible Redshift Cluster sources" -======= + }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" ->>>>>>> refs/remotes/origin/dev } } } @@ -1840,7 +1835,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } } }, - "LambdaEvaluateRDSEncryption": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], @@ -1940,8 +1934,53 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } } }, - -<<<<<<< HEAD + "LambdaEvaluateAMIPublicAccess": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } + }, + "Description": "Lambda function to describe public AMI issues.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_public_ami_issues.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { 
"Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateAMIPublicAccess": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyAMIPublicAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateAMIPublicAccess"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } + } + }, "LambdaInitiateRedshiftPublicAccessEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftPublicAccess", "LogGroupLambdaInitiateRedshiftPublicAccess"], @@ -2007,21 +2046,6 @@ { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftPublicAccessLambdaFunctionName", "value"] } ] ]}, "Handler": "describe_redshift_cluster_public_access.lambda_handler", -======= - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", ->>>>>>> refs/remotes/origin/dev "MemorySize": 256, "Timeout": "300", "Role": {"Fn::Join" : ["", [ 
"arn:aws:iam::", @@ -2033,27 +2057,18 @@ "Runtime": "python3.6" } }, -<<<<<<< HEAD "LogGroupLambdaEvaluateRedshiftPublicAccess": { -======= - "LogGroupLambdaEvaluateAMIPublicAccess": { ->>>>>>> refs/remotes/origin/dev "Type" : "AWS::Logs::LogGroup", "Properties" : { "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", -<<<<<<< HEAD "IdentifyRedshiftPublicAccessLambdaFunctionName", -======= - "IdentifyAMIPublicAccessLambdaFunctionName", ->>>>>>> refs/remotes/origin/dev "value"] } ] ] }, "RetentionInDays": "7" } }, -<<<<<<< HEAD "SubscriptionFilterLambdaEvaluateRedshiftPublicAccess": { "Type" : "AWS::Logs::SubscriptionFilter", "DependsOn": ["LambdaLogsForwarder", @@ -2065,20 +2080,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftPublicAccess" } } }, - -======= - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, ->>>>>>> refs/remotes/origin/dev "EventBackupDDB": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaBackupDDB"], @@ -2243,7 +2244,6 @@ ] } }, -<<<<<<< HEAD "EventInitiateEvaluationRedshiftPublicAccess": { "Type": "AWS::Events::Rule", @@ -2257,7 +2257,10 @@ { "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftPublicAccessEvaluation", "Arn"] }, "Id": "LambdaInitiateRedshiftPublicAccessEvaluation" -======= + } + ] + } + }, "EventInitiateEvaluationAMIPublicAccess": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], @@ -2270,7 +2273,6 @@ { "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, 
"Id": "LambdaInitiateAMIPublicAccessEvaluation" ->>>>>>> refs/remotes/origin/dev } ] } @@ -2417,8 +2419,6 @@ "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } } }, -<<<<<<< HEAD - "PermissionToInvokeLambdaInitiateRedshiftPublicAccessEvaluationCloudWatchEvents": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaInitiateRedshiftPublicAccessEvaluation", "EventInitiateEvaluationRedshiftPublicAccess"], @@ -2427,7 +2427,8 @@ "Action": "lambda:InvokeFunction", "Principal": "events.amazonaws.com", "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftPublicAccess", "Arn"] } -======= + } + }, "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], @@ -2436,7 +2437,6 @@ "Action": "lambda:InvokeFunction", "Principal": "events.amazonaws.com", "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } ->>>>>>> refs/remotes/origin/dev } }, "SNSNotifyLambdaEvaluateSG": { @@ -2655,8 +2655,6 @@ }] } }, -<<<<<<< HEAD - "SNSNotifyLambdaEvaluateRedshiftPublicAccess": { "Type": "AWS::SNS::Topic", "DependsOn": "LambdaEvaluateRedshiftPublicAccess", @@ -2670,7 +2668,11 @@ "Subscription": [{ "Endpoint": { "Fn::GetAtt": ["LambdaEvaluateRedshiftPublicAccess", "Arn"] -======= + }, + "Protocol": "lambda" + }] + } + }, "SNSNotifyLambdaEvaluateAMIPublicAccess": { "Type": "AWS::SNS::Topic", "DependsOn": "LambdaEvaluateAMIPublicAccess", @@ -2684,7 +2686,6 @@ "Subscription": [{ "Endpoint": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] ->>>>>>> refs/remotes/origin/dev }, "Protocol": "lambda" }] @@ -2810,7 +2811,6 @@ "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } } }, -<<<<<<< HEAD "PermissionToInvokeLambdaEvaluateRedshiftPublicAccessSNS": { "Type": "AWS::Lambda::Permission", @@ -2820,7 +2820,8 @@ "Principal": "sns.amazonaws.com", "SourceArn": 
{ "Ref": "SNSNotifyLambdaEvaluateRedshiftPublicAccess" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftPublicAccess", "Arn"] } -======= + } + }, "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { "Type": "AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], @@ -2829,7 +2830,6 @@ "Principal": "sns.amazonaws.com", "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } ->>>>>>> refs/remotes/origin/dev } }, "SNSIdentificationErrors": { @@ -3434,7 +3434,6 @@ "TreatMissingData": "notBreaching" } }, -<<<<<<< HEAD "AlarmErrorsLambdaInitiateRedshiftPublicAccessEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftPublicAccessEvaluation"], @@ -3442,7 +3441,22 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" }, "LambdaError" ] ]}, -======= + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], @@ -3450,18 +3464,13 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, ->>>>>>> refs/remotes/origin/dev "EvaluationPeriods": 1, "Namespace": "AWS/Lambda", 
"MetricName": "Errors", "Dimensions": [ { "Name": "FunctionName", -<<<<<<< HEAD - "Value": { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" } -======= "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } ->>>>>>> refs/remotes/origin/dev } ], "Period": 3600, @@ -3471,7 +3480,6 @@ "TreatMissingData": "notBreaching" } }, -<<<<<<< HEAD "AlarmErrorsLambdaRedshiftPublicAccessEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftPublicAccess"], @@ -3479,7 +3487,22 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftPublicAccess" }, "LambdaError" ] ]}, -======= + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateRedshiftPublicAccess" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, "AlarmErrorsLambdaAMIPublicAccessEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], @@ -3487,18 +3510,13 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, ->>>>>>> refs/remotes/origin/dev "EvaluationPeriods": 1, "Namespace": "AWS/Lambda", "MetricName": "Errors", "Dimensions": [ { "Name": "FunctionName", -<<<<<<< HEAD - "Value": { "Ref": "LambdaEvaluateRedshiftPublicAccess" } -======= "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } ->>>>>>> refs/remotes/origin/dev } ], "Period": 3600, diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index 4bc02b83..b1124915 
100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -90,12 +90,11 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } -<<<<<<< HEAD + resource "aws_s3_bucket_object" "redshift-cluster-public-access-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("redshift-cluster-public-access-identification-%s.zip", "${md5(file("${path.module}/../../../packages/redshift-cluster-public-access-identification.zip"))}")}" source = "${path.module}/../../../packages/redshift-cluster-public-access-identification.zip" } -======= ->>>>>>> refs/remotes/origin/dev + diff --git a/hammer/library/config.py b/hammer/library/config.py index 1b134234..129c94f2 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -63,14 +63,9 @@ def __init__(self, # RDS encryption issue config self.rdsEncrypt = ModuleConfig(self._config, "rds_encryption") -<<<<<<< HEAD - - self.redshift_public_access = ModuleConfig(self._config, "redshift_public_access") -======= # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") ->>>>>>> refs/remotes/origin/dev self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index ed0fad5c..954309f5 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -233,11 +233,11 @@ def __init__(self, *args): super().__init__(*args) -<<<<<<< HEAD class RedshiftPublicAccessIssue(Issue): -======= + def __init__(self, *args): + super().__init__(*args) + class PublicAMIIssue(Issue): 
->>>>>>> refs/remotes/origin/dev def __init__(self, *args): super().__init__(*args) From 3e5bf5ecf2349664ff4078218440d41a30776292 Mon Sep 17 00:00:00 2001 From: MrBakalo Date: Fri, 5 Apr 2019 13:55:21 +0300 Subject: [PATCH 020/193] Add option to disable/enable jira and slack reporting separately --- deployment/configs/config.json | 55 ++++++++++++------- .../remediation/clean_iam_key_rotation.py | 4 ++ .../remediation/clean_iam_keys_inactive.py | 4 ++ .../remediation/clean_public_ebs_snapshots.py | 4 ++ .../remediation/clean_public_rds_snapshots.py | 4 ++ .../clean_s3bucket_acl_permissions.py | 4 ++ .../clean_s3bucket_policy_permissions.py | 4 ++ .../remediation/clean_s3bucket_unencrypted.py | 4 ++ .../remediation/clean_security_groups.py | 4 ++ .../clean_sqs_policy_permissions.py | 4 ++ .../reporting/create_cloudtrail_tickets.py | 4 ++ ...reate_ebs_public_snapshot_issue_tickets.py | 4 ++ .../create_ebs_volume_issue_tickets.py | 4 ++ .../create_iam_key_inactive_tickets.py | 4 ++ .../create_iam_key_rotation_tickets.py | 4 ++ ...reate_rds_public_snapshot_issue_tickets.py | 4 ++ ..._rds_unencrypted_instance_issue_tickets.py | 4 ++ ...ate_s3_unencrypted_bucket_issue_tickets.py | 4 ++ .../create_s3bucket_acl_issue_tickets.py | 4 ++ .../create_s3bucket_policy_issue_tickets.py | 4 ++ .../create_security_groups_tickets.py | 4 ++ .../create_sqs_policy_issue_tickets.py | 4 ++ 22 files changed, 120 insertions(+), 19 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..d3be03ac 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -53,7 +53,9 @@ "topic_name": "hammer-describe-s3-acl-lambda", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "secgrp_unrestricted_access": { "enabled": true, @@ -75,7 +77,9 @@ "reporting": false, "remediation": false, "remediation_accounts": 
["210987654321", "654321210987"], - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "user_inactivekeys": { "enabled": true, @@ -85,7 +89,9 @@ "inactive_criteria_days": 1, "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "user_keysrotation": { "enabled": true, @@ -94,7 +100,9 @@ "rotation_criteria_days": 10, "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "s3_bucket_policy": { "enabled": true, @@ -102,20 +110,26 @@ "topic_name": "hammer-describe-s3-policy-lambda", "reporting": false, "remediation": false, - "remediation_retention_period": 7 + "remediation_retention_period": 7, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "cloudtrails": { "enabled": true, "ddb.table_name": "hammer-cloudtrails", "topic_name": "hammer-describe-cloudtrails-lambda", - "reporting": false + "reporting": false, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "ebs_unencrypted_volume": { "enabled": true, "ddb.table_name": "hammer-ebs-volumes-unencrypted", "topic_name": "hammer-describe-ebs-unencrypted-volumes-lambda", "accounts": ["123456789012", "210987654321"], - "reporting": false + "reporting": false, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "ebs_public_snapshot": { "enabled": true, @@ -123,7 +137,9 @@ "topic_name": "hammer-describe-ebs-public-snapshots-lambda", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "rds_public_snapshot": { "enabled": true, @@ -131,14 +147,9 @@ "topic_name": "hammer-describe-rds-public-snapshots-lambda", "reporting": false, "remediation": false, - 
"remediation_retention_period": 0 - }, - "ec2_public_ami": { - "enabled": true, - "ddb.table_name": "hammer-ec2-public-ami", - "reporting": false, - "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "sqs_public_access": { "enabled": true, @@ -146,7 +157,9 @@ "topic_name": "hammer-describe-sqs-public-policy-lambda", "reporting": true, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "s3_encryption": { "enabled": true, @@ -154,12 +167,16 @@ "topic_name": "hammer-describe-s3-encryption-lambda", "reporting": true, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "jira": {"enabled": true}, + "slack": {"enabled": true} }, "rds_encryption": { "enabled": true, "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", - "reporting": true + "reporting": true, + "jira": {"enabled": true}, + "slack": {"enabled": true} } } diff --git a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py index 65c2c97f..cf91f0e8 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py +++ b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py @@ -113,6 +113,10 @@ def clean_iam_access_keys(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.user_keysrotation.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.user_keysrotation.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git 
a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py index a2d3849c..78716700 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py +++ b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py @@ -113,6 +113,10 @@ def clean_iam_access_keys(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.user_inactivekeys.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.user_inactivekeys.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py index b65d22c8..a1b7b695 100755 --- a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py @@ -117,6 +117,10 @@ def clean_public_ebs_snapshots(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.ebs_public_snapshot.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.ebs_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py index eb72bd10..44241455 100755 --- a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py @@ -112,6 +112,10 @@ def 
clean_public_rds_snapshots(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.rds_public_snapshot.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.rds_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py index 0f3fa5b3..3eba4b7e 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py @@ -133,6 +133,10 @@ def cleans3bucketaclpermissions(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.s3_bucket_acl.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.s3_bucket_acl.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py index 1a6459c6..42e8c6ba 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py @@ -132,6 +132,10 @@ def clean_s3bucket_policy_permissions(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.s3_bucket_policy.jira.enabled + if 
config.slack.enabled: + config.slack.enabled = config.s3_bucket_policy.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py index 75e783ae..7451b109 100644 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py @@ -129,6 +129,10 @@ def cleans3bucketunencrypted(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.s3_encryption.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.s3_encryption.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_security_groups.py b/hammer/reporting-remediation/remediation/clean_security_groups.py index 0465384d..55ce1ff7 100755 --- a/hammer/reporting-remediation/remediation/clean_security_groups.py +++ b/hammer/reporting-remediation/remediation/clean_security_groups.py @@ -142,6 +142,10 @@ def clean_security_groups(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.secgrp_unrestricted_access.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.secgrp_unrestricted_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py index 09946325..c882c8f0 100644 
--- a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py @@ -125,6 +125,10 @@ def clean_sqs_policy_permissions(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.sqs_public_access.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.sqs_public_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 9a9768d0..5d17756f 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -157,6 +157,10 @@ def create_tickets_cloud_trail_logging(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.cloudtrails.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.cloudtrails.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index 204fc4f5..a8501156 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -152,6 +152,10 @@ def create_tickets_ebs_public_snapshots(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, 
logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.ebs_public_snapshot.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.ebs_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index b8dc8db7..19d6d166 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -215,6 +215,10 @@ def create_tickets_ebsvolumes(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.ebs_unencrypted_volume.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.ebs_unencrypted_volume.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index f43b7555..76f0ac15 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -118,6 +118,10 @@ def create_jira_ticket(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.user_inactivekeys.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.user_inactivekeys.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git 
a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 74fd5872..60fcea62 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -116,6 +116,10 @@ def create_jira_ticket(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.user_keysrotation.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.user_keysrotation.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index e0227dca..5f4f2ff1 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -149,6 +149,10 @@ def create_tickets_rds_public_snapshots(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.rds_public_snapshot.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.rds_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index 10a71429..01cf81c4 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py 
+++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -146,6 +146,10 @@ def create_tickets_rds_unencrypted_instances(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.rds_encryption.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.rds_encryption.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index f8b2fdb5..e36c8dde 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -159,6 +159,10 @@ def create_tickets_s3_unencrypted_buckets(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.s3_encryption.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.s3_encryption.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 8fad3747..f99f4ce1 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -171,6 +171,10 @@ def create_tickets_s3buckets(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") 
config = Config() + if config.jira.enabled: + config.jira.enabled = config.s3_bucket_acl.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.s3_bucket_acl.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index b4411daa..54e1234e 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -178,6 +178,10 @@ def create_tickets_s3buckets(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.s3_bucket_policy.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.s3_bucket_policy.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 3ade7aff..f6ef9508 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -492,6 +492,10 @@ def create_tickets_securitygroups(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.secgrp_unrestricted_access.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.secgrp_unrestricted_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git 
a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index 2f452024..dc5671e3 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -182,6 +182,10 @@ def create_tickets_sqs_policy(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() + if config.jira.enabled: + config.jira.enabled = config.sqs_public_access.jira.enabled + if config.slack.enabled: + config.slack.enabled = config.sqs_public_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, From c01e5da7ca6a897d2819b6f47be612efed068ed6 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 9 Apr 2019 14:08:45 +0530 Subject: [PATCH 021/193] Updated with redshift encryption issue changes. Updated with redshift encryption issue changes. 
--- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 32 +++ deployment/cf-templates/identification.json | 222 ++++++++++++++++ .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 5 + .../describe_redshift_encryption.py | 83 ++++++ .../initiate_to_desc_redshift_encryption.py | 36 +++ hammer/library/aws/redshift.py | 240 ++++++++++++++++++ hammer/library/config.py | 1 + hammer/library/ddb_issues.py | 5 + ...shift_unencrypted_cluster_issue_tickets.py | 160 ++++++++++++ 11 files changed, 788 insertions(+), 2 deletions(-) create mode 100644 hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py create mode 100644 hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py create mode 100644 hammer/library/aws/redshift.py create mode 100644 hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 2e00c69c..db160249 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification 
iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api redshift-unencrypted-cluster-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 11fe476b..22ffa9d2 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -458,6 +458,38 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} } }, + "DynamoDBRedshiftClusterEncryption": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-unencrypted" ] ]} + } + }, "DynamoDBApiRequests": { "Type": "AWS::DynamoDB::Table", "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 3618c393..893942af 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,6 +27,7 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", + 
"SourceIdentificationRedshiftClusterEncryption", "SourceIdentificationAMIPublicAccess" ] }, @@ -90,6 +91,9 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, + "SourceIdentificationRedshiftClusterEncryption":{ + "default": "Relative path to unencrypted Redshift Cluster sources" + }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" } @@ -184,6 +188,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationRedshiftClusterEncryption": { + "Type": "String", + "Default": "redshift-unencrypted-cluster-identification.zip" } }, "Conditions": { @@ -241,6 +249,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricRedshiftClusterEncryptionError": { + "value": "RedshiftClusterEncryptionError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -319,6 +330,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameRedshiftClusterEncryption": { + "value": "describe-redshift-cluster-encryption-sns" + }, + "SNSTopicNameRedshiftClusterEncryption": { + "value": "describe-redshift-cluster-encryption-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -402,6 +419,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateRedshiftClusterEncryptionLambdaFunctionName": { + "value": "initiate-redshift-cluster-encryption" + }, + "IdentifyRedshiftClusterEncryptionLambdaFunctionName": { + "value": "describe-redshift-cluster-encryption" } } }, @@ -1960,6 +1983,105 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } } }, + "LambdaInitiateRedshiftClusterEncryptionEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftClusterEncryption", 
"LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationRedshiftClusterEncryption" } + }, + "Environment": { + "Variables": { + "SNS_REDSHIFT_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRedshiftClusterEncryption" } + } + }, + "Description": "Lambda function for initiate to identify unencrypted Redshift clusters.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] + ]}, + "Handler": "initiate_to_desc_redshift_encryption.lambda_handler", + "MemorySize": 128, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "InitiateRedshiftClusterEncryptionLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateRedshiftClusterEncryptionEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation" } + } + }, + "LambdaEvaluateRedshiftClusterEncryption": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateRedshiftClusterEncryption"], + 
"Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationRedshiftClusterEncryption" } + }, + "Description": "Lambda function to describe unencrypted Redshift clusters.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_redshift_encryption.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateRedshiftClusterEncryption": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyRedshiftClusterEncryptionLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateRedshiftClusterEncryption": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateRedshiftClusterEncryption"], + "Properties" : { + "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftClusterEncryption" } + } + }, "EventBackupDDB": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaBackupDDB"], @@ -2140,6 +2262,22 @@ ] } }, + "EventInitiateEvaluationRedshiftClusterEncryption": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateRedshiftClusterEncryptionEvaluation"], + "Properties": { + "Description": "Hammer ScheduledRule to initiate unencrypted Redshift cluster evaluations", + "Name": {"Fn::Join" : ["", [{ 
"Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftClusterEncryption"] ] }, + "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftClusterEncryptionEvaluation", "Arn"] }, + "Id": "LambdaInitiateRedshiftClusterEncryptionEvaluation" + } + ] + } + }, "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaLogsForwarder"], @@ -2292,6 +2430,16 @@ "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } } }, + "PermissionToInvokeLambdaInitiateRedshiftClusterEncryptionEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateRedshiftClusterEncryptionEvaluation", "EventInitiateEvaluationRedshiftClusterEncryption"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateRedshiftClusterEncryptionEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftClusterEncryption", "Arn"] } + } + }, "SNSNotifyLambdaEvaluateSG": { "Type": "AWS::SNS::Topic", "DependsOn": ["LambdaEvaluateSG"], @@ -2526,6 +2674,24 @@ }] } }, + "SNSNotifyLambdaEvaluateRedshiftClusterEncryption": { + "Type": "AWS::SNS::Topic", + "DependsOn": "LambdaEvaluateRedshiftClusterEncryption", + "Properties": { + "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftClusterEncryption", "value"] } ] + ]}, + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftClusterEncryption", "value"] } ] + ]}, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluateRedshiftClusterEncryption", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, "PermissionToInvokeLambdaEvaluateSgSNS": { "Type": 
"AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], @@ -2656,6 +2822,16 @@ "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } } }, + "PermissionToInvokeLambdaEvaluateRedshiftClusterEncryptionSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftClusterEncryption", "LambdaEvaluateRedshiftClusterEncryption"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRedshiftClusterEncryption" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftClusterEncryption", "Arn"] } + } + }, "SNSIdentificationErrors": { "Type": "AWS::SNS::Topic", "Properties": { @@ -3303,6 +3479,52 @@ "Threshold": 0, "TreatMissingData": "notBreaching" } + }, + "AlarmErrorsLambdaInitiateRedshiftClusterEncryptionEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftClusterEncryptionEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftClusterEncryptionEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateRedshiftClusterEncryptionEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaRedshiftClusterEncryptionEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftClusterEncryption"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": 
["/", [ { "Ref": "LambdaEvaluateRedshiftClusterEncryption" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateRedshiftClusterEncryption" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } } }, diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 38c7f93e..5c61813e 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -15,7 +15,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ami-public-access-issues-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.redshift-unencrypted-cluster-identification" ] tags = "${var.tags}" @@ -42,6 +43,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationRedshiftClusterEncryption = "${aws_s3_bucket_object.redshift-unencrypted-cluster-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index c839c312..7fb3a64b 
100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -91,3 +91,8 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } +resource "aws_s3_bucket_object" "redshift-unencrypted-cluster-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("redshift-unencrypted-cluster-identification-%s.zip", "${md5(file("${path.module}/../../../packages/redshift-unencrypted-cluster-identification.zip"))}")}" + source = "${path.module}/../../../packages/redshift-unencrypted-cluster-identification.zip" +} \ No newline at end of file diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py new file mode 100644 index 00000000..a3001955 --- /dev/null +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py @@ -0,0 +1,83 @@ +import json +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.redshift import RedshiftEncryptionChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, RedshiftEncryptionIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate Redshift cluster encryption """ + set_logging(level=logging.DEBUG) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # region = payload['region'] + except Exception: + logging.exception(f"Failed to parse 
event\n{event}") + return + + try: + config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.redshiftEncrypt.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking for unencrypted Redshift clusters policies in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RedshiftEncryptionIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"Redshift clusters in DDB:\n{open_issues.keys()}") + + checker = RedshiftEncryptionChecker(account=account) + if checker.check(): + for cluster in checker.clusters: + logging.debug(f"Checking {cluster.name}") + if not cluster.is_encrypt: + issue = RedshiftEncryptionIssue(account_id, cluster.name) + issue.issue_details.tags = cluster.tags + issue.issue_details.region = cluster.account.region + if config.redshiftEncrypt.in_whitelist(account_id, cluster.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {cluster.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(cluster.name, None) + + logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated clusters + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + except Exception: + logging.exception(f"Failed to check Redshift clusters for '{account_id} ({account_name})'") + return + + # push SNS messages until the list with 
regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain insecure services checking") + + logging.debug(f"Checked Redshift Clusters for '{account_id} ({account_name})'") \ No newline at end of file diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py new file mode 100644 index 00000000..d6cc8606 --- /dev/null +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py @@ -0,0 +1,36 @@ +import os +import logging + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find unencrypted Redshift clusters """ + set_logging(level=logging.INFO) + logging.debug("Initiating Redshift Clusters checking") + + try: + sns_arn = os.environ["SNS_REDSHIFT_ENCRYPT_ARN"] + config = Config() + + if not config.redshiftEncrypt.enabled: + logging.debug("Redshift clusters checking disabled") + return + + logging.debug("Iterating over each account to initiate Redshift Clusters check") + for account_id, account_name in config.redshiftEncrypt.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating unencrypted Redshift clusters checking for '{account_name}'") + Sns.publish(sns_arn, payload) + + except Exception: + logging.exception("Error occurred while initiation of unencrypted Redshift cluster checking") + return + + logging.debug("unencrypted Redshift clusters checking initiation done") diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py new file mode 100644 
index 00000000..271592bd --- /dev/null +++ b/hammer/library/aws/redshift.py @@ -0,0 +1,240 @@ +import json +import logging +import mimetypes +import pathlib + +from datetime import datetime, timezone +from io import BytesIO +from copy import deepcopy +from botocore.exceptions import ClientError +from library.utility import jsonDumps +from library.utility import timeit +from library.aws.security_groups import SecurityGroup +from collections import namedtuple +from library.aws.utility import convert_tags + + +# structure which describes EC2 instance +RedshiftCluster_Details = namedtuple('RedshiftCluster_Details', [ + # cluster_id + 'id', + # subnet_group_id + 'subnet_group_name' + ]) + +class RedshiftClusterOperations(object): + + @classmethod + @timeit + def get_redshift_vpc_security_groups(cls, redshift_client, group_id): + """ Retrieve redshift clusters meta data with security group attached + + :param redshift_client: boto3 redshift client + :param group_id: security group id + + :return: list with redshift clusters details + """ + # describe rds instances with security group attached + redshift_clusters = [] + + # this will include Clusters + clusters_res = redshift_client.describe_clusters() + for cluster in clusters_res["Clusters"]: + active_security_groups = [sg["VpcSecurityGroupId"] for sg in cluster['VpcSecurityGroups'] if + sg["Status"] == "active"] + if group_id in active_security_groups: + redshift_clusters.append(RedshiftCluster_Details( + id=cluster["ClusterIdentifier"], + subnet_group_name=cluster["ClusterSubnetGroupName"] + )) + + return redshift_clusters + + @staticmethod + def set_cluster_access(redshift_client, cluster_id, public_access): + """ + Sets the cluster access as private. + + :param redshift_client: Redshift boto3 client + :param cluster_id: Redshift cluster name which to make as private + :param public_access: Redshift cluster public access True or False. 
+ + :return: nothing + """ + + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + PubliclyAccessible=public_access + ) + + +class RedshiftCluster(object): + """ + Basic class for Redshift Cluster. + Encapsulates `Owner`/`Tags`. + """ + def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_logging=None): + """ + :param account: `Account` instance where redshift cluster is present + + :param name: `Name` of cluster id + :param tags: tags if redshift cluster tags (as AWS returns) + :param is_encrypted: encrypted or not. + """ + self.account = account + self.name =name + self.tags = convert_tags(tags) + self.is_encrypt = is_encrypted + self.is_public = is_public + self.is_logging = is_logging + + + def modify_cluster(self, public_access): + """ + Modify cluster as private. + :return: nothing + """ + try: + RedshiftClusterOperations.set_cluster_access(self.account.client("redshift"), self.name, public_access) + except Exception: + logging.exception(f"Failed to modify {self.name} cluster ") + return False + + return True + + +class RedshiftClusterChecker(object): + + """ + Basic class for checking redshift clusters public access and encryption in account/region. + Encapsulates check settings and discovered clusters. + """ + def __init__(self, account): + """ + :param account: `Account` clusters to check + + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + + def check(self, clusters=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked + + :return: boolean. 
True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + is_public = cluster_details["PubliclyAccessible"] + is_encrypted = cluster_details["Encrypted"] + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_encrypted = is_encrypted, + is_public=is_public) + self.clusters.append(cluster) + + return True + + +class RedshiftLoggingChecker(object): + """ + Basic class for checking redshift cluster's logging enabled or not in account/region. + Encapsulates check settings and discovered clusters. + """ + + def __init__(self, account): + """ + :param account: `Account` clusters to check + + """ + self.account = account + self.clusters = [] + + def get_cluster(self, name): + """ + :return: `Redshift cluster` by name + """ + for cluster in self.clusters: + if cluster.name == name: + return cluster + return None + + def check(self, clusters=None): + """ + Walk through clusters in the account/region and check them. + Put all gathered clusters to `self.clusters`. + + :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked + + :return: boolean. 
True - if check was successful, + False - otherwise + """ + try: + # AWS does not support filtering dirung list, so get all clusters for account + response = self.account.client("redshift").describe_clusters() + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to list cluster in {self.account}") + return False + + if "Clusters" in response: + for cluster_details in response["Clusters"]: + logging_enabled = True + tags = {} + cluster_id = cluster_details["ClusterIdentifier"] + + if clusters is not None and cluster_id not in clusters: + continue + + logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + + if "Tags" in cluster_details: + tags = cluster_details["Tags"] + + cluster = RedshiftCluster(account=self.account, + name=cluster_id, + tags=tags, + is_logging=logging_enabled) + self.clusters.append(cluster) + + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 504f1a1d..282e762e 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -63,6 +63,7 @@ def __init__(self, # RDS encryption issue config self.rdsEncrypt = ModuleConfig(self._config, "rds_encryption") + self.redshiftEncrypt = ModuleConfig(self._config, "redshift_encryption") # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index d9ae7de2..7b949b5a 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -233,6 +233,11 @@ def __init__(self, *args): super().__init__(*args) +class RedshiftEncryptionIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + 
+ class PublicAMIIssue(Issue): def __init__(self, *args): super().__init__(*args) diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py new file mode 100644 index 00000000..4c36e39a --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -0,0 +1,160 @@ +""" +Class to create redshift unencrypted cluster issue tickets. +""" +import sys +import logging + + +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.ddb_issues import IssueStatus, RedshiftEncryptionIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateRedshiftUnencryptedInstanceTickets(object): + """ Class to create redshift unencrypted cluster issue issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_redshift_unencrypted_cluster(self): + """ Class method to create jira tickets """ + table_name = self.config.redshiftEncrypt.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.aws.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, RedshiftEncryptionIssue) + for issue in issues: + cluster_id = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + # issue has been already reported + if issue.timestamps.reported 
is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Redshift unencrypted cluster '{cluster_id}' issue") + + comment = (f"Closing {issue.status.value} Redshift unencrypted cluster '{cluster_id}' issue " + f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status == IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + labels=IssueStatus.Whitelisted + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {cluster_id}") + slack.report_issue( + msg=f"Redshift unencrypted cluster '{cluster_id}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + logging.debug(f"No changes for '{cluster_id}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting Redshift unencrypted cluster '{cluster_id}' issue") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_summary = (f"Redshift unencrypted cluster '{cluster_id}'" + f"in '{account_name} / 
{account_id}' account{' [' + bu + ']' if bu else ''}") + + issue_description = ( + f"The Redshift Cluster is unencrypted.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*Redshift Cluster ID*: {cluster_id}\n") + + auto_remediation_date = (self.config.now + self.config.redshiftEncrypt.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += JiraOperations.build_tags_table(tags) + + issue_description += "\n" + issue_description += ( + f"*Recommendation*: " + f"Encrypt Redshift cluster.") + + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["redshift-unencrypted-clusters"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + + issue.jira_details.owner = owner + issue.jira_details.business_unit = bu + issue.jira_details.product = product + + slack.report_issue( + msg=f"Discovered {issue_summary}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + IssueOperations.set_status_reported(ddb_table, issue) + + +if __name__ == '__main__': + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already 
running, quitting") + sys.exit(1) + + try: + obj = CreateRedshiftUnencryptedInstanceTickets(config) + obj.create_tickets_redshift_unencrypted_cluster() + except Exception: + logging.exception("Failed to create redshift unencrypted cluster tickets") From 3e7ad948583340ad228bc11cff22b1d38f816c04 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 9 Apr 2019 15:08:46 +0530 Subject: [PATCH 022/193] Updated with Redshift logging issue changes. Updated with Redshift logging issue changes. --- deployment/build_packages.sh | 6 +- deployment/cf-templates/ddb.json | 38 +- deployment/cf-templates/identification.json | 642 +++--------------- .../modules/identification/sources.tf | 4 +- hammer/library/config.py | 3 - hammer/library/ddb_issues.py | 7 +- 6 files changed, 149 insertions(+), 551 deletions(-) diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 65e16f4a..afb7bb8f 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,11 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -<<<<<<< HEAD -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification redshift-audit-logging-issues-identification" -======= -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification 
ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" ->>>>>>> refs/remotes/origin/dev +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api redshift-audit-logging-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index e0b2eccd..0e87b36f 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -426,11 +426,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } }, -<<<<<<< HEAD "DynamoDBRedshiftLogging": { -======= - "DynamoDBAMIPublicAccess": { ->>>>>>> refs/remotes/origin/dev "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", "DependsOn": ["DynamoDBCredentials"], @@ -459,9 +455,38 @@ "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, -<<<<<<< HEAD "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-logging" ] ]} -======= + } + }, + "DynamoDBAMIPublicAccess": { + "Type": "AWS::DynamoDB::Table", + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", 
+ "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} } }, @@ -486,7 +511,6 @@ "WriteCapacityUnits": "2" }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} ->>>>>>> refs/remotes/origin/dev } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 106d8344..d8a2ca1e 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,11 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", -<<<<<<< HEAD - "SourceIdentificationRedshiftLogging" -======= + "SourceIdentificationRedshiftLogging", "SourceIdentificationAMIPublicAccess" ->>>>>>> refs/remotes/origin/dev ] }, { @@ -94,13 +91,11 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, -<<<<<<< HEAD - "SourceIdentificationRedshiftLogging":{ + "SourceIdentificationRedshiftLogging": { "default": "Relative path to disabled logging Redshift Cluster sources" -======= + }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" ->>>>>>> refs/remotes/origin/dev } } } @@ -431,7 +426,7 @@ "IdentifyRedshiftLoggingLambdaFunctionName": { "value": "describe-redshift-logging" } - } + } }, "Resources": { "LambdaLogsForwarder": { @@ -1940,446 +1935,53 @@ "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } } }, - -<<<<<<< HEAD - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + "LambdaEvaluateAMIPublicAccess": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } + }, + "Description": "Lambda function to describe public AMI issues.", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "Handler": "describe_public_ami_issues.lambda_handler", + "MemorySize": 256, + "Timeout": "300", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluateAMIPublicAccess": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "IdentifyAMIPublicAccessLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LambdaLogsForwarder", + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", + "LogGroupLambdaEvaluateAMIPublicAccess"], + "Properties" : { + "DestinationArn" 
: { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } + } + }, "LambdaInitiateRedshiftLoggingEvaluation": { "Type": "AWS::Lambda::Function", "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftLogging", "LogGroupLambdaInitiateRedshiftLoggingEvaluation"], @@ -2445,21 +2047,6 @@ { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftLoggingLambdaFunctionName", "value"] } ] ]}, "Handler": "describe_redshift_logging_issues.lambda_handler", -======= - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", ->>>>>>> refs/remotes/origin/dev "MemorySize": 256, "Timeout": "300", "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", @@ -2471,27 +2058,18 @@ "Runtime": "python3.6" } }, -<<<<<<< HEAD "LogGroupLambdaEvaluateRedshiftLogging": { -======= - "LogGroupLambdaEvaluateAMIPublicAccess": { ->>>>>>> refs/remotes/origin/dev "Type" : "AWS::Logs::LogGroup", "Properties" : { "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", -<<<<<<< HEAD "IdentifyRedshiftLoggingLambdaFunctionName", -======= - "IdentifyAMIPublicAccessLambdaFunctionName", ->>>>>>> refs/remotes/origin/dev "value"] } ] ] }, "RetentionInDays": "7" } }, -<<<<<<< HEAD "SubscriptionFilterLambdaEvaluateRedshiftLogging": { "Type" : "AWS::Logs::SubscriptionFilter", "DependsOn": ["LambdaLogsForwarder", @@ -2503,20 
+2081,6 @@ "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftLogging" } } }, - -======= - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, ->>>>>>> refs/remotes/origin/dev "EventBackupDDB": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaBackupDDB"], @@ -2681,21 +2245,22 @@ ] } }, -<<<<<<< HEAD - "EventInitiateEvaluationRedshiftLogging": { "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluation"], + "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluaion"], "Properties": { "Description": "Hammer ScheduledRule to initiate audit logging issue Redshift cluster evaluations", "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftLogging"] ] }, "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", + "State": "ENALED", "Targets": [ { "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftLoggingEvaluation", "Arn"] }, "Id": "LambdaInitiateRedshiftLoggingEvaluation" -======= + } + ] + } + }, "EventInitiateEvaluationAMIPublicAccess": { "Type": "AWS::Events::Rule", "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], @@ -2708,7 +2273,6 @@ { "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, "Id": "LambdaInitiateAMIPublicAccessEvaluation" ->>>>>>> refs/remotes/origin/dev } ] } @@ -2855,17 +2419,16 @@ "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } } }, -<<<<<<< HEAD - 
"PermissionToInvokeLambdaInitiateRedshiftLoggingEvaluationCloudWatchEvents": { "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluation", "EventInitiateEvaluationRedshiftLogging"], + "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluation", "EventInitiateEvaluationRedshiftLoggig"], "Properties": { "FunctionName": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, "Action": "lambda:InvokeFunction", "Principal": "events.amazonaws.com", "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftLogging", "Arn"] } -======= + } + }, "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { "Type": "AWS::Lambda::Permission", "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], @@ -2874,7 +2437,6 @@ "Action": "lambda:InvokeFunction", "Principal": "events.amazonaws.com", "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } ->>>>>>> refs/remotes/origin/dev } }, "SNSNotifyLambdaEvaluateSG": { @@ -3093,10 +2655,9 @@ }] } }, -<<<<<<< HEAD "SNSNotifyLambdaEvaluateRedshiftLogging": { "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRedshiftLogging", + "DependsOn": "LambdaEvaluateRedshiftLoggig", "Properties": { "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftLogging", "value"] } ] @@ -3107,7 +2668,11 @@ "Subscription": [{ "Endpoint": { "Fn::GetAtt": ["LambdaEvaluateRedshiftLogging", "Arn"] -======= + }, + "Protocol": "lambda" + }] + } + }, "SNSNotifyLambdaEvaluateAMIPublicAccess": { "Type": "AWS::SNS::Topic", "DependsOn": "LambdaEvaluateAMIPublicAccess", @@ -3121,7 +2686,6 @@ "Subscription": [{ "Endpoint": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] ->>>>>>> refs/remotes/origin/dev }, "Protocol": "lambda" }] @@ -3247,7 +2811,6 @@ "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } } }, -<<<<<<< HEAD 
"PermissionToInvokeLambdaEvaluateRedshiftLoggingSNS": { "Type": "AWS::Lambda::Permission", @@ -3257,7 +2820,8 @@ "Principal": "sns.amazonaws.com", "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRedshiftLogging" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftLogging", "Arn"] } -======= + } + }, "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { "Type": "AWS::Lambda::Permission", "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], @@ -3266,7 +2830,6 @@ "Principal": "sns.amazonaws.com", "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } ->>>>>>> refs/remotes/origin/dev } }, "SNSIdentificationErrors": { @@ -3871,15 +3434,6 @@ "TreatMissingData": "notBreaching" } }, -<<<<<<< HEAD - "AlarmErrorsLambdaInitiateRedshiftLoggingEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftLoggingEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, "LambdaError" ] ]}, -======= "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], @@ -3887,18 +3441,13 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, ->>>>>>> refs/remotes/origin/dev "EvaluationPeriods": 1, "Namespace": "AWS/Lambda", "MetricName": "Errors", "Dimensions": [ { "Name": "FunctionName", -<<<<<<< HEAD - "Value": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" } -======= "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } ->>>>>>> 
refs/remotes/origin/dev } ], "Period": 3600, @@ -3908,7 +3457,29 @@ "TreatMissingData": "notBreaching" } }, -<<<<<<< HEAD + "AlarmErrorsLambdaAMIPublicAccessEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, "AlarmErrorsLambdaRedshiftLoggingEvaluation": { "Type": "AWS::CloudWatch::Alarm", "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftLogging"], @@ -3916,26 +3487,36 @@ "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftLogging" }, "LambdaError" ] ]}, -======= - "AlarmErrorsLambdaAMIPublicAccessEvaluation": { + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluateRedshiftLogging" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaInitiateRedshiftLoggingEvaluation": { "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], + "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftLoggingEvaluation"], "Properties": { "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], 
"OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, ->>>>>>> refs/remotes/origin/dev + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, "LambdaError" ] ]}, "EvaluationPeriods": 1, "Namespace": "AWS/Lambda", "MetricName": "Errors", "Dimensions": [ { "Name": "FunctionName", -<<<<<<< HEAD - "Value": { "Ref": "LambdaEvaluateRedshiftLogging" } -======= - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } ->>>>>>> refs/remotes/origin/dev + "Value": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" } } ], "Period": 3600, @@ -3945,6 +3526,7 @@ "TreatMissingData": "notBreaching" } } + }, "Outputs": { diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index 94d54e56..19e55097 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -90,12 +90,10 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } -<<<<<<< HEAD + resource "aws_s3_bucket_object" "redshift-audit-logging-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("redshift-audit-logging-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/redshift-audit-logging-issues-identification.zip"))}")}" source = "${path.module}/../../../packages/redshift-audit-logging-issues-identification.zip" } -======= ->>>>>>> refs/remotes/origin/dev diff --git a/hammer/library/config.py b/hammer/library/config.py index 9e2f5231..40ccc283 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -63,13 +63,10 @@ 
def __init__(self, # RDS encryption issue config self.rdsEncrypt = ModuleConfig(self._config, "rds_encryption") -<<<<<<< HEAD self.redshift_logging = ModuleConfig(self._config, "redshift_logging") -======= # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") ->>>>>>> refs/remotes/origin/dev self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index cd936f4c..24bd74e1 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -233,11 +233,12 @@ def __init__(self, *args): super().__init__(*args) -<<<<<<< HEAD class RedshiftLoggingIssue(Issue): -======= + def __init__(self, *args): + super().__init__(*args) + + class PublicAMIIssue(Issue): ->>>>>>> refs/remotes/origin/dev def __init__(self, *args): super().__init__(*args) From 11fe7c0776f4e5f510cf4226bdbc79dffe151c01 Mon Sep 17 00:00:00 2001 From: MrBakalo Date: Wed, 10 Apr 2019 13:59:13 +0300 Subject: [PATCH 023/193] Enhanced granular reporting capability --- deployment/configs/config.json | 61 +++++++++++-------- hammer/library/jiraoperations.py | 40 ++++++------ hammer/library/slack_utility.py | 5 +- .../remediation/clean_ami_public_access.py | 4 +- .../remediation/clean_iam_key_rotation.py | 8 +-- .../remediation/clean_iam_keys_inactive.py | 8 +-- .../remediation/clean_public_ebs_snapshots.py | 8 +-- .../remediation/clean_public_rds_snapshots.py | 8 +-- .../clean_s3bucket_acl_permissions.py | 8 +-- .../clean_s3bucket_policy_permissions.py | 8 +-- .../remediation/clean_s3bucket_unencrypted.py | 8 +-- .../remediation/clean_security_groups.py | 8 +-- .../clean_sqs_policy_permissions.py | 8 +-- .../reporting/create_cloudtrail_tickets.py | 8 +-- ...reate_ebs_public_snapshot_issue_tickets.py | 8 +-- .../create_ebs_volume_issue_tickets.py | 8 +-- .../create_iam_key_inactive_tickets.py | 8 +-- 
.../create_iam_key_rotation_tickets.py | 8 +-- .../create_public_ami_issue_tickets.py | 4 +- ...reate_rds_public_snapshot_issue_tickets.py | 8 +-- ..._rds_unencrypted_instance_issue_tickets.py | 8 +-- ...ate_s3_unencrypted_bucket_issue_tickets.py | 8 +-- .../create_s3bucket_acl_issue_tickets.py | 8 +-- .../create_s3bucket_policy_issue_tickets.py | 8 +-- .../create_security_groups_tickets.py | 8 +-- .../create_sqs_policy_issue_tickets.py | 8 +-- 26 files changed, 101 insertions(+), 181 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index d3be03ac..f8b94f67 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -51,11 +51,11 @@ "enabled": true, "ddb.table_name": "hammer-s3-public-bucket-acl", "topic_name": "hammer-describe-s3-acl-lambda", - "reporting": false, - "remediation": false, + "reporting": true, + "remediation": true, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "secgrp_unrestricted_access": { "enabled": true, @@ -78,8 +78,8 @@ "remediation": false, "remediation_accounts": ["210987654321", "654321210987"], "remediation_retention_period": 21, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "user_inactivekeys": { "enabled": true, @@ -90,8 +90,8 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "user_keysrotation": { "enabled": true, @@ -101,8 +101,8 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "s3_bucket_policy": { "enabled": true, @@ -111,16 +111,16 @@ "reporting": false, "remediation": false, "remediation_retention_period": 7, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true 
}, "cloudtrails": { "enabled": true, "ddb.table_name": "hammer-cloudtrails", "topic_name": "hammer-describe-cloudtrails-lambda", "reporting": false, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "ebs_unencrypted_volume": { "enabled": true, @@ -128,8 +128,8 @@ "topic_name": "hammer-describe-ebs-unencrypted-volumes-lambda", "accounts": ["123456789012", "210987654321"], "reporting": false, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "ebs_public_snapshot": { "enabled": true, @@ -138,8 +138,8 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "rds_public_snapshot": { "enabled": true, @@ -148,8 +148,17 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true + }, + "ec2_public_ami": { + "enabled": true, + "ddb.table_name": "hammer-ec2-public-ami", + "reporting": false, + "remediation": false, + "remediation_retention_period": 21, + "jira": true, + "slack": true }, "sqs_public_access": { "enabled": true, @@ -158,8 +167,8 @@ "reporting": true, "remediation": false, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "s3_encryption": { "enabled": true, @@ -168,15 +177,15 @@ "reporting": true, "remediation": false, "remediation_retention_period": 0, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true }, "rds_encryption": { "enabled": true, "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true, - "jira": {"enabled": true}, - "slack": {"enabled": true} + "jira": true, + "slack": true } } diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index 
cc71c00e..41322197 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -17,10 +17,18 @@ class JiraReporting(object): """ Base class for JIRA reporting """ - def __init__(self, config): + def __init__(self, config, module=''): self.config = config - self.jira = JiraOperations(self.config) + self.jira = JiraOperations(self.config, module=module) + self.module_jira_enabled = getattr(config, module).jira if hasattr(config, module) else True + def jira_enabled(func): + def decorated(self, *args, **kwargs): + if self.config.jira.enabled and self.module_jira_enabled: + return func(self, *args, **kwargs) + return decorated + + @jira_enabled def add_issue(self, issue_summary, issue_description, priority, labels, @@ -28,9 +36,6 @@ def add_issue(self, owner=None, bu=None, product=None, ): - # TODO: move to decorator - if not self.config.jira.enabled: - return None project = self.config.owners.ticket_project( bu=bu, product=product, @@ -74,36 +79,24 @@ def add_issue(self, return NewIssue(ticket_id=ticket_id, ticket_assignee_id=ticket_assignee_id) + @jira_enabled def close_issue(self, ticket_id, comment): - # TODO: move to decorator - if not self.config.jira.enabled: - return - self.jira.add_comment(ticket_id, comment) self.jira.close_issue(ticket_id) logging.debug(f"Closed issue ({self.jira.ticket_url(ticket_id)})") + @jira_enabled def update_issue(self, ticket_id, comment): - # TODO: move to decorator - if not self.config.jira.enabled: - return - # TODO: reopen ticket if closed self.jira.add_comment(ticket_id, comment) logging.debug(f"Updated issue {self.jira.ticket_url(ticket_id)}") + @jira_enabled def add_attachment(self, ticket_id, filename, text): - # TODO: move to decorator - if not self.config.jira.enabled: - return - return self.jira.add_attachment(ticket_id, filename, text) + @jira_enabled def remediate_issue(self, ticket_id, comment, reassign): - # TODO: move to decorator - if not self.config.jira.enabled: - return - if reassign: 
self.jira.assign_user(ticket_id, self.jira.current_user) self.jira.add_comment(ticket_id, comment) @@ -116,7 +109,7 @@ def add_label(self, ticket_id, label): class JiraOperations(object): """ Base class for interaction with JIRA """ - def __init__(self, config): + def __init__(self, config, module=''): # do not print excess warnings urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # JIRA configuration from config.json/DDB @@ -125,8 +118,9 @@ def __init__(self, config): self.server = self.config.jira.server # JIRA established session self.session = None + self.module_jira_enabled = getattr(config, module).jira if hasattr(config, module) else True - if self.config.jira.enabled: + if self.config.jira.enabled and self.module_jira_enabled: self.login_oauth() else: logging.debug("JIRA integration is disabled") diff --git a/hammer/library/slack_utility.py b/hammer/library/slack_utility.py index a9bc179a..232a6700 100755 --- a/hammer/library/slack_utility.py +++ b/hammer/library/slack_utility.py @@ -9,10 +9,11 @@ class SlackNotification(object): - def __init__(self, config=None): + def __init__(self, config=None, module=''): self.config = Config() if config is None else config self.sc = SlackClient(self.config.slack.api_token) self.slackUser = "hammer" + self.module_slack_enabled = getattr(config, module).slack if hasattr(config, module) else True @property @lru_cache(maxsize=1) @@ -47,7 +48,7 @@ def user_id(self, user): return self.users.get(user.lower(), None) def post_message(self, msg, owner=None): - if not self.config.slack.enabled: + if not self.config.slack.enabled or not self.module_slack_enabled: return # if owner is not set - try to find channel to send msg to based on msg body diff --git a/hammer/reporting-remediation/remediation/clean_ami_public_access.py b/hammer/reporting-remediation/remediation/clean_ami_public_access.py index b08317ed..efe90b80 100644 --- a/hammer/reporting-remediation/remediation/clean_ami_public_access.py +++ 
b/hammer/reporting-remediation/remediation/clean_ami_public_access.py @@ -28,8 +28,8 @@ def clean_ami_public_access(self): retention_period = self.config.publicAMIs.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='publicAMIs') + slack = SlackNotification(self.config, module='publicAMIs') for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py index cf91f0e8..ce922411 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py +++ b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py @@ -30,8 +30,8 @@ def clean_iam_access_keys(self, batch=False): retention_period = self.config.iamUserKeysRotation.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='iamUserKeysRotation') + slack = SlackNotification(self.config, module='iamUserKeysRotation') for account_id, account_name in self.config.iamUserKeysRotation.remediation_accounts.items(): logging.debug("* Account Name:" + account_name + " :::Account ID:::" + account_id) @@ -113,10 +113,6 @@ def clean_iam_access_keys(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.user_keysrotation.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.user_keysrotation.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py 
b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py index 78716700..3407b013 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py +++ b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py @@ -30,8 +30,8 @@ def clean_iam_access_keys(self, batch=False): retention_period = self.config.iamUserInactiveKeys.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='iamUserInactiveKeys') + slack = SlackNotification(self.config, module='iamUserInactiveKeys') for account_id, account_name in self.config.iamUserInactiveKeys.remediation_accounts.items(): logging.debug("* Account Name:" + account_name + " :::Account ID:::" + account_id) @@ -113,10 +113,6 @@ def clean_iam_access_keys(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.user_inactivekeys.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.user_inactivekeys.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py index a1b7b695..8a5b788f 100755 --- a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py @@ -30,8 +30,8 @@ def clean_public_ebs_snapshots(self, batch=False): retention_period = self.config.ebsSnapshot.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='ebsSnapshot') + slack = SlackNotification(self.config, module='ebsSnapshot') for account_id, account_name in 
self.config.ebsSnapshot.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -117,10 +117,6 @@ def clean_public_ebs_snapshots(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.ebs_public_snapshot.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.ebs_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py index 44241455..164b61f7 100755 --- a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py @@ -31,8 +31,8 @@ def clean_public_rds_snapshots(self, batch=False): retention_period = self.config.rdsSnapshot.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='rdsSnapshot') + slack = SlackNotification(self.config, module='rdsSnapshot') for account_id, account_name in self.config.rdsSnapshot.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -112,10 +112,6 @@ def clean_public_rds_snapshots(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.rds_public_snapshot.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.rds_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git 
a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py index 3eba4b7e..2181cd45 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py @@ -31,8 +31,8 @@ def cleans3bucketaclpermissions(self, batch=False): retention_period = self.config.s3acl.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='s3acl') + slack = SlackNotification(self.config, module='s3acl') for account_id, account_name in self.config.s3acl.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -133,10 +133,6 @@ def cleans3bucketaclpermissions(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.s3_bucket_acl.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.s3_bucket_acl.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py index 42e8c6ba..83102dd8 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py @@ -31,8 +31,8 @@ def clean_s3bucket_policy_permissions(self, batch=False): retention_period = self.config.s3policy.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='s3policy') + slack = SlackNotification(self.config, module='s3policy') for 
account_id, account_name in self.config.s3policy.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -132,10 +132,6 @@ def clean_s3bucket_policy_permissions(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.s3_bucket_policy.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.s3_bucket_policy.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py index 7451b109..94be2897 100644 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py @@ -30,8 +30,8 @@ def cleans3bucketunencrypted(self, batch=False): retention_period = self.config.s3Encrypt.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='s3Encrypt') + slack = SlackNotification(self.config, module='s3Encrypt') for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -129,10 +129,6 @@ def cleans3bucketunencrypted(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.s3_encryption.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.s3_encryption.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_security_groups.py 
b/hammer/reporting-remediation/remediation/clean_security_groups.py index 55ce1ff7..22bb866c 100755 --- a/hammer/reporting-remediation/remediation/clean_security_groups.py +++ b/hammer/reporting-remediation/remediation/clean_security_groups.py @@ -31,8 +31,8 @@ def clean_security_groups(self, batch=False): retention_period = self.config.sg.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='sg') + slack = SlackNotification(self.config, module='sg') for account_id, account_name in self.config.sg.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -142,10 +142,6 @@ def clean_security_groups(self, batch=False): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.secgrp_unrestricted_access.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.secgrp_unrestricted_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py index c882c8f0..ced1d0fe 100644 --- a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py @@ -29,8 +29,8 @@ def clean_sqs_policy_permissions(self): retention_period = self.config.sqspolicy.remediation_retention_period - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='sqspolicy') + slack = SlackNotification(self.config, module='sqspolicy') for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -125,10 +125,6 
@@ def clean_sqs_policy_permissions(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.sqs_public_access.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.sqs_public_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 5d17756f..dea4472a 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -46,8 +46,8 @@ def create_tickets_cloud_trail_logging(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='cloudtrails') + slack = SlackNotification(self.config, module='cloudtrails') for account_id, account_name in self.config.cloudtrails.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -157,10 +157,6 @@ def create_tickets_cloud_trail_logging(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.cloudtrails.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.cloudtrails.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index a8501156..675dc50d 100755 --- 
a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -26,8 +26,8 @@ def create_tickets_ebs_public_snapshots(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='ebsSnapshot') + slack = SlackNotification(self.config, module='ebsSnapshot') for account_id, account_name in self.config.ebsSnapshot.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -152,10 +152,6 @@ def create_tickets_ebs_public_snapshots(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.ebs_public_snapshot.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.ebs_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index 19d6d166..0ba6f067 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -74,8 +74,8 @@ def create_tickets_ebsvolumes(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='ebsVolume') + slack = SlackNotification(self.config, module='ebsVolume') for account_id, account_name in self.config.ebsVolume.accounts.items(): logging.debug(f"Checking 
'{account_name} / {account_id}'") @@ -215,10 +215,6 @@ def create_tickets_ebsvolumes(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.ebs_unencrypted_volume.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.ebs_unencrypted_volume.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index 76f0ac15..ea08b0bc 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -27,8 +27,8 @@ def create_jira_ticket(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='iamUserInactiveKeys') + slack = SlackNotification(self.config, module='iamUserInactiveKeys') for account_id, account_name in self.config.iamUserInactiveKeys.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -118,10 +118,6 @@ def create_jira_ticket(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.user_inactivekeys.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.user_inactivekeys.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py 
b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 60fcea62..53787ab7 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -27,8 +27,8 @@ def create_jira_ticket(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='iamUserKeysRotation') + slack = SlackNotification(self.config, module='iamUserKeysRotation') for account_id, account_name in self.config.iamUserKeysRotation.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -116,10 +116,6 @@ def create_jira_ticket(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.user_keysrotation.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.user_keysrotation.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index 3e33173f..22732500 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -26,8 +26,8 @@ def create_tickets_public_ami(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='publicAMIs') + slack = SlackNotification(self.config, module='publicAMIs') for account_id, 
account_name in self.config.publicAMIs.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index 5f4f2ff1..54ca1e3f 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -26,8 +26,8 @@ def create_tickets_rds_public_snapshots(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='rdsSnapshot') + slack = SlackNotification(self.config, module='rdsSnapshot') for account_id, account_name in self.config.rdsSnapshot.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -149,10 +149,6 @@ def create_tickets_rds_public_snapshots(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.rds_public_snapshot.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.rds_public_snapshot.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index 01cf81c4..88c4e5ff 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -26,8 +26,8 @@ def create_tickets_rds_unencrypted_instances(self): main_account 
= Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='rdsEncrypt') + slack = SlackNotification(self.config, module='rdsEncrypt') for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -146,10 +146,6 @@ def create_tickets_rds_unencrypted_instances(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.rds_encryption.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.rds_encryption.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index e36c8dde..d004cd86 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -26,8 +26,8 @@ def create_tickets_s3_unencrypted_buckets(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='s3Encrypt') + slack = SlackNotification(self.config, module='s3Encrypt') for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -159,10 +159,6 @@ def create_tickets_s3_unencrypted_buckets(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, 
logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.s3_encryption.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.s3_encryption.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index f99f4ce1..9d69ec85 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -33,8 +33,8 @@ def create_tickets_s3buckets(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='s3acl') + slack = SlackNotification(self.config, module='s3acl') for account_id, account_name in self.config.s3acl.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -171,10 +171,6 @@ def create_tickets_s3buckets(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.s3_bucket_acl.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.s3_bucket_acl.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index 54e1234e..ccb59120 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ 
b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -29,8 +29,8 @@ def create_tickets_s3buckets(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='s3policy') + slack = SlackNotification(self.config, module='s3policy') for account_id, account_name in self.config.s3policy.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -178,10 +178,6 @@ def create_tickets_s3buckets(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.s3_bucket_policy.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.s3_bucket_policy.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index f6ef9508..dc63449f 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -220,8 +220,8 @@ def create_tickets_securitygroups(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='sg') + slack = SlackNotification(self.config, module='sg') for account_id, account_name in self.config.sg.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -492,10 +492,6 @@ def create_tickets_securitygroups(self): module_name = sys.modules[__name__].__loader__.name 
set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.secgrp_unrestricted_access.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.secgrp_unrestricted_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index dc5671e3..bf5ff888 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -29,8 +29,8 @@ def create_tickets_sqs_policy(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) - slack = SlackNotification(self.config) + jira = JiraReporting(self.config, module='sqspolicy') + slack = SlackNotification(self.config, module='sqspolicy') for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") @@ -182,10 +182,6 @@ def create_tickets_sqs_policy(self): module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") config = Config() - if config.jira.enabled: - config.jira.enabled = config.sqs_public_access.jira.enabled - if config.slack.enabled: - config.slack.enabled = config.sqs_public_access.slack.enabled add_cw_logging(config.local.log_group, log_stream=module_name, level=logging.DEBUG, From 5da4ec3d5df6a6a3761105273385c764e91cbc2a Mon Sep 17 00:00:00 2001 From: MrBakalo Date: Thu, 11 Apr 2019 18:50:39 +0300 Subject: [PATCH 024/193] Added backward compatibility for new config flags. 
Jira decorator function marked as 'private' --- hammer/library/jiraoperations.py | 16 ++++++++-------- hammer/library/slack_utility.py | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index 41322197..c083100b 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -20,15 +20,15 @@ class JiraReporting(object): def __init__(self, config, module=''): self.config = config self.jira = JiraOperations(self.config, module=module) - self.module_jira_enabled = getattr(config, module).jira if hasattr(config, module) else True + self.module_jira_enabled = getattr(config, module).jira if hasattr(hasattr(config, module), 'jira') else True - def jira_enabled(func): + def _jira_enabled(func): def decorated(self, *args, **kwargs): if self.config.jira.enabled and self.module_jira_enabled: return func(self, *args, **kwargs) return decorated - @jira_enabled + @_jira_enabled def add_issue(self, issue_summary, issue_description, priority, labels, @@ -79,23 +79,23 @@ def add_issue(self, return NewIssue(ticket_id=ticket_id, ticket_assignee_id=ticket_assignee_id) - @jira_enabled + @_jira_enabled def close_issue(self, ticket_id, comment): self.jira.add_comment(ticket_id, comment) self.jira.close_issue(ticket_id) logging.debug(f"Closed issue ({self.jira.ticket_url(ticket_id)})") - @jira_enabled + @_jira_enabled def update_issue(self, ticket_id, comment): # TODO: reopen ticket if closed self.jira.add_comment(ticket_id, comment) logging.debug(f"Updated issue {self.jira.ticket_url(ticket_id)}") - @jira_enabled + @_jira_enabled def add_attachment(self, ticket_id, filename, text): return self.jira.add_attachment(ticket_id, filename, text) - @jira_enabled + @_jira_enabled def remediate_issue(self, ticket_id, comment, reassign): if reassign: self.jira.assign_user(ticket_id, self.jira.current_user) @@ -118,7 +118,7 @@ def __init__(self, config, module=''): self.server = 
self.config.jira.server # JIRA established session self.session = None - self.module_jira_enabled = getattr(config, module).jira if hasattr(config, module) else True + self.module_jira_enabled = getattr(config, module).jira if hasattr(hasattr(config, module), 'jira') else True if self.config.jira.enabled and self.module_jira_enabled: self.login_oauth() diff --git a/hammer/library/slack_utility.py b/hammer/library/slack_utility.py index 232a6700..beb11c51 100755 --- a/hammer/library/slack_utility.py +++ b/hammer/library/slack_utility.py @@ -13,7 +13,7 @@ def __init__(self, config=None, module=''): self.config = Config() if config is None else config self.sc = SlackClient(self.config.slack.api_token) self.slackUser = "hammer" - self.module_slack_enabled = getattr(config, module).slack if hasattr(config, module) else True + self.module_slack_enabled = getattr(config, module).slack if hasattr(hasattr(config, module), 'slack') else True @property @lru_cache(maxsize=1) From 056464bc3bb2da73a5d14a8b9982fcad6e644839 Mon Sep 17 00:00:00 2001 From: MrBakalo Date: Thu, 11 Apr 2019 19:00:03 +0300 Subject: [PATCH 025/193] Added commented functionality since reporting can be turned off for each feature --- deployment/configs/config.json | 8 ++--- .../reporting/create_cloudtrail_tickets.py | 26 ++++++++-------- .../create_ebs_volume_issue_tickets.py | 30 +++++++++---------- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index f8b94f67..a2168fe9 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -119,8 +119,8 @@ "ddb.table_name": "hammer-cloudtrails", "topic_name": "hammer-describe-cloudtrails-lambda", "reporting": false, - "jira": true, - "slack": true + "jira": false, + "slack": false }, "ebs_unencrypted_volume": { "enabled": true, @@ -128,8 +128,8 @@ "topic_name": "hammer-describe-ebs-unencrypted-volumes-lambda", "accounts": ["123456789012", "210987654321"], 
"reporting": false, - "jira": true, - "slack": true + "jira": false, + "slack": false }, "ebs_public_snapshot": { "enabled": true, diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index dea4472a..d07036de 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -130,19 +130,19 @@ def create_tickets_cloud_trail_logging(self): issue_description += (f"For any other exceptions, please follow the [whitelisting procedure|{self.config.whitelisting_procedure_url}] " f"and provide a strong business reasoning. ") - # try: - # response = jira.add_issue( - # issue_summary=issue_summary, issue_description=issue_description, - # priority="Major", labels=["cloud-trail-disabled"], - # account_id=account_id, - # ) - # except Exception: - # logging.exception("Failed to create jira ticket") - # continue - # - # if response is not None: - # issue.jira_details.ticket = response.ticket_id - # issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["cloud-trail-disabled"], + account_id=account_id, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id slack.report_issue( msg=f"Discovered {issue_summary}" diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index 0ba6f067..ab06f197 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -180,21 +180,21 
@@ def create_tickets_ebsvolumes(self): issue_summary = (f"EBS unencrypted volume '{volume_id}' " f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") - # try: - # response = jira.add_issue( - # issue_summary=issue_summary, issue_description=issue_description, - # priority="Major", labels=["unencrypted-ebs-volumes"], - # owner=owner, - # account_id=account_id, - # bu=bu, product=product, - # ) - # except Exception: - # logging.exception("Failed to create jira ticket") - # continue - # - # if response is not None: - # issue.jira_details.ticket = response.ticket_id - # issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["unencrypted-ebs-volumes"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id issue.jira_details.owner = owner issue.jira_details.business_unit = bu From abc08aad17d0ebd6de2df1a4cbdd038a46c0bbe4 Mon Sep 17 00:00:00 2001 From: MrBakalo Date: Fri, 12 Apr 2019 14:42:47 +0300 Subject: [PATCH 026/193] Jira labels logic update --- deployment/configs/config.json | 13 +++++++ hammer/library/jiraoperations.py | 36 +++++++++++++++++-- .../reporting/create_cloudtrail_tickets.py | 2 +- ...reate_ebs_public_snapshot_issue_tickets.py | 2 +- .../create_ebs_volume_issue_tickets.py | 2 +- .../create_iam_key_inactive_tickets.py | 2 +- .../create_iam_key_rotation_tickets.py | 2 +- .../create_public_ami_issue_tickets.py | 2 +- ...reate_rds_public_snapshot_issue_tickets.py | 2 +- ..._rds_unencrypted_instance_issue_tickets.py | 2 +- ...ate_s3_unencrypted_bucket_issue_tickets.py | 2 +- .../create_s3bucket_acl_issue_tickets.py | 2 +- 
.../create_s3bucket_policy_issue_tickets.py | 2 +- .../create_security_groups_tickets.py | 2 +- .../create_sqs_policy_issue_tickets.py | 2 +- 15 files changed, 60 insertions(+), 15 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index a2168fe9..e3c62971 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -55,6 +55,7 @@ "remediation": true, "remediation_retention_period": 0, "jira": true, + "labels": ["s3-public-acl"], "slack": true }, "secgrp_unrestricted_access": { @@ -79,6 +80,7 @@ "remediation_accounts": ["210987654321", "654321210987"], "remediation_retention_period": 21, "jira": true, + "labels": ["insecure-services"], "slack": true }, "user_inactivekeys": { @@ -91,6 +93,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, + "labels": ["iam-key-inactive"], "slack": true }, "user_keysrotation": { @@ -102,6 +105,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, + "labels": ["iam-key-rotation"], "slack": true }, "s3_bucket_policy": { @@ -112,6 +116,7 @@ "remediation": false, "remediation_retention_period": 7, "jira": true, + "labels": ["s3-public-policy"], "slack": true }, "cloudtrails": { @@ -120,6 +125,7 @@ "topic_name": "hammer-describe-cloudtrails-lambda", "reporting": false, "jira": false, + "labels": ["cloudtrail-issue"], "slack": false }, "ebs_unencrypted_volume": { @@ -129,6 +135,7 @@ "accounts": ["123456789012", "210987654321"], "reporting": false, "jira": false, + "labels": ["ebs-unencrypted-volume"], "slack": false }, "ebs_public_snapshot": { @@ -139,6 +146,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, + "labels": ["ebs-public-snapshot"], "slack": true }, "rds_public_snapshot": { @@ -149,6 +157,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, + "labels": ["rds-public-snapshot"], "slack": true }, "ec2_public_ami": { @@ -158,6 +167,7 @@ "remediation": false, 
"remediation_retention_period": 21, "jira": true, + "labels": ["public-ami"], "slack": true }, "sqs_public_access": { @@ -168,6 +178,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, + "labels": ["sqs-public-policy"], "slack": true }, "s3_encryption": { @@ -178,6 +189,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, + "labels": ["s3-unencrypted"], "slack": true }, "rds_encryption": { @@ -186,6 +198,7 @@ "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true, "jira": true, + "labels": ["rds-unencrypted"], "slack": true } } diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index c083100b..d86566a5 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -14,6 +14,36 @@ 'ticket_assignee_id' ]) +class JiraLabels(object): + """ Base class for JIRA tickets labeling """ + DEFAULT_LABELS = { + 'cloudtrails': 'cloudtrail-issue', + 'ebsSnapshot': 'ebs-public-snapshot', + 'ebsVolume': 'ebs-unencrypted-volume', + 'iamUserInactiveKeys': 'iam-key-inactive', + 'iamUserKeysRotation': 'iam-key-rotation', + 'publicAMIs': 'public-ami', + 'rdsSnapshot': 'rds-public-snapshot', + 'rdsEncrypt': 'rds-unencrypted', + 's3Encrypt': 's3-unencrypted', + 's3acl': 's3-public-acl', + 's3policy': 's3-public-policy', + 'sg': 'insecure-services', + 'sqspolicy': 'sqs-public-policy' + } + def __init__(self, config, module=''): + self.config = config + self.module = module + self.module_jira = getattr(config, module) if hasattr(config, module) else False + self.module_jira_labels = self.module_jira.labels if hasattr(self.module_jira, 'labels') else False + + @property + def module_labels(self): + if self.module_jira_labels: + return self.module_jira_labels + else: + return self.DEFAULT_LABELS.get(self.module, '') + class JiraReporting(object): """ Base class for JIRA reporting """ @@ -21,6 +51,8 @@ def __init__(self, config, module=''): self.config = config self.jira = 
JiraOperations(self.config, module=module) self.module_jira_enabled = getattr(config, module).jira if hasattr(hasattr(config, module), 'jira') else True + self.jira_labels = JiraLabels(config, module) + self.module_jira_labels = self.jira_labels.module_labels def _jira_enabled(func): def decorated(self, *args, **kwargs): @@ -31,7 +63,7 @@ def decorated(self, *args, **kwargs): @_jira_enabled def add_issue(self, issue_summary, issue_description, - priority, labels, + priority, account_id, owner=None, bu=None, product=None, @@ -48,7 +80,7 @@ def add_issue(self, "description": issue_description, "issuetype": {"name": self.config.jira.issue_type}, "priority": {"name": priority}, - "labels": labels + "labels": self.module_jira_labels } ticket_id = self.jira.create_ticket(issue_data) diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index d07036de..6a912efe 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -133,7 +133,7 @@ def create_tickets_cloud_trail_logging(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["cloud-trail-disabled"], + priority="Major", account_id=account_id, ) except Exception: diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index 675dc50d..ab2091ce 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -120,7 +120,7 @@ def create_tickets_ebs_public_snapshots(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", 
labels=["public_snapshots"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index ab06f197..51f94edc 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -183,7 +183,7 @@ def create_tickets_ebsvolumes(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["unencrypted-ebs-volumes"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index ea08b0bc..f5cb7d73 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -94,7 +94,7 @@ def create_jira_ticket(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["inactive-iam-keys"], + priority="Major", account_id=account_id, ) except Exception: diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 53787ab7..62d69966 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -92,7 +92,7 @@ def create_jira_ticket(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["iam-key-rotation"], + priority="Major", account_id=account_id, ) except Exception: diff --git 
a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index 22732500..b991140b 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -120,7 +120,7 @@ def create_tickets_public_ami(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["public-ami"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index 54ca1e3f..67182bba 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -117,7 +117,7 @@ def create_tickets_rds_public_snapshots(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["rds-public-snapshots"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index 88c4e5ff..9e760419 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -114,7 +114,7 @@ def create_tickets_rds_unencrypted_instances(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["rds-unencrypted-instances"], + priority="Major", owner=owner, 
account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index d004cd86..1ea1db6a 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -127,7 +127,7 @@ def create_tickets_s3_unencrypted_buckets(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["s3-unencrypted"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 9d69ec85..770675f2 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -139,7 +139,7 @@ def create_tickets_s3buckets(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["publics3"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index ccb59120..ea502350 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -142,7 +142,7 @@ def create_tickets_s3buckets(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["publics3"], + priority="Major", owner=owner, 
account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index dc63449f..9ad579e8 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -455,7 +455,7 @@ def create_tickets_securitygroups(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority=priority, labels=["insecure-services"], + priority=priority, owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index bf5ff888..ecb494aa 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -146,7 +146,7 @@ def create_tickets_sqs_policy(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["publicsqs"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, From a49dab10e97d438761ce272021683e4d9ef27c96 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 6 May 2019 18:44:58 +0530 Subject: [PATCH 027/193] Updated with account based remediation changes. Updated with account based remediation changes. 
--- .../remediation/clean_ami_public_access.py | 2 +- .../remediation/clean_s3bucket_unencrypted.py | 2 +- .../remediation/clean_sqs_policy_permissions.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hammer/reporting-remediation/remediation/clean_ami_public_access.py b/hammer/reporting-remediation/remediation/clean_ami_public_access.py index adf74652..0565dcb1 100644 --- a/hammer/reporting-remediation/remediation/clean_ami_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_ami_public_access.py @@ -31,7 +31,7 @@ def clean_ami_public_access(self): jira = JiraReporting(self.config) slack = SlackNotification(self.config) - for account_id, account_name in self.config.aws.accounts.items(): + for account_id, account_name in self.config.publicAMIs.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_open_issues(ddb_table, account_id, PublicAMIIssue) for issue in issues: diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py index 65203d30..ca6de0f4 100644 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py @@ -33,7 +33,7 @@ def cleans3bucketunencrypted(self, batch=False): jira = JiraReporting(self.config) slack = SlackNotification(self.config) - for account_id, account_name in self.config.aws.accounts.items(): + for account_id, account_name in self.config.s3Encrypt.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3EncryptionIssue) for issue in issues: diff --git a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py index f5e2d0a4..a62d7bdb 100644 --- 
a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py @@ -32,7 +32,7 @@ def clean_sqs_policy_permissions(self): jira = JiraReporting(self.config) slack = SlackNotification(self.config) - for account_id, account_name in self.config.aws.accounts.items(): + for account_id, account_name in self.config.sqspolicy.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue) for issue in issues: From 14f92c2ec1f378d29814d4475dd8a07a8edea7aa Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 10 May 2019 13:41:03 +0530 Subject: [PATCH 028/193] Updated with code changes for whitelist accounts. Updated with code changes for whitelist accounts. --- hammer/library/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hammer/library/config.py b/hammer/library/config.py index 504f1a1d..1f6dd944 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -473,12 +473,13 @@ def module_accounts(self, option): :return: dict with AWS accounts to identify/remediate {'account id': 'account name', ...} """ module_accounts = self._config.get(option, None) - if module_accounts is None: + if module_accounts is None or len(module_accounts) == 0: accounts = self._accounts else: # construct dict similar to main accounts dict accounts = {account: self._accounts.get(account, "") for account in module_accounts} # exclude 'ignore_accounts' from resulting dict + return {k: v for k, v in accounts.items() if k not in self._config.get("ignore_accounts", [])} @property From e005d85e438559520eb5b9146f113371a200a4bd Mon Sep 17 00:00:00 2001 From: MrBakalo Date: Wed, 29 May 2019 13:15:36 +0300 Subject: [PATCH 029/193] Fix for data type issue --- hammer/library/jiraoperations.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 
deletions(-) diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index d86566a5..c40dcf3d 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -17,19 +17,19 @@ class JiraLabels(object): """ Base class for JIRA tickets labeling """ DEFAULT_LABELS = { - 'cloudtrails': 'cloudtrail-issue', - 'ebsSnapshot': 'ebs-public-snapshot', - 'ebsVolume': 'ebs-unencrypted-volume', - 'iamUserInactiveKeys': 'iam-key-inactive', - 'iamUserKeysRotation': 'iam-key-rotation', - 'publicAMIs': 'public-ami', - 'rdsSnapshot': 'rds-public-snapshot', - 'rdsEncrypt': 'rds-unencrypted', - 's3Encrypt': 's3-unencrypted', - 's3acl': 's3-public-acl', - 's3policy': 's3-public-policy', - 'sg': 'insecure-services', - 'sqspolicy': 'sqs-public-policy' + 'cloudtrails': ['cloudtrail-issue'], + 'ebsSnapshot': ['ebs-public-snapshot'], + 'ebsVolume': ['ebs-unencrypted-volume'], + 'iamUserInactiveKeys': ['iam-key-inactive'], + 'iamUserKeysRotation': ['iam-key-rotation'], + 'publicAMIs': ['public-ami'], + 'rdsSnapshot': ['rds-public-snapshot'], + 'rdsEncrypt': ['rds-unencrypted'], + 's3Encrypt': ['s3-unencrypted'], + 's3acl': ['s3-public-acl'], + 's3policy': ['s3-public-policy'], + 'sg': ['insecure-services'], + 'sqspolicy': ['sqs-public-policy'] } def __init__(self, config, module=''): self.config = config From 65f896d73f28fbe2e67e1cc8e1ea5699153aedf9 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 4 Jun 2019 13:50:53 +0530 Subject: [PATCH 030/193] Added exception handling to ECS issues. Added exception handling to ECS issues. 
--- .../describe_ecs_logging_issues.py | 4 +- hammer/library/aws/ecs.py | 78 +++++++++++-------- 2 files changed, 47 insertions(+), 35 deletions(-) diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index fdb440d8..a1cd5476 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -3,7 +3,7 @@ from library.logger import set_logging from library.config import Config -from library.aws.ecs import ECSLoggingChecker +from library.aws.ecs import ECSChecker from library.aws.utility import Account from library.ddb_issues import IssueStatus, ECSLoggingIssue from library.ddb_issues import Operations as IssueOperations @@ -47,7 +47,7 @@ def lambda_handler(event, context): open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") - checker = ECSLoggingChecker(account=account) + checker = ECSChecker(account=account) if checker.check(): for task_definition in checker.task_definitions: logging.debug(f"Checking {task_definition.name}") diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 0dfdf058..5df5f435 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -140,38 +140,50 @@ def check(self, task_definitions=None): logging_enabled = False external_image = False is_privileged = False - task_definition = self.account.client("ecs").describe_task_definition( - taskDefinition=task_definition_name - )['taskDefinition'] - task_definition_arn = task_definition["taskDefinitionArn"] - if "containerDefinitions" in task_definition: - for container_definition in task_definition['containerDefinitions']: - if 
container_definition.get('logConfiguration') is None: - logging_enabled = False - else: - logging_enabled = True - - if container_definition['privileged']: - is_privileged = True - else: - is_privileged = False - - image = container_definition['image'] - if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: - external_image = True - else: - external_image = False - - if "Tags" in task_definition: - tags = task_definition["Tags"] - task_definition_details = ECSTaskDefinitions(account=self.account, - name=task_definition_name, - arn=task_definition_arn, - tags=tags, - is_logging=logging_enabled, - is_privileged=is_privileged, - external_image=external_image - ) - self.task_definitions.append(task_definition_details) + try: + task_definition = self.account.client("ecs").describe_task_definition( + taskDefinition=task_definition_name + )['taskDefinition'] + task_definition_arn = task_definition["taskDefinitionArn"] + if "containerDefinitions" in task_definition: + for container_definition in task_definition['containerDefinitions']: + if container_definition.get('logConfiguration') is None: + logging_enabled = False + else: + logging_enabled = True + + container_privileged_details = container_definition.get('privileged') + if container_privileged_details is not None: + if container_definition['privileged']: + is_privileged = True + else: + is_privileged = False + + image = container_definition.get('image') + if image is not None: + if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = True + else: + external_image = False + + if "Tags" in task_definition: + tags = task_definition["Tags"] + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) + self.task_definitions.append(task_definition_details) + except ClientError as err: + if 
err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ecs:{err.operation_name})") + else: + logging.exception(f"Failed to describe task definitions in {self.account} " + f"for task {task_definition_name}") + continue return True \ No newline at end of file From d70389c0c2ee9b077708fd629907885f6670efb1 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 4 Jun 2019 14:56:20 +0530 Subject: [PATCH 031/193] Updated with ECS logging changes. Updated with ECS logging changes. --- .../describe_ecs_logging_issues.py | 3 ++ .../create_ecs_logging_issue_tickets.py | 32 +++++++++++++------ 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index a1cd5476..f412a999 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -54,6 +54,9 @@ def lambda_handler(event, context): if not task_definition.is_logging: issue = ECSLoggingIssue(account_id, task_definition.name) issue.issue_details.region = task_definition.account.region + issue.issue_details.task_definition_arn = task_definition.arn + issue.issue_details.tags = task_definition.tags + if config.ecs_logging.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 8097a3c2..70164bca 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -33,7 +33,7 @@ def 
create_tickets_ecs_logging(self): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSLoggingIssue) for issue in issues: - task_definition_arn = issue.issue_id + task_definition_name = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags # issue has been already reported @@ -43,9 +43,9 @@ def create_tickets_ecs_logging(self): product = issue.jira_details.product if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} ECS logging enabled '{task_definition_arn}' issue") + logging.debug(f"Closing {issue.status.value} ECS logging enabled '{task_definition_name}' issue") - comment = (f"Closing {issue.status.value} ECS logging enabled '{task_definition_arn}' issue " + comment = (f"Closing {issue.status.value} ECS logging enabled '{task_definition_name}' issue " f"in '{account_name} / {account_id}' account, '{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. 
@@ -67,9 +67,9 @@ def create_tickets_ecs_logging(self): IssueOperations.set_status_closed(ddb_table, issue) # issue.status != IssueStatus.Closed (should be IssueStatus.Open) elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_arn}") + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_name}") slack.report_issue( - msg=f"ECS logging '{task_definition_arn}' issue is changed " + msg=f"ECS logging '{task_definition_name}' issue is changed " f"in '{account_name} / {account_id}' account, '{region}' region" f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", owner=owner, @@ -78,16 +78,16 @@ def create_tickets_ecs_logging(self): ) IssueOperations.set_status_updated(ddb_table, issue) else: - logging.debug(f"No changes for '{task_definition_arn}'") + logging.debug(f"No changes for '{task_definition_name}'") # issue has not been reported yet else: - logging.debug(f"Reporting ECS logging '{task_definition_arn}' issue") + logging.debug(f"Reporting ECS logging '{task_definition_name}' issue") owner = tags.get("owner", None) bu = tags.get("bu", None) product = tags.get("product", None) - issue_summary = (f"ECS logging is not enabled for '{task_definition_arn}'" + issue_summary = (f"ECS logging is not enabled for '{task_definition_name}'" f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") issue_description = ( @@ -96,7 +96,9 @@ def create_tickets_ecs_logging(self): f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" - f"*ECS Task Definition*: {task_definition_arn}\n") + f"*ECS Task Definition*: {task_definition_name}\n" + f"*Task definition logging enabled*: False \n" + ) auto_remediation_date = (self.config.now + self.config.ecs_logging.issue_retention_date).date() issue_description += 
f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" @@ -106,7 +108,17 @@ def create_tickets_ecs_logging(self): issue_description += "\n" issue_description += ( f"*Recommendation*: " - f"Enable logging for ECS task definition.") + f"Enable logging for ECS task definition. To enable logging, follow below steps: \n" + f"1. Open the Amazon ECS console at https://console.aws.amazon.com/ecs/. \n" + f"2. From the navigation bar, " + f"choose region that contains your task definition and choose Task Definitions.\n" + f"3. On the Task Definitions page, select the box to the left of the task definition to revise " + f"and choose Create new revision.\n" + f"4. On the Create new revision of Task Definition page, " + f"select the existing container and enable LogConfiguration under section Storage and Logging " + f"and then choose Update.\n" + f"5. Verify the information and choose Create.\n" + ) try: response = jira.add_issue( From 65483ee3c1e25bd12d4a0a36a89ca8be385e733f Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 4 Jun 2019 15:47:22 +0530 Subject: [PATCH 032/193] Updated ECS logging changes. Updated ECS logging changes. 
--- .../describe_ecs_logging_issues.py | 1 + hammer/library/aws/ecs.py | 59 +++++++------------ .../create_ecs_logging_issue_tickets.py | 10 ++-- 3 files changed, 28 insertions(+), 42 deletions(-) diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index f412a999..4bcf6c08 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -55,6 +55,7 @@ def lambda_handler(event, context): issue = ECSLoggingIssue(account_id, task_definition.name) issue.issue_details.region = task_definition.account.region issue.issue_details.task_definition_arn = task_definition.arn + issue.issue_details.container_name = task_definition.container_name issue.issue_details.tags = task_definition.tags if config.ecs_logging.in_whitelist(account_id, task_definition.name): diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 5df5f435..7b2eb30f 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,15 +1,8 @@ import json import logging -import mimetypes -import pathlib -from datetime import datetime, timezone -from io import BytesIO -from copy import deepcopy from botocore.exceptions import ClientError -from library.utility import jsonDumps from library.utility import timeit -from library.aws.security_groups import SecurityGroup from collections import namedtuple from library.aws.utility import convert_tags @@ -69,7 +62,7 @@ class ECSTaskDefinitions(object): Basic class for ECS task definitions. 
""" - def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): + def __init__(self, account, name, arn, tags, container_name=None, is_logging=None, is_privileged=None, external_image=None): """ :param account: `Account` instance where ECS task definition is present @@ -85,12 +78,13 @@ def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None self.is_logging = is_logging self.is_privileged = is_privileged self.external_image = external_image + self.container_name = container_name class ECSChecker(object): """ - Basic class for checking ecs task definition's logging enabled or not in account/region. - Encapsulates check settings and discovered task definitions. + Basic class for checking ecs task definition's logging/privileged access/image source in account/region. + Encapsulates check settings and discovered task definition's containers. """ def __init__(self, account): @@ -101,21 +95,10 @@ def __init__(self, account): self.account = account self.task_definitions = [] - def task_definition_arns(self, name): - """ - :return: `ECS task definition' by arn - """ - for task_definition in self.task_definitions: - if task_definition.name == name: - return task_definition - return None - - def check(self, task_definitions=None): + def check(self): """ Walk through clusters in the account/region and check them. - Put all gathered clusters to `self.clusters`. - - :param task_definitions: list with task definitions to check, if it is not supplied - all taks definitions must be checked + Put all ECS task definition's container details. :return: boolean. 
True - if check was successful, False - otherwise @@ -132,14 +115,12 @@ def check(self, task_definitions=None): return False if "families" in response: - tags = {} for task_definition_name in response["families"]: - if task_definitions is not None and task_definition_name not in task_definitions: - continue - + tags = {} logging_enabled = False external_image = False is_privileged = False + container_name = None try: task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name @@ -147,6 +128,7 @@ def check(self, task_definitions=None): task_definition_arn = task_definition["taskDefinitionArn"] if "containerDefinitions" in task_definition: for container_definition in task_definition['containerDefinitions']: + container_name = container_definition["name"] if container_definition.get('logConfiguration') is None: logging_enabled = False else: @@ -166,17 +148,18 @@ def check(self, task_definitions=None): else: external_image = False - if "Tags" in task_definition: - tags = task_definition["Tags"] - task_definition_details = ECSTaskDefinitions(account=self.account, - name=task_definition_name, - arn=task_definition_arn, - tags=tags, - is_logging=logging_enabled, - is_privileged=is_privileged, - external_image=external_image - ) - self.task_definitions.append(task_definition_details) + if "Tags" in task_definition: + tags = task_definition["Tags"] + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + container_name=container_name, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) + self.task_definitions.append(task_definition_details) except ClientError as err: if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: logging.error(f"Access denied in {self.account} " diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py 
b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 70164bca..130add86 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -34,6 +34,7 @@ def create_tickets_ecs_logging(self): issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSLoggingIssue) for issue in issues: task_definition_name = issue.issue_id + container_name = issue.issue_details.container_name region = issue.issue_details.region tags = issue.issue_details.tags # issue has been already reported @@ -91,13 +92,14 @@ def create_tickets_ecs_logging(self): f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") issue_description = ( - f"The ECS logging is not enabled.\n\n" + f"The ECS Container's logging is not enabled.\n\n" f"*Risk*: High\n\n" f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*ECS Task Definition*: {task_definition_name}\n" - f"*Task definition logging enabled*: False \n" + f"*ECS Task definition's Container Name*: {container_name}\n", + f"*Container's logging enabled*: False \n" ) auto_remediation_date = (self.config.now + self.config.ecs_logging.issue_retention_date).date() @@ -108,14 +110,14 @@ def create_tickets_ecs_logging(self): issue_description += "\n" issue_description += ( f"*Recommendation*: " - f"Enable logging for ECS task definition. To enable logging, follow below steps: \n" + f"Enable logging for ECS task definition's container. To enable logging, follow below steps: \n" f"1. Open the Amazon ECS console at https://console.aws.amazon.com/ecs/. \n" f"2. From the navigation bar, " f"choose region that contains your task definition and choose Task Definitions.\n" f"3. On the Task Definitions page, select the box to the left of the task definition to revise " f"and choose Create new revision.\n" f"4. 
On the Create new revision of Task Definition page, " - f"select the existing container and enable LogConfiguration under section Storage and Logging " + f"select the container and enable 'LogConfiguration' under section 'Storage and Logging' " f"and then choose Update.\n" f"5. Verify the information and choose Create.\n" ) From adb60c7fc62b4ad2879049024f1d542d6b9092b4 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 4 Jun 2019 18:51:12 +0530 Subject: [PATCH 033/193] Updated with ECS privileged access issue code changes. Updated with ECS privileged access issue code changes. --- .../describe_ecs_privileged_access_issues.py | 1 + hammer/library/aws/ecs.py | 115 +++++++++--------- ...ate_ecs_privileged_access_issue_tickets.py | 34 ++++-- 3 files changed, 83 insertions(+), 67 deletions(-) diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py index d2be8714..ca6f693a 100644 --- a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py @@ -55,6 +55,7 @@ def lambda_handler(event, context): issue = ECSPrivilegedAccessIssue(account_id, task_definition.name) issue.issue_details.arn = task_definition.arn issue.issue_details.tags = task_definition.tags + issue.issue_details.container_name = task_definition.container_name issue.issue_details.region = task_definition.account.region if config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 504e24ba..e693ae48 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,3 +1,4 @@ +import json import 
logging from botocore.exceptions import ClientError @@ -11,7 +12,7 @@ 'cluster_arn', # subnet_group_id 'cluster_instance_arn' - ]) +]) class ECSClusterOperations(object): @@ -19,7 +20,7 @@ class ECSClusterOperations(object): @timeit def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): """ Retrieve ecs clusters meta data with security group attached - + :param ec2_client: boto3 ec2 client :param ecs_client: boto3 ECS client :param group_id: security group id @@ -45,7 +46,8 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ) ec2_instance_id = container_instance[0]["ec2InstanceId"] - ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + ec2_instance = \ + ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( @@ -59,12 +61,14 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): class ECSTaskDefinitions(object): """ Basic class for ECS task definitions. - + """ - def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): + + def __init__(self, account, name, arn, tags, container_name=None, is_logging=None, is_privileged=None, + external_image=None): """ :param account: `Account` instance where ECS task definition is present - + :param name: name of the task definition :param arn: arn of the task definition :param arn: tags of task definition. @@ -77,12 +81,13 @@ def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None self.is_logging = is_logging self.is_privileged = is_privileged self.external_image = external_image + self.container_name = container_name class ECSChecker(object): """ - Basic class for checking ecs task definition's logging enabled or not in account/region. 
- Encapsulates check settings and discovered task definitions. + Basic class for checking ecs task definition's logging/privileged access/image source in account/region. + Encapsulates check settings and discovered task definition's containers. """ def __init__(self, account): @@ -93,21 +98,10 @@ def __init__(self, account): self.account = account self.task_definitions = [] - def task_definition_arns(self, name): - """ - :return: `ECS task definition' by arn - """ - for task_definition in self.task_definitions: - if task_definition.name == name: - return task_definition - return None - - def check(self, task_definitions=None): + def check(self): """ Walk through clusters in the account/region and check them. - Put all gathered clusters to `self.clusters`. - - :param task_definitions: list with task definitions to check, if it is not supplied - all taks definitions must be checked + Put all ECS task definition's container details. :return: boolean. True - if check was successful, False - otherwise @@ -124,51 +118,58 @@ def check(self, task_definitions=None): return False if "families" in response: - tags = {} for task_definition_name in response["families"]: - if task_definitions is not None and task_definition_name not in task_definitions: - continue - + tags = {} logging_enabled = False external_image = False is_privileged = False + container_name = None try: task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name )['taskDefinition'] + task_definition_arn = task_definition["taskDefinitionArn"] + if "containerDefinitions" in task_definition: + for container_definition in task_definition['containerDefinitions']: + container_name = container_definition["name"] + if container_definition.get('logConfiguration') is None: + logging_enabled = False + else: + logging_enabled = True + + container_privileged_details = container_definition.get('privileged') + if container_privileged_details is not None: + if 
container_definition['privileged']: + is_privileged = True + else: + is_privileged = False + + image = container_definition.get('image') + if image is not None: + if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = True + else: + external_image = False + + if "Tags" in task_definition: + tags = task_definition["Tags"] + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + container_name=container_name, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) + self.task_definitions.append(task_definition_details) except ClientError as err: - logging.exception(f"Failed to describe task definitions in {self.account} ") + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ecs:{err.operation_name})") + else: + logging.exception(f"Failed to describe task definitions in {self.account} " + f"for task {task_definition_name}") continue - task_definition_arn = task_definition["taskDefinitionArn"] - if "containerDefinitions" in task_definition: - for container_definition in task_definition['containerDefinitions']: - if container_definition.get('logConfiguration') is None: - logging_enabled = False - else: - logging_enabled = True - - if "privileged" in str(container_definition) and container_definition['privileged']: - is_privileged = True - else: - is_privileged = False - - image = container_definition['image'] - if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: - external_image = True - else: - external_image = False - - if "Tags" in task_definition: - tags = task_definition["Tags"] - task_definition_details = ECSTaskDefinitions(account=self.account, - name=task_definition_name, - arn=task_definition_arn, - tags=tags, - is_logging=logging_enabled, - is_privileged=is_privileged, - external_image=external_image - ) - 
self.task_definitions.append(task_definition_details) - return True \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index dcce2e88..40773735 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -33,7 +33,8 @@ def create_tickets_ecs_privileged(self): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSPrivilegedAccessIssue) for issue in issues: - task_definition_arn = issue.issue_id + task_definition_name = issue.issue_id + container_name = issue.issue_details.container_name region = issue.issue_details.region tags = issue.issue_details.tags # issue has been already reported @@ -43,9 +44,9 @@ def create_tickets_ecs_privileged(self): product = issue.jira_details.product if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_arn}' issue") + logging.debug(f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_name}' issue") - comment = (f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_arn}' issue " + comment = (f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_name}' issue " f"in '{account_name} / {account_id}' account, '{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. 
@@ -67,9 +68,9 @@ def create_tickets_ecs_privileged(self): IssueOperations.set_status_closed(ddb_table, issue) # issue.status != IssueStatus.Closed (should be IssueStatus.Open) elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_arn}") + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_name}") slack.report_issue( - msg=f"ECS privileged access disabled '{task_definition_arn}' issue is changed " + msg=f"ECS privileged access disabled '{task_definition_name}' issue is changed " f"in '{account_name} / {account_id}' account, '{region}' region" f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", owner=owner, @@ -78,16 +79,16 @@ def create_tickets_ecs_privileged(self): ) IssueOperations.set_status_updated(ddb_table, issue) else: - logging.debug(f"No changes for '{task_definition_arn}'") + logging.debug(f"No changes for '{task_definition_name}'") # issue has not been reported yet else: - logging.debug(f"Reporting ECS privileged access issue for '{task_definition_arn}'") + logging.debug(f"Reporting ECS privileged access issue for '{task_definition_name}'") owner = tags.get("owner", None) bu = tags.get("bu", None) product = tags.get("product", None) - issue_summary = (f"ECS privileged access is enabled for '{task_definition_arn}'" + issue_summary = (f"ECS privileged access is enabled for '{task_definition_name}'" f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") issue_description = ( @@ -96,7 +97,10 @@ def create_tickets_ecs_privileged(self): f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" - f"*ECS Task Definition*: {task_definition_arn}\n") + f"*ECS Task Definition Name*: {task_definition_name}\n" + f"*ECS Task definition's Container Name*: {container_name}\n" + f"*Container has privileged access*: 
True \n" + ) auto_remediation_date = (self.config.now + self.config.ecs_privileged_access.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" @@ -106,7 +110,17 @@ def create_tickets_ecs_privileged(self): issue_description += "\n" issue_description += ( f"*Recommendation*: " - f"By default, containers are unprivileged and cannot. Disable ECS privileged access.") + f"By default, containers are unprivileged and cannot. To disable ECS privileged access, follow below steps:" + f"1. Open the Amazon ECS console at https://console.aws.amazon.com/ecs/. \n" + f"2. From the navigation bar, " + f"choose region that contains your task definition and choose Task Definitions.\n" + f"3. On the Task Definitions page, select the box to the left of the task definition to revise " + f"and choose Create new revision.\n" + f"4. On the Create new revision of Task Definition page, " + f"select the container and disable 'Privileged' option under section 'Security' " + f"and then choose Update.\n" + f"5. Verify the information and choose Create.\n" + ) try: response = jira.add_issue( From a6c65d1d28f2af9cd8b5bad5b7104c13f390abc8 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 4 Jun 2019 20:04:27 +0530 Subject: [PATCH 034/193] Updated with ECS image source issue changes. Updated with ECS image source issue changes. 
--- ...scribe_ecs_external_image_source_issues.py | 2 + hammer/library/aws/ecs.py | 105 +++++++++--------- ...ecs_external_image_source_issue_tickets.py | 37 ++++-- 3 files changed, 82 insertions(+), 62 deletions(-) diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py index 58d2b045..82fa662e 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -55,6 +55,8 @@ def lambda_handler(event, context): issue = ECSExternalImageSourceIssue(account_id, task_definition.name) issue.issue_details.arn = task_definition.arn issue.issue_details.tags = task_definition.tags + issue.issue_details.container_name = task_definition.container_name + issue.issue_details.image_url = task_definition.image_url issue.issue_details.region = task_definition.account.region if config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 22c4d75f..fd80c5d2 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,3 +1,4 @@ +import json import logging from botocore.exceptions import ClientError @@ -63,7 +64,8 @@ class ECSTaskDefinitions(object): """ - def __init__(self, account, name, arn, tags, is_logging=None, is_privileged=None, external_image=None): + def __init__(self, account, name, arn, tags, container_name=None, image_url= None, is_logging=None, is_privileged=None, + external_image=None): """ :param account: `Account` instance where ECS task definition is present @@ -79,12 +81,14 @@ def __init__(self, account, name, arn, tags, 
is_logging=None, is_privileged=None self.is_logging = is_logging self.is_privileged = is_privileged self.external_image = external_image + self.container_name = container_name + self.image_url = image_url class ECSChecker(object): """ - Basic class for checking ecs task definition's logging enabled or not in account/region. - Encapsulates check settings and discovered task definitions. + Basic class for checking ecs task definition's logging/privileged access/image source in account/region. + Encapsulates check settings and discovered task definition's containers. """ def __init__(self, account): @@ -95,21 +99,10 @@ def __init__(self, account): self.account = account self.task_definitions = [] - def task_definition_arns(self, name): - """ - :return: `ECS task definition' by arn - """ - for task_definition in self.task_definitions: - if task_definition.name == name: - return task_definition - return None - - def check(self, task_definitions=None): + def check(self): """ Walk through clusters in the account/region and check them. - Put all gathered clusters to `self.clusters`. - - :param task_definitions: list with task definitions to check, if it is not supplied - all taks definitions must be checked + Put all ECS task definition's container details. :return: boolean. 
True - if check was successful, False - otherwise @@ -126,51 +119,59 @@ def check(self, task_definitions=None): return False if "families" in response: - tags = {} for task_definition_name in response["families"]: - if task_definitions is not None and task_definition_name not in task_definitions: - continue - + tags = {} logging_enabled = False external_image = False is_privileged = False + container_name = None try: task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name )['taskDefinition'] + task_definition_arn = task_definition["taskDefinitionArn"] + if "containerDefinitions" in task_definition: + for container_definition in task_definition['containerDefinitions']: + container_name = container_definition["name"] + if container_definition.get('logConfiguration') is None: + logging_enabled = False + else: + logging_enabled = True + + container_privileged_details = container_definition.get('privileged') + if container_privileged_details is not None: + if container_definition['privileged']: + is_privileged = True + else: + is_privileged = False + + image = container_definition.get('image') + if image is not None: + if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = True + else: + external_image = False + + if "Tags" in task_definition: + tags = task_definition["Tags"] + task_definition_details = ECSTaskDefinitions(account=self.account, + name=task_definition_name, + arn=task_definition_arn, + tags=tags, + container_name=container_name, + image_url=image, + is_logging=logging_enabled, + is_privileged=is_privileged, + external_image=external_image + ) + self.task_definitions.append(task_definition_details) except ClientError as err: - logging.exception(f"Failed to describe task definitions in {self.account} ") + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ecs:{err.operation_name})") + else: + 
logging.exception(f"Failed to describe task definitions in {self.account} " + f"for task {task_definition_name}") continue - task_definition_arn = task_definition["taskDefinitionArn"] - if "containerDefinitions" in task_definition: - for container_definition in task_definition['containerDefinitions']: - if container_definition.get('logConfiguration') is None: - logging_enabled = False - else: - logging_enabled = True - - if "privileged" in str(container_definition) and container_definition['privileged']: - is_privileged = True - else: - is_privileged = False - - image = container_definition['image'] - if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: - external_image = True - else: - external_image = False - - if "Tags" in task_definition: - tags = task_definition["Tags"] - task_definition_details = ECSTaskDefinitions(account=self.account, - name=task_definition_name, - arn=task_definition_arn, - tags=tags, - is_logging=logging_enabled, - is_privileged=is_privileged, - external_image=external_image - ) - self.task_definitions.append(task_definition_details) - return True \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index f0b780c7..1ec8fd28 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -33,9 +33,11 @@ def create_tickets_ecs_external_images(self): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSExternalImageSourceIssue) for issue in issues: - task_definition_arn = issue.issue_id + task_definition_name = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + container_name = issue.issue_details.container_name + 
image_url = issue.issue_details.image_url # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner @@ -43,9 +45,9 @@ def create_tickets_ecs_external_images(self): product = issue.jira_details.product if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} ECS external image source '{task_definition_arn}' issue") + logging.debug(f"Closing {issue.status.value} ECS external image source '{task_definition_name}' issue") - comment = (f"Closing {issue.status.value} ECS external image source '{task_definition_arn}' issue " + comment = (f"Closing {issue.status.value} ECS external image source '{task_definition_name}' issue " f"in '{account_name} / {account_id}' account, '{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. @@ -67,9 +69,9 @@ def create_tickets_ecs_external_images(self): IssueOperations.set_status_closed(ddb_table, issue) # issue.status != IssueStatus.Closed (should be IssueStatus.Open) elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_arn}") + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_name}") slack.report_issue( - msg=f"ECS external image source '{task_definition_arn}' issue is changed " + msg=f"ECS external image source '{task_definition_name}' issue is changed " f"in '{account_name} / {account_id}' account, '{region}' region" f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", owner=owner, @@ -78,16 +80,16 @@ def create_tickets_ecs_external_images(self): ) IssueOperations.set_status_updated(ddb_table, issue) else: - logging.debug(f"No changes for '{task_definition_arn}'") + logging.debug(f"No changes for '{task_definition_name}'") # issue has not been reported yet else: - 
logging.debug(f"Reporting ECS external image source issue for '{task_definition_arn}'") + logging.debug(f"Reporting ECS external image source issue for '{task_definition_name}'") owner = tags.get("owner", None) bu = tags.get("bu", None) product = tags.get("product", None) - issue_summary = (f"ECS external image source '{task_definition_arn}'" + issue_summary = (f"ECS external image source '{task_definition_name}'" f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") issue_description = ( @@ -96,7 +98,11 @@ def create_tickets_ecs_external_images(self): f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" - f"*ECS Task Definition*: {task_definition_arn}\n") + f"*ECS Task Definition*: {task_definition_name}\n" + f"*ECS Task definition's Container Name*: {container_name}\n" + f"*ECS container image Source*: External" + f"*Container image url*: {image_url} \n" + ) auto_remediation_date = (self.config.now + self.config.ecs_external_image_source.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" @@ -106,7 +112,18 @@ def create_tickets_ecs_external_images(self): issue_description += "\n" issue_description += ( f"*Recommendation*: " - f"For both security and reliability, it would be better to use ECS container registry and maintain all required container images within ECS.") + f"For both security and reliability, use ECS container registry and maintain " + f"all required container images within ECS. " + f"To update ECS container image source follow below steps:\n" + f"1. Open the Amazon ECS console at https://console.aws.amazon.com/ecs/. \n" + f"2. From the navigation bar, choose region that contains your task definition " + f"and choose Task Definitions.\n" + f"3. On the Task Definitions page, select the box to left of task definition to revise " + f"and choose Create new revision.\n" + f"4. 
On the Create new revision of Task Definition page, select the container and " + f"add internal image source to 'Image' option and then choose Update.\n" + f"5. Verify the information and choose Create.\n" + ) try: response = jira.add_issue( From 378e56c9450c7e979c023875f2a7403bba246999 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 6 Jun 2019 18:49:49 +0530 Subject: [PATCH 035/193] Updated with Elasticsearch encryption issue changes. Updated with Elasticsearch encryption issue changes. --- .../create_ecs_external_image_source_issue_tickets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 1ec8fd28..4ac1c6c0 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -100,8 +100,8 @@ def create_tickets_ecs_external_images(self): f"*Region*: {region}\n" f"*ECS Task Definition*: {task_definition_name}\n" f"*ECS Task definition's Container Name*: {container_name}\n" - f"*ECS container image Source*: External" - f"*Container image url*: {image_url} \n" + f"*ECS container image Source*: External \n" + f"*ECS container image url*: {image_url} \n" ) auto_remediation_date = (self.config.now + self.config.ecs_external_image_source.issue_retention_date).date() From 04e2dc4b6f04513e2d7f684c98013490e8dc6bb1 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 6 Jun 2019 18:54:24 +0530 Subject: [PATCH 036/193] Added code changes for ES encrytion issue. Added code changes for ES encrytion issue. 
--- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 23 +++ deployment/cf-templates/identification.json | 65 ++++++- .../modules/identification/identification.tf | 5 +- .../modules/identification/sources.tf | 7 + hammer/library/aws/elasticsearch.py | 142 +++++++++++++++ hammer/library/config.py | 3 + hammer/library/ddb_issues.py | 5 + ...elasticsearch_unencrypted_issue_tickets.py | 165 ++++++++++++++++++ 9 files changed, 414 insertions(+), 3 deletions(-) create mode 100644 hammer/library/aws/elasticsearch.py create mode 100644 hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 2e00c69c..cb1f4d23 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification 
ami-public-access-issues-identification api elasticsearch-unencrypted-domain-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 9b55f4d5..b7d91578 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -480,6 +480,29 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } + }, + "DynamoDBESEncryptionRequests": { + "Type": "AWS::DynamoDB::Table", + "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "request_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "request_id", + "KeyType": "HASH" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "es-unencrypted-domain" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index d83355db..76b9c01f 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,7 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationElasticSearchEncryption" ] }, { @@ -92,6 +93,9 @@ }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" + }, + "SourceIdentificationElasticSearchEncryption":{ + "dafault": "Relative path to Unencrypted Elasticsearch domain sources" } } } @@ -188,6 +192,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationElasticSearchEncryption": { + "Type": "String", + "Default": 
"elasticsearch-unencrypted-domain-identification.zip" } }, "Conditions": { @@ -245,6 +253,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricESEncryptionError": { + "value": "ESEncryptionError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -323,6 +334,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameESEncryption": { + "value": "describe-es-encryption-sns" + }, + "SNSTopicNameESEncryption": { + "value": "describe-es-encryption-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -406,6 +423,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateESEncryptionLambdaFunctionName": { + "value": "initiate-elasticsearch-encryption" + }, + "IdentifyESEncryptionLambdaFunctionName": { + "value": "describe-elasticsearch-encryption" } } }, @@ -1098,6 +1121,46 @@ "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } + }, + "StackEvaluateESEncryption": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted Elasticsearch domains.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted Elasticsearch domains.", + 
"InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateESEncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyESEncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_elasticsearch_unencrypted_domains.lambda_handler", + "EvaluateLambdaHandler": "describe_elasticsearch_unencrypted_domains.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Elasticsearch domain encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationESEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameESEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } + } } }, "Outputs": { diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 1a5b7b6f..d84eded9 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -16,7 +16,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ami-public-access-issues-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + 
"aws_s3_bucket_object.elasticsearch-unencrypted-domain-identification" ] tags = "${var.tags}" @@ -44,6 +45,8 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationElasticSearchEncryption = "${aws_s3_bucket_object.elasticsearch-unencrypted-domain-identification.id}" + } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index e5658577..dcce7f5b 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -96,3 +96,10 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = "${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } + +resource "aws_s3_bucket_object" "elasticsearch-unencrypted-domain-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("elasticsearch-unencrypted-domain-identification-%s.zip", "${md5(file("${path.module}/../../../packages/elasticsearch-unencrypted-domain-identification.zip"))}")}" + source = "${path.module}/../../../packages/elasticsearch-unencrypted-domain-identification.zip" +} + diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py new file mode 100644 index 00000000..ae2e71d1 --- /dev/null +++ b/hammer/library/aws/elasticsearch.py @@ -0,0 +1,142 @@ +import logging + + +from 
botocore.exceptions import ClientError +from collections import namedtuple +from library.utility import timeit + + +# structure which describes Elastic search domains +ElasticSearchDomain_Details = namedtuple('ElasticSearchDomain', [ + # domain name + 'domain_name', + # domain arn + 'domain_arn', + # vpc_id + 'vpc_id' + ]) + + +class ESDomainDetails(object): + """ + Basic class for ElasticSearch domain details. + + """ + + def __init__(self, account, name, id, arn, is_logging=None, encrypted=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param arn: tags of task definition. + :param is_logging: logging enabled or not. + """ + self.account = account + self.name = name + self.id = id + self.arn = arn + self.is_logging = is_logging + self.encrypted = encrypted + + +class ElasticSearchOperations: + @classmethod + @timeit + def get_elasticsearch_details_of_sg_associated(cls, elasticsearch_client, group_id): + """ Retrieve elastic search details meta data with security group attached + + :param elasticsearch_client: boto3 elastic search client + :param group_id: security group id + + :return: list with elastic search details + """ + # describe elastic search domain details with security group attached. + domains_list = [] + + elasticsearch_response = elasticsearch_client.list_domain_names() + for domain in elasticsearch_response["DomainNames"]: + domain_name = domain["DomainName"] + domain_details = elasticsearch_client.describe_elasticsearch_domain( + DomainName=domain_name + )["DomainStatus"] + if group_id in str(domain_details): + domains_list.append(ElasticSearchDomain_Details( + domain_name=domain_name, + domain_arn=domain_details["ARN"], + vpc_id=domain_details["VPCOptions"]["VPCId"] + )) + + return domains_list + + +class ESDomainChecker: + """ + Basic class for checking EBS snapshots in account/region. 
+ Encapsulates discovered EBS snapshots. + """ + + def __init__(self, account): + """ + :param account: `Account` instance with Elasticsearch domains to check + """ + self.account = account + self.domains = [] + + def get_domain(self, id): + """ + :return: `EBSSnapshot` by id + """ + for domain in self.domains: + if domain.id == id: + return domain + return None + + def check(self, ids=None): + """ + Walk through Elasticsearch domains in the account/region and put them to `self.domains`. + + :param ids: list with Elasticsearch domain ids to check, if it is not supplied - all Elasticsearch domains must be checked + + :return: boolean. True - if check was successful, + False - otherwise + """ + try: + es_client = self.account.client("es") + if ids is None: + domain_names_list = es_client.list_domain_names()["DomainNames"] + for domain_name in domain_names_list: + ids.append(domain_name["DomainName"]) + + if ids is not None: + domain_details = es_client.describe_elasticsearch_domains(DomainNames=ids)["DomainStatusList"] + + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ec2:{err.operation_name})") + else: + logging.exception(f"Failed to describe snapshots in {self.account}") + return False + + domain_encrypted = False + is_logging = False + for domain_detail in domain_details: + domain_name = domain_detail["DomainName"] + domain_id = domain_detail["DomainId"] + domain_arn = domain_detail["ARN"] + if domain_detail["EncryptionAtRestOptions"]["Enabled"] or \ + domain_detail["NodeToNodeEncryptionOptions"]["Enabled"]: + domain_encrypted = True + + if domain_detail["LogPublishingOptions"]["Options"]: + is_logging = True + + domain = ESDomainDetails(self.account, + name=domain_name, + id=domain_id, + arn=domain_arn, + is_logging=is_logging, + encrypted=domain_encrypted) + self.domains.append(domain) + return True diff --git a/hammer/library/config.py 
b/hammer/library/config.py index 504f1a1d..aac01da8 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -66,6 +66,9 @@ def __init__(self, # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") + # Elasticsearch unencrypted domain issue config + self.esEncrypt = ModuleConfig(self._config, "es_unencrypted_domain") + self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index d9ae7de2..54fd81b1 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -238,6 +238,11 @@ def __init__(self, *args): super().__init__(*args) +class ESEncryptionIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + + class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py new file mode 100644 index 00000000..1179c8f4 --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -0,0 +1,165 @@ +""" +Class to create elasticsearch unencryption issue tickets. 
+""" +import sys +import logging + + +from collections import Counter +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.aws.ec2 import EC2Operations +from library.ddb_issues import IssueStatus, ESEncryptionIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import empty_converter, list_converter +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateElasticSearchUnencryptedDomainTickets(object): + """ Class to create elasticsearch unencryption issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_elasticsearch_unencryption(self): + """ Class method to create jira tickets """ + table_name = self.config.esEncrypt.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.esEncrypt.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ESEncryptionIssue) + for issue in issues: + domain_name = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + # issue has been already reported + if issue.timestamps.reported is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Elasticsearch unencrypted domain '{domain_name}' issue") + + comment = (f"Closing {issue.status.value} Elasticsearch unencrypted domain '{domain_name}' issue " 
+ f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status == IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + label=IssueStatus.Whitelisted.value + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}") + slack.report_issue( + msg=f"Elasticsearch unencrypted domain '{domain_name}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + logging.debug(f"No changes for '{domain_name}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting Elasticsearch unencrypted domain '{domain_name}' issue") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_description = ( + f"Elasticsearch domain needs to be encrypted.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*Domain ID*: {domain_name}\n" + ) + + issue_description += JiraOperations.build_tags_table(tags) + + issue_description += "*Recommendation*: Encrypt Elasticsearch domain. " \ + "To enable encryption follow below steps: " \ + "1. Choose to create new domain. \n" \ + "2. 
Enable node-node encryption or encryption at rest options.\n" \ + "3. Fill other details and navigate to review page. \n" \ + "4. On the Review page, review your domain configuration, " \ + "and then choose 'Confirm' to create new domain. \n " \ + "5. After creation of new domain, migrate your data to new domain. \n " + + issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' " + f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + try: + response = jira.add_issue( + issue_summary=issue_summary, issue_description=issue_description, + priority="Major", labels=["unencrypted-elasticsearch-domains"], + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + except Exception: + logging.exception("Failed to create jira ticket") + continue + + if response is not None: + issue.jira_details.ticket = response.ticket_id + issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + + issue.jira_details.owner = owner + issue.jira_details.business_unit = bu + issue.jira_details.product = product + + slack.report_issue( + msg=f"Discovered {issue_summary}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + IssueOperations.set_status_reported(ddb_table, issue) + + +if __name__ == '__main__': + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + try: + obj = CreateElasticSearchUnencryptedDomainTickets(config) + obj.create_tickets_elasticsearch_unencryption() + except Exception: + 
logging.exception("Failed to create Elasticsearch unencrypted domain tickets") From 5690bd57bd59f43244e2648ca8e4927c2b885cb6 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 6 Jun 2019 19:13:35 +0530 Subject: [PATCH 037/193] Added lambda function code for ES encryption issue. Added lambda function code for ES encryption issue. --- ...cribe_elasticsearch_unencrypted_domains.py | 88 +++++++++++++++++++ ..._desc_elasticsearch_unencrypted_domains.py | 36 ++++++++ 2 files changed, 124 insertions(+) create mode 100644 hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py create mode 100644 hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/initiate_to_desc_elasticsearch_unencrypted_domains.py diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py new file mode 100644 index 00000000..7dfcba4e --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py @@ -0,0 +1,88 @@ +import json +import logging + + +from library.logger import set_logging +from library.config import Config +from library.aws.elasticsearch import ESDomainChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, ESEncryptionIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import DDB, Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate Elasticsearch unencrypted domains """ + set_logging(level=logging.INFO) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + 
region = payload['regions'].pop() + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.esEncrypt.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking for Elasticsearch unencrypted domains in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESEncryptionIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"Elasticsearch unencrypted domains in DDB:\n{open_issues.keys()}") + + checker = ESDomainChecker(account=account) + if checker.check(): + for domain in checker.domains: + if not domain.encrypted: + issue = ESEncryptionIssue(account_id, domain.name) + issue.issue_details.region = domain.account.region + issue.issue_details.id = domain.id + issue.issue_details.arn = domain.arn + + if config.esEncrypt.in_whitelist(account_id, domain.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {domain.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(domain.name, None) + + logging.debug(f"Elasticsearch unencrypted domains in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated Elasticsearch domains + for issue in open_issues.values(): + 
IssueOperations.set_status_resolved(ddb_table, issue) + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) + except Exception: + logging.exception(f"Failed to check Elasticsearch unencrypted domains in '{region}' for '{account_id} ({account_name})'") + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain Elasticsearch unencrypted domains checking") + + logging.debug(f"Checked Elasticsearch unencrypted domains in '{region}' for '{account_id} ({account_name})'") diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/initiate_to_desc_elasticsearch_unencrypted_domains.py b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/initiate_to_desc_elasticsearch_unencrypted_domains.py new file mode 100644 index 00000000..500687e2 --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/initiate_to_desc_elasticsearch_unencrypted_domains.py @@ -0,0 +1,36 @@ +import os +import logging + + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find unencrypted elasticsearch domains """ + set_logging(level=logging.INFO) + logging.debug("Initiating unencrypted Elasticsearch domains checking") + + try: + sns_arn = os.environ["SNS_ARN"] + config = Config() + + if not config.esEncrypt.enabled: + logging.debug("Elasticsearch unencrypted domains checking disabled") + return + + logging.debug("Iterating each account to initiate Elasticsearch unencrypted domains checking") + for account_id, account_name in config.esEncrypt.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + 
"regions": config.aws.regions, + "sns_arn": sns_arn + } + logging.debug(f"Initiating Elasticsearch unencrypted domains checking for '{account_name}'") + Sns.publish(sns_arn, payload) + except Exception: + logging.exception("Error occurred while initiation of Elasticsearch unencrypted domains checking") + return + + logging.debug("Elasticsearch unencrypted domains checking initiation done") From 0cb0f3a1bea8512535e376e5a9ba11289c149dd2 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 6 Jun 2019 19:31:40 +0530 Subject: [PATCH 038/193] Updated ddb table changes for ES encryption issue. Updated ddb table changes for ES encryption issue. --- deployment/cf-templates/ddb.json | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index b7d91578..06681176 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -464,14 +464,22 @@ "Properties": { "AttributeDefinitions": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" } ], "ProvisionedThroughput": { From 966b796f4c06619bf08535ff882ee8e22e74fb0a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 7 Jun 2019 13:01:04 +0530 Subject: [PATCH 039/193] Updated with ES encryption issue changes. Updated with ES encryption issue changes. 
--- ...cribe_elasticsearch_unencrypted_domains.py | 4 ++- hammer/library/aws/elasticsearch.py | 26 +++++++++++++------ ...elasticsearch_unencrypted_issue_tickets.py | 25 +++++++++--------- 3 files changed, 34 insertions(+), 21 deletions(-) diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py index 7dfcba4e..b73fabe3 100644 --- a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py @@ -57,6 +57,7 @@ def lambda_handler(event, context): issue.issue_details.region = domain.account.region issue.issue_details.id = domain.id issue.issue_details.arn = domain.arn + issue.issue_details.tags = domain.tags if config.esEncrypt.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted @@ -76,7 +77,8 @@ def lambda_handler(event, context): api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) DDB.track_progress(api_table, request_id) except Exception: - logging.exception(f"Failed to check Elasticsearch unencrypted domains in '{region}' for '{account_id} ({account_name})'") + logging.exception(f"Failed to check Elasticsearch unencrypted domains " + f"in '{region}' for '{account_id} ({account_name})'") # push SNS messages until the list with regions to check is empty if len(payload['regions']) > 0: diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index ae2e71d1..da08a174 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,10 +1,9 @@ import logging - from botocore.exceptions import ClientError from collections import namedtuple from library.utility import 
timeit - +from library.aws.utility import convert_tags # structure which describes Elastic search domains ElasticSearchDomain_Details = namedtuple('ElasticSearchDomain', [ @@ -14,7 +13,7 @@ 'domain_arn', # vpc_id 'vpc_id' - ]) +]) class ESDomainDetails(object): @@ -23,7 +22,7 @@ class ESDomainDetails(object): """ - def __init__(self, account, name, id, arn, is_logging=None, encrypted=None): + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None): """ :param account: `Account` instance where ECS task definition is present @@ -38,6 +37,7 @@ def __init__(self, account, name, id, arn, is_logging=None, encrypted=None): self.arn = arn self.is_logging = is_logging self.encrypted = encrypted + self.tags = convert_tags(tags) class ElasticSearchOperations: @@ -101,9 +101,11 @@ def check(self, ids=None): :return: boolean. True - if check was successful, False - otherwise """ + domain_details = [] try: es_client = self.account.client("es") if ids is None: + ids = [] domain_names_list = es_client.list_domain_names()["DomainNames"] for domain_name in domain_names_list: ids.append(domain_name["DomainName"]) @@ -116,7 +118,7 @@ def check(self, ids=None): logging.error(f"Access denied in {self.account} " f"(ec2:{err.operation_name})") else: - logging.exception(f"Failed to describe snapshots in {self.account}") + logging.exception(f"Failed to describe elasticsearch domains in {self.account}") return False domain_encrypted = False @@ -125,17 +127,25 @@ def check(self, ids=None): domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = domain_detail["ARN"] - if domain_detail["EncryptionAtRestOptions"]["Enabled"] or \ - domain_detail["NodeToNodeEncryptionOptions"]["Enabled"]: + encryption_at_rest = domain_detail.get("EncryptionAtRestOptions") + node_to_node_encryption = domain_detail.get("NodeToNodeEncryptionOptions") + if encryption_at_rest and encryption_at_rest["Enabled"]: + domain_encrypted = True + elif 
node_to_node_encryption and node_to_node_encryption["Enabled"]: domain_encrypted = True - if domain_detail["LogPublishingOptions"]["Options"]: + logging_details = domain_detail.get("LogPublishingOptions") + + if logging_details and logging_details["Options"]: is_logging = True + tags = es_client.list_tags(ARN=domain_arn)["TagList"] + domain = ESDomainDetails(self.account, name=domain_name, id=domain_id, arn=domain_arn, + tags=tags, is_logging=is_logging, encrypted=domain_encrypted) self.domains.append(domain) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 1179c8f4..cd6ad97f 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -1,20 +1,17 @@ """ -Class to create elasticsearch unencryption issue tickets. +Class to create Elasticsearch unencrypted domain issue tickets. """ import sys import logging -from collections import Counter from library.logger import set_logging, add_cw_logging from library.aws.utility import Account from library.config import Config from library.jiraoperations import JiraReporting, JiraOperations from library.slack_utility import SlackNotification -from library.aws.ec2 import EC2Operations from library.ddb_issues import IssueStatus, ESEncryptionIssue from library.ddb_issues import Operations as IssueOperations -from library.utility import empty_converter, list_converter from library.utility import SingletonInstance, SingletonInstanceException @@ -101,14 +98,18 @@ def create_tickets_elasticsearch_unencryption(self): issue_description += JiraOperations.build_tags_table(tags) - issue_description += "*Recommendation*: Encrypt Elasticsearch domain. " \ - "To enable encryption follow below steps: " \ - "1. Choose to create new domain. \n" \ - "2. 
Enable node-node encryption or encryption at rest options.\n" \ - "3. Fill other details and navigate to review page. \n" \ - "4. On the Review page, review your domain configuration, " \ - "and then choose 'Confirm' to create new domain. \n " \ - "5. After creation of new domain, migrate your data to new domain. \n " + auto_remediation_date = (self.config.now + self.config.esEncrypt.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += ( + f"*Recommendation*: Encrypt Elasticsearch domain. To enable encryption follow below steps: \n" + f"1. Choose to create new domain. \n" + f"2. Enable node-node encryption or encryption at rest options.\n" + f"3. Fill other domain configuration details and navigate to review page. \n" + f"4. On the Review page, review your domain configuration, and then choose 'Confirm' to " + f"create new domain. \n " + f"5. After creation of new domain, migrate your data to new domain. \n " + ) issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' " f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") From 26671eeb41fd7bdc5c6ceeec91de90deeaba7c48 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 13 Jun 2019 20:09:29 +0530 Subject: [PATCH 040/193] Added elasticsearch public access issue changes. Added elasticsearch public access domain issue changes. 
--- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 31 +++ deployment/cf-templates/identification.json | 65 ++++- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 5 + ...ibe_elasticsearch_public_access_domains.py | 90 ++++++ ...esc_elasticsearch_public_access_domains.py | 36 +++ hammer/library/aws/elasticsearch.py | 261 ++++++++++++++++++ hammer/library/config.py | 3 + hammer/library/ddb_issues.py | 3 + .../clean_elasticsearch_policy_permissions.py | 153 ++++++++++ ...asticsearch_public_access_issue_tickets.py | 171 ++++++++++++ 12 files changed, 821 insertions(+), 3 deletions(-) create mode 100644 hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py create mode 100644 hammer/identification/lambdas/elasticsearch-public-access-domain-identification/initiate_to_desc_elasticsearch_public_access_domains.py create mode 100644 hammer/library/aws/elasticsearch.py create mode 100644 hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py create mode 100644 hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 2e00c69c..2d79e20f 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification 
rds-unencrypted-instance-identification ami-public-access-issues-identification api" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api elasticsearch-public-access-domain-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 9b55f4d5..4fb7a44b 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -480,6 +480,37 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } + }, + "DynamoDBESPublicAccessRequests": { + "Type": "AWS::DynamoDB::Table", + "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "es-public-access-domain" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index d83355db..789eb8a6 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,7 +27,8 @@ "SourceIdentificationEBSVolumes", 
"SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationElasticSearchPublicAccess" ] }, { @@ -92,6 +93,9 @@ }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" + }, + "SourceIdentificationElasticSearchPublicAccess":{ + "dafault": "Relative path to Unencrypted Elasticsearch domain public access sources" } } } @@ -188,6 +192,10 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" + }, + "SourceIdentificationElasticSearchPublicAccess": { + "Type": "String", + "Default": "elasticsearch-public-access-domain-identification.zip" } }, "Conditions": { @@ -245,6 +253,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricESPublicAccessError": { + "value": "ESPublicAccessError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -323,6 +334,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameESPublicAccess": { + "value": "describe-es-public-access-sns" + }, + "SNSTopicNameESPublicAccess": { + "value": "describe-es-public-access-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -406,6 +423,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateESPublicAccessLambdaFunctionName": { + "value": "initiate-elasticsearch-public-access" + }, + "IdentifyESPublicAccessLambdaFunctionName": { + "value": "describe-elasticsearch-public-access" } } }, @@ -1098,6 +1121,46 @@ "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } + }, + "StackEvaluateESPublicAccess": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + 
"IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchPublicAccess" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify publicly accessible Elasticsearch domains.", + "EvaluateLambdaDescription": "Lambda function to describe publicly accessible Elasticsearch domains.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateESPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyESPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_elasticsearch_public_access_domains.lambda_handler", + "EvaluateLambdaHandler": "describe_elasticsearch_public_access_domains.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Elasticsearch domain public access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationESPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameESPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} 
+ } + } } }, "Outputs": { diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 1a5b7b6f..1e15ea75 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -16,7 +16,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ami-public-access-issues-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.elasticsearch-public-access-domain-identification" ] tags = "${var.tags}" @@ -44,6 +45,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationElasticSearchPublicAccess = "${aws_s3_bucket_object.elasticsearch-public-access-domain-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index e5658577..c684971c 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -96,3 +96,8 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = 
"${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } +resource "aws_s3_bucket_object" "elasticsearch-public-access-domain-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("elasticsearch-public-access-domain-identification-%s.zip", "${md5(file("${path.module}/../../../packages/elasticsearch-public-access-domain-identification.zip"))}")}" + source = "${path.module}/../../../packages/elasticsearch-public-access-domain-identification.zip" +} \ No newline at end of file diff --git a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py new file mode 100644 index 00000000..eccbd677 --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py @@ -0,0 +1,90 @@ +import json +import logging + + +from library.logger import set_logging +from library.config import Config +from library.aws.elasticsearch import ESDomainChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, ESPublicAccessIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import DDB, Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate Elasticsearch publicly accessible domains """ + set_logging(level=logging.INFO) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + config = 
Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.esPublicAccess.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking for Elasticsearch publicly accessible domains in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESPublicAccessIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"Elasticsearch publicly accessible domains in DDB:\n{open_issues.keys()}") + + checker = ESDomainChecker(account=account) + if checker.check(): + for domain in checker.domains: + if domain.public: + issue = ESPublicAccessIssue(account_id, domain.name) + issue.issue_details.region = domain.account.region + issue.issue_details.id = domain.id + issue.issue_details.arn = domain.arn + issue.issue_details.tags = domain.tags + issue.issue_details.policy = domain.policy + if config.esPublicAccess.in_whitelist(account_id, domain.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {domain.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(domain.name, None) + + logging.debug(f"Elasticsearch publicly accessible domains in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated Elasticsearch domains + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + 
DDB.track_progress(api_table, request_id) + except Exception: + logging.exception(f"Failed to check Elasticsearch publicly accessible domains " + f"in '{region}' for '{account_id} ({account_name})'") + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain Elasticsearch publicly accessible domains checking") + + logging.debug(f"Checked Elasticsearch publicly accessible domains in '{region}' for '{account_id} ({account_name})'") diff --git a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/initiate_to_desc_elasticsearch_public_access_domains.py b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/initiate_to_desc_elasticsearch_public_access_domains.py new file mode 100644 index 00000000..f097188e --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/initiate_to_desc_elasticsearch_public_access_domains.py @@ -0,0 +1,36 @@ +import os +import logging + + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find publicly accessible elasticsearch domains """ + set_logging(level=logging.INFO) + logging.debug("Initiating publicly accessible Elasticsearch domains checking") + + try: + sns_arn = os.environ["SNS_ARN"] + config = Config() + + if not config.esPublicAccess.enabled: + logging.debug("Elasticsearch publicly accessible domains checking disabled") + return + + logging.debug("Iterating each account to initiate Elasticsearch publicly accessible domains checking") + for account_id, account_name in config.esPublicAccess.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + 
logging.debug(f"Initiating Elasticsearch publicly accessible domains checking for '{account_name}'") + Sns.publish(sns_arn, payload) + except Exception: + logging.exception("Error occurred while initiation of Elasticsearch publicly accessible domains checking") + return + + logging.debug("Elasticsearch publicly accessible domains checking initiation done") diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py new file mode 100644 index 00000000..6d9163a9 --- /dev/null +++ b/hammer/library/aws/elasticsearch.py @@ -0,0 +1,261 @@ +import json +import logging + +from datetime import datetime, timezone +from botocore.exceptions import ClientError +from collections import namedtuple +from library.utility import timeit +from library.utility import jsonDumps +from library.aws.utility import convert_tags +from library.aws.s3 import S3Operations + +# structure which describes Elastic search domains +ElasticSearchDomain_Details = namedtuple('ElasticSearchDomain', [ + # domain name + 'domain_name', + # domain arn + 'domain_arn', + # vpc_id + 'vpc_id' +]) + + +class ElasticSearchOperations: + @classmethod + @timeit + def get_elasticsearch_details_of_sg_associated(cls, elasticsearch_client, group_id): + """ Retrieve elastic search details meta data with security group attached + + :param elasticsearch_client: boto3 elastic search client + :param group_id: security group id + + :return: list with elastic search details + """ + # describe elastic search domain details with security group attached. 
+ domains_list = [] + + elasticsearch_response = elasticsearch_client.list_domain_names() + for domain in elasticsearch_response["DomainNames"]: + domain_name = domain["DomainName"] + domain_details = elasticsearch_client.describe_elasticsearch_domain( + DomainName=domain_name + )["DomainStatus"] + if group_id in str(domain_details): + domains_list.append(ElasticSearchDomain_Details( + domain_name=domain_name, + domain_arn=domain_details["ARN"], + vpc_id=domain_details["VPCOptions"]["VPCId"] + )) + + return domains_list + + @staticmethod + def put_domain_policy(es_client, domain_name, policy): + """ + Replaces a policy on a domain. If the domain already has a policy, the one in this request completely replaces it. + + :param es_client: Elasticsearch boto3 client + :param domain_name: Elasticsearch domain where to update policy on + :param policy: `dict` or `str` with policy. `Dict` will be transformed to string using pretty json.dumps(). + + :return: nothing + """ + policy_json = jsonDumps(policy) if isinstance(policy, dict) else policy + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + AccessPolicies=policy_json, + ) + + @classmethod + def validate_access_policy(cls, policy_details): + """ + + :param policy_details: + :return: + """ + public_policy = False + for statement in policy_details.get("Statement", []): + effect = statement['Effect'] + principal = statement.get('Principal', {}) + not_principal = statement.get('NotPrincipal', None) + condition = statement.get('Condition', None) + suffix = "/0" + # check both `Principal` - `{"AWS": "*"}` and `"*"` + # and condition (if exists) to be restricted (not "0.0.0.0/0") + if effect == "Allow" and \ + (principal == "*" or principal.get("AWS") == "*"): + if condition is not None: + if suffix in str(condition.get("IpAddress")): + return True + else: + return True + if effect == "Allow" and \ + not_principal is not None: + # TODO: it is not recommended to use `Allow` with `NotPrincipal`, need to 
write proper check for such case + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notprincipal.html + logging.error(f"TODO: is this statement public???\n{statement}") + return False + + return public_policy + + +class ESDomainDetails(object): + """ + Basic class for ElasticSearch domain details. + + """ + + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None, policy=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param arn: tags of task definition. + :param is_logging: logging enabled or not. + """ + self.account = account + self.name = name + self.id = id + self.arn = arn + self.is_logging = is_logging + self.encrypted = encrypted + self._policy = json.loads(policy) if policy else {} + self.tags = convert_tags(tags) + + @property + def policy(self): + """ + :return: pretty formatted string with S3 bucket policy + """ + return jsonDumps(self._policy) + + @property + def public(self): + """ + :return: boolean, True - if Elasticsearch domain policy allows public access + False - otherwise + """ + return ElasticSearchOperations.validate_access_policy(self._policy) + + def backup_policy_s3(self, s3_client, bucket): + """ + Backup Elasticsearch policy json to S3. 
+ + :param s3_client: S3 boto3 client + :param bucket: S3 bucket name where to put backup of S3 bucket policy + + :return: S3 path (without bucket name) to saved object with elasticsearch domain policy backup + """ + timestamp = datetime.now(timezone.utc).isoformat('T', 'seconds') + path = (f"queue_policies/" + f"{self.account.id}/" + f"{self.backup_filename.stem}_{timestamp}" + f"{self.backup_filename.suffix}") + if S3Operations.object_exists(s3_client, bucket, path): + raise Exception(f"s3://{bucket}/{path} already exists") + S3Operations.put_object(s3_client, bucket, path, self.policy) + return path + + def restrict_policy(self): + """ + Restrict and replace current policy on domain. + + :return: nothing + + .. note:: This keeps self._policy unchanged. + You need to recheck Elasticsearch domain policy to ensure that it was really restricted. + """ + restricted_policy = S3Operations.restrict_policy(self._policy) + try: + ElasticSearchOperations.put_domain_policy(self.account.client(""), self.name, restricted_policy) + except Exception: + logging.exception(f"Failed to put {self.name} restricted policy") + return False + + return True + + +class ESDomainChecker: + """ + Basic class for checking Elasticsearch unencrypted and logging issues in account/region. + Encapsulates discovered Elasticsearch domains. + """ + + def __init__(self, account): + """ + :param account: `Account` instance with Elasticsearch domains to check + """ + self.account = account + self.domains = [] + + def get_domain(self, id): + """ + :return: `Elasticsearch Domain` by id + """ + for domain in self.domains: + if domain.id == id: + return domain + return None + + def check(self, ids=None): + """ + Walk through Elasticsearch domains in the account/region and put them to `self.domains`. + + :param ids: list with Elasticsearch domain ids to check, if it is not supplied - all Elasticsearch domains must be checked + + :return: boolean. 
True - if check was successful, + False - otherwise + """ + domain_details = [] + try: + es_client = self.account.client("es") + if ids is None: + ids = [] + domain_names_list = es_client.list_domain_names()["DomainNames"] + for domain_name in domain_names_list: + ids.append(domain_name["DomainName"]) + + if ids is not None: + domain_details = es_client.describe_elasticsearch_domains(DomainNames=ids)["DomainStatusList"] + + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ec2:{err.operation_name})") + else: + logging.exception(f"Failed to describe elasticsearch domains in {self.account}") + return False + + domain_encrypted = False + is_logging = False + for domain_detail in domain_details: + domain_name = domain_detail["DomainName"] + domain_id = domain_detail["DomainId"] + domain_arn = domain_detail["ARN"] + encryption_at_rest = domain_detail.get("EncryptionAtRestOptions") + node_to_node_encryption = domain_detail.get("NodeToNodeEncryptionOptions") + if encryption_at_rest and encryption_at_rest["Enabled"]: + domain_encrypted = True + elif node_to_node_encryption and node_to_node_encryption["Enabled"]: + domain_encrypted = True + + logging_details = domain_detail.get("LogPublishingOptions") + + if logging_details and logging_details["Options"]: + is_logging = True + + tags = es_client.list_tags(ARN=domain_arn)["TagList"] + + access_policy = domain_detail.get("AccessPolicies") + + domain = ESDomainDetails(self.account, + name=domain_name, + id=domain_id, + arn=domain_arn, + tags=tags, + is_logging=is_logging, + encrypted=domain_encrypted, + policy=access_policy) + self.domains.append(domain) + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 504f1a1d..c44b87f3 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -66,6 +66,9 @@ def __init__(self, # AMI public access issue 
config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") + # Elasticsearch publicly accessed domain issue config + self.esPublicAccess = ModuleConfig(self._config, "es_public_access_domain") + self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index d9ae7de2..dfbed11b 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -238,6 +238,9 @@ def __init__(self, *args): super().__init__(*args) +class ESPublicAccessIssue(Issue): + def __init__(self, *args): + super().__init__(*args) class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py new file mode 100644 index 00000000..08d97c94 --- /dev/null +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -0,0 +1,153 @@ +""" +Class to remediate ElasticSearch policy permissions. 
+""" +import sys +import logging +import argparse + + +from library.logger import set_logging, add_cw_logging +from library.config import Config +from library.jiraoperations import JiraReporting +from library.slack_utility import SlackNotification +from library.ddb_issues import Operations as IssueOperations +from library.ddb_issues import IssueStatus, ESPublicAccessIssue +from library.aws.elasticsearch import ESDomainChecker +from library.aws.utility import Account +from library.utility import confirm +from library.utility import SingletonInstance, SingletonInstanceException + + +class CleanElasticSearchPolicyPermissions: + """ Class to remediate ElasticSearch domain policy permissions """ + def __init__(self, config): + self.config = config + + def clean_elasticsearch_domain_policy_permissions(self, batch=False): + """ Class method to clean ElasticSearch domains which are violating aws best practices """ + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(self.config.esPublicAccess.ddb_table_name) + backup_bucket = config.aws.s3_backup_bucket + + retention_period = self.config.esPublicAccess.remediation_retention_period + + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.esPublicAccess.remediation_accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESPublicAccessIssue) + for issue in issues: + domain_name = issue.issue_id + + in_whitelist = self.config.esPublicAccess.in_whitelist(account_id, domain_name) + #in_fixlist = self.config.esPublicAccess.in_fixnow(account_id, domain_name) + + if in_whitelist: + logging.debug(f"Skipping {domain_name} (in whitelist)") + + # Adding label with "whitelisted" to jira ticket. 
+ jira.add_label( + ticket_id=issue.jira_details.ticket, + label=IssueStatus.Whitelisted.value + ) + continue + # if not in_fixlist: + # logging.debug(f"Skipping {domain_name} (not in fixlist)") + # continue + + if issue.timestamps.reported is None: + logging.debug(f"Skipping '{domain_name}' (was not reported)") + continue + + if issue.timestamps.remediated is not None: + logging.debug(f"Skipping {domain_name} (has been already remediated)") + continue + + updated_date = issue.timestamp_as_datetime + no_of_days_issue_created = (self.config.now - updated_date).days + + if no_of_days_issue_created >= retention_period: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + try: + account = Account(id=account_id, + name=account_name, + role_name=self.config.aws.role_name_reporting) + if account.session is None: + continue + + checker = ESDomainChecker(account=account) + checker.check(ids=[domain_name]) + domain_details = checker.get_domain(domain_name) + if domain_details is None: + logging.debug(f"Elasticsearch domain {domain_name} was removed by user") + elif not domain_details.public_by_policy: + logging.debug(f"Elasticsearch domain {domain_name} policy issue was remediated by user") + else: + if not batch and \ + not confirm(f"Do you want to remediate elasticsearch domain '{domain_name}' policy", False): + continue + + logging.debug(f"Remediating '{domain_name}' policy") + + backup_path = domain_details.backup_policy_s3(main_account.client("s3"), backup_bucket) + remediation_succeed = True + if domain_details.restrict_policy(): + comment = (f"Policy backup was saved to " + f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. 
" + f"Domain '{domain_name}' policy issue " + f"in '{account_name} / {account_id}' account " + f"was remediated by hammer") + else: + remediation_succeed = False + comment = (f"Failed to remediate elasticsearch domain '{domain_name}' policy issue " + f"in '{account_name} / {account_id}' account " + f"due to some limitations. Please, check manually") + + jira.remediate_issue( + ticket_id=issue.jira_details.ticket, + comment=comment, + reassign=remediation_succeed, + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_remediated(ddb_table, issue) + except Exception: + logging.exception(f"Error occurred while updating domain '{domain_name}' policy " + f"in '{account_name} / {account_id}'") + else: + logging.debug(f"Skipping '{domain_name}' " + f"({retention_period - no_of_days_issue_created} days before remediation)") + + +if __name__ == "__main__": + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + parser = argparse.ArgumentParser() + parser.add_argument('--batch', action='store_true', help='Do not ask confirmation for remediation') + args = parser.parse_args() + + try: + class_object = CleanElasticSearchPolicyPermissions(config) + class_object.clean_elasticsearch_domain_policy_permissions(batch=args.batch) + except Exception: + logging.exception("Failed to clean Elasticsearch domain public policies") diff --git 
class CreateElasticSearchPublicAccessDomainTickets(object):
    """ Class to create elasticsearch publicly accessible issue tickets """
    def __init__(self, config):
        """
        :param config: `Config` instance with reporting settings
        """
        self.config = config

    def attachment_name(self, account_id, domain_name):
        """ :return: file name for the policy JSON attached to the Jira ticket """
        return f"{account_id}_{domain_name}_{self.config.now.isoformat('T', 'seconds')}.json"

    def create_tickets_elasticsearch_public_access(self):
        """ Create/close/update Jira tickets for ES public access issues tracked in DDB """
        table_name = self.config.esPublicAccess.ddb_table_name

        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(table_name)
        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.esPublicAccess.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ESPublicAccessIssue)
            for issue in issues:
                domain_name = issue.issue_id
                region = issue.issue_details.region
                tags = issue.issue_details.tags
                policy = issue.issue_details.policy
                # issue has been already reported
                if issue.timestamps.reported is not None:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                        logging.debug(f"Closing {issue.status.value} Elasticsearch publicly accessible domain '{domain_name}' issue")

                        comment = (f"Closing {issue.status.value} Elasticsearch publicly accessible domain '{domain_name}' issue "
                                   f"in '{account_name} / {account_id}' account, '{region}' region")
                        if issue.status == IssueStatus.Whitelisted:
                            # Adding label with "whitelisted" to jira ticket.
                            jira.add_label(
                                ticket_id=issue.jira_details.ticket,
                                label=IssueStatus.Whitelisted.value
                            )
                        jira.close_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_closed(ddb_table, issue)
                    # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                    elif issue.timestamps.updated > issue.timestamps.reported:
                        logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}")
                        slack.report_issue(
                            msg=f"Elasticsearch publicly accessible domain '{domain_name}' issue is changed "
                                f"in '{account_name} / {account_id}' account, '{region}' region"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_updated(ddb_table, issue)
                    else:
                        logging.debug(f"No changes for '{domain_name}'")
                # issue has not been reported yet
                else:
                    logging.debug(f"Reporting Elasticsearch publicly accessible domain '{domain_name}' issue")

                    owner = tags.get("owner", None)
                    bu = tags.get("bu", None)
                    product = tags.get("product", None)

                    # bug fix: the field holds the domain name, not the AWS DomainId -
                    # label it accordingly
                    issue_description = (
                        f"Elasticsearch domain allows unrestricted public access.\n\n"
                        f"*Risk*: High\n\n"
                        f"*Account Name*: {account_name}\n"
                        f"*Account ID*: {account_id}\n"
                        f"*Region*: {region}\n"
                        f"*Domain Name*: {domain_name}\n"
                    )

                    issue_description += JiraOperations.build_tags_table(tags)

                    auto_remediation_date = (self.config.now + self.config.esPublicAccess.issue_retention_date).date()
                    issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                    # bug fix: recommendation was copy-pasted from the S3 module;
                    # CloudFront OAI does not apply to Elasticsearch domains
                    issue_description += (
                        f"*Recommendation*: "
                        f"Restrict the domain access policy to specific IAM users/roles "
                        f"or update domain permissions with VPC CIDRs ranges or ip addresses/ranges from "
                        f"[RFC1918|https://tools.ietf.org/html/rfc1918]. "
                    )

                    issue_summary = (f"Elasticsearch publicly accessible domain '{domain_name}' "
                                     f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                    try:
                        response = jira.add_issue(
                            issue_summary=issue_summary, issue_description=issue_description,
                            priority="Major", labels=["public-elasticsearch-domains"],
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                    except Exception:
                        logging.exception("Failed to create jira ticket")
                        continue

                    if response is not None:
                        issue.jira_details.ticket = response.ticket_id
                        issue.jira_details.ticket_assignee_id = response.ticket_assignee_id
                        # Adding domain policy json as attachment to Jira ticket.
                        jira.add_attachment(ticket_id=issue.jira_details.ticket,
                                            filename=self.attachment_name(account_id, domain_name),
                                            text=policy)

                    issue.jira_details.owner = owner
                    issue.jira_details.business_unit = bu
                    issue.jira_details.product = product

                    slack.report_issue(
                        msg=f"Discovered {issue_summary}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )

                    IssueOperations.set_status_reported(ddb_table, issue)
--- hammer/library/aws/elasticsearch.py | 71 +++++++++++++++++-- .../clean_elasticsearch_policy_permissions.py | 3 +- 2 files changed, 69 insertions(+), 5 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 6d9163a9..0d36f0ee 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,5 +1,6 @@ import json import logging +import pathlib from datetime import datetime, timezone from botocore.exceptions import ClientError @@ -66,6 +67,48 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) + def retrieve_loggroup_arn(self, cw_client, domain_name): + """ + + :param cw_client: cloudwatch logs boto3 client + :param domain_name: Elasticsearch domain name + :return: + """ + log_groups = cw_client.describe_log_groups() + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = None + for log_group in log_groups["logGroups"]: + log_group_name = log_group["logGroupName"] + if log_group_name == domain_log_group_name: + log_group_arn = log_group["arn"] + + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + self.retrieve_loggroup_arn(cw_client, domain_name) + + return log_group_arn + + def set_domain_logging(self, es_client, cw_client, domain_name): + """ + + :param es_client: elastic search boto3 client + :param cw_client: cloudwatch logs boto3 client + :param domain_name: elastic search domain name + :return: + """ + log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + LogPublishingOptions={ + 'ES_APPLICATION_LOGS': + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } + + } + ) + @classmethod def validate_access_policy(cls, policy_details): """ @@ -121,6 +164,7 @@ def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted self.is_logging = 
is_logging self.encrypted = encrypted self._policy = json.loads(policy) if policy else {} + self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @property @@ -168,13 +212,26 @@ def restrict_policy(self): """ restricted_policy = S3Operations.restrict_policy(self._policy) try: - ElasticSearchOperations.put_domain_policy(self.account.client(""), self.name, restricted_policy) + ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) except Exception: logging.exception(f"Failed to put {self.name} restricted policy") return False return True + def set_logging(self): + """ + + :return: + """ + try: + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + except Exception: + logging.exception(f"Failed to enable {self.name} logging") + return False + + return True + class ESDomainChecker: """ @@ -194,7 +251,7 @@ def get_domain(self, id): :return: `Elasticsearch Domain` by id """ for domain in self.domains: - if domain.id == id: + if domain.name == id: return domain return None @@ -242,8 +299,14 @@ def check(self, ids=None): logging_details = domain_detail.get("LogPublishingOptions") - if logging_details and logging_details["Options"]: - is_logging = True + if logging_details: + index_logs = logging_details.get("INDEX_SLOW_LOGS") + search_logs = logging_details.get("SEARCH_SLOW_LOGS") + error_logs = logging_details.get("ES_APPLICATION_LOGS") + if (index_logs and index_logs["Enable"]) \ + or (search_logs and search_logs["Enable"]) \ + or (error_logs and error_logs["Enable"]): + is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index 08d97c94..d92f4365 100644 --- 
a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -75,6 +75,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): try: account = Account(id=account_id, name=account_name, + region=issue.issue_details.region, role_name=self.config.aws.role_name_reporting) if account.session is None: continue @@ -84,7 +85,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): domain_details = checker.get_domain(domain_name) if domain_details is None: logging.debug(f"Elasticsearch domain {domain_name} was removed by user") - elif not domain_details.public_by_policy: + elif not domain_details.public: logging.debug(f"Elasticsearch domain {domain_name} policy issue was remediated by user") else: if not batch and \ From c0bc00549329578297cde95efba3a08161560c6a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 14 Jun 2019 15:22:21 +0530 Subject: [PATCH 042/193] Revert "Updated with Elasticsearch public access changes." This reverts commit 868922ac67abd8cddb53664b48a014054b2e6996. 
--- hammer/library/aws/elasticsearch.py | 71 ++----------------- .../clean_elasticsearch_policy_permissions.py | 3 +- 2 files changed, 5 insertions(+), 69 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 0d36f0ee..6d9163a9 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,6 +1,5 @@ import json import logging -import pathlib from datetime import datetime, timezone from botocore.exceptions import ClientError @@ -67,48 +66,6 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) - def retrieve_loggroup_arn(self, cw_client, domain_name): - """ - - :param cw_client: cloudwatch logs boto3 client - :param domain_name: Elasticsearch domain name - :return: - """ - log_groups = cw_client.describe_log_groups() - domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" - log_group_arn = None - for log_group in log_groups["logGroups"]: - log_group_name = log_group["logGroupName"] - if log_group_name == domain_log_group_name: - log_group_arn = log_group["arn"] - - if not log_group_arn: - cw_client.create_log_group(logGroupName=domain_log_group_name) - self.retrieve_loggroup_arn(cw_client, domain_name) - - return log_group_arn - - def set_domain_logging(self, es_client, cw_client, domain_name): - """ - - :param es_client: elastic search boto3 client - :param cw_client: cloudwatch logs boto3 client - :param domain_name: elastic search domain name - :return: - """ - log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) - es_client.update_elasticsearch_domain_config( - DomainName=domain_name, - LogPublishingOptions={ - 'ES_APPLICATION_LOGS': - { - 'CloudWatchLogsLogGroupArn': log_group_arn, - 'Enabled': True - } - - } - ) - @classmethod def validate_access_policy(cls, policy_details): """ @@ -164,7 +121,6 @@ def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted self.is_logging = 
is_logging self.encrypted = encrypted self._policy = json.loads(policy) if policy else {} - self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @property @@ -212,26 +168,13 @@ def restrict_policy(self): """ restricted_policy = S3Operations.restrict_policy(self._policy) try: - ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) + ElasticSearchOperations.put_domain_policy(self.account.client(""), self.name, restricted_policy) except Exception: logging.exception(f"Failed to put {self.name} restricted policy") return False return True - def set_logging(self): - """ - - :return: - """ - try: - ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) - except Exception: - logging.exception(f"Failed to enable {self.name} logging") - return False - - return True - class ESDomainChecker: """ @@ -251,7 +194,7 @@ def get_domain(self, id): :return: `Elasticsearch Domain` by id """ for domain in self.domains: - if domain.name == id: + if domain.id == id: return domain return None @@ -299,14 +242,8 @@ def check(self, ids=None): logging_details = domain_detail.get("LogPublishingOptions") - if logging_details: - index_logs = logging_details.get("INDEX_SLOW_LOGS") - search_logs = logging_details.get("SEARCH_SLOW_LOGS") - error_logs = logging_details.get("ES_APPLICATION_LOGS") - if (index_logs and index_logs["Enable"]) \ - or (search_logs and search_logs["Enable"]) \ - or (error_logs and error_logs["Enable"]): - is_logging = True + if logging_details and logging_details["Options"]: + is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index d92f4365..08d97c94 100644 --- 
a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -75,7 +75,6 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): try: account = Account(id=account_id, name=account_name, - region=issue.issue_details.region, role_name=self.config.aws.role_name_reporting) if account.session is None: continue @@ -85,7 +84,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): domain_details = checker.get_domain(domain_name) if domain_details is None: logging.debug(f"Elasticsearch domain {domain_name} was removed by user") - elif not domain_details.public: + elif not domain_details.public_by_policy: logging.debug(f"Elasticsearch domain {domain_name} policy issue was remediated by user") else: if not batch and \ From 56c986bb363d52c29cbb99911395cbb9c74859e7 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 14 Jun 2019 15:23:28 +0530 Subject: [PATCH 043/193] Updated with Elasticsearch public access changes. Updated with Elasticsearch public access changes. 
--- hammer/library/aws/elasticsearch.py | 71 +++++++++++++++++-- .../clean_elasticsearch_policy_permissions.py | 3 +- 2 files changed, 69 insertions(+), 5 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 6d9163a9..0d36f0ee 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,5 +1,6 @@ import json import logging +import pathlib from datetime import datetime, timezone from botocore.exceptions import ClientError @@ -66,6 +67,48 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) + def retrieve_loggroup_arn(self, cw_client, domain_name): + """ + + :param cw_client: cloudwatch logs boto3 client + :param domain_name: Elasticsearch domain name + :return: + """ + log_groups = cw_client.describe_log_groups() + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = None + for log_group in log_groups["logGroups"]: + log_group_name = log_group["logGroupName"] + if log_group_name == domain_log_group_name: + log_group_arn = log_group["arn"] + + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + self.retrieve_loggroup_arn(cw_client, domain_name) + + return log_group_arn + + def set_domain_logging(self, es_client, cw_client, domain_name): + """ + + :param es_client: elastic search boto3 client + :param cw_client: cloudwatch logs boto3 client + :param domain_name: elastic search domain name + :return: + """ + log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + LogPublishingOptions={ + 'ES_APPLICATION_LOGS': + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } + + } + ) + @classmethod def validate_access_policy(cls, policy_details): """ @@ -121,6 +164,7 @@ def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted self.is_logging = 
is_logging self.encrypted = encrypted self._policy = json.loads(policy) if policy else {} + self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @property @@ -168,13 +212,26 @@ def restrict_policy(self): """ restricted_policy = S3Operations.restrict_policy(self._policy) try: - ElasticSearchOperations.put_domain_policy(self.account.client(""), self.name, restricted_policy) + ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) except Exception: logging.exception(f"Failed to put {self.name} restricted policy") return False return True + def set_logging(self): + """ + + :return: + """ + try: + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + except Exception: + logging.exception(f"Failed to enable {self.name} logging") + return False + + return True + class ESDomainChecker: """ @@ -194,7 +251,7 @@ def get_domain(self, id): :return: `Elasticsearch Domain` by id """ for domain in self.domains: - if domain.id == id: + if domain.name == id: return domain return None @@ -242,8 +299,14 @@ def check(self, ids=None): logging_details = domain_detail.get("LogPublishingOptions") - if logging_details and logging_details["Options"]: - is_logging = True + if logging_details: + index_logs = logging_details.get("INDEX_SLOW_LOGS") + search_logs = logging_details.get("SEARCH_SLOW_LOGS") + error_logs = logging_details.get("ES_APPLICATION_LOGS") + if (index_logs and index_logs["Enable"]) \ + or (search_logs and search_logs["Enable"]) \ + or (error_logs and error_logs["Enable"]): + is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index 08d97c94..d92f4365 100644 --- 
a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -75,6 +75,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): try: account = Account(id=account_id, name=account_name, + region=issue.issue_details.region, role_name=self.config.aws.role_name_reporting) if account.session is None: continue @@ -84,7 +85,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): domain_details = checker.get_domain(domain_name) if domain_details is None: logging.debug(f"Elasticsearch domain {domain_name} was removed by user") - elif not domain_details.public_by_policy: + elif not domain_details.public: logging.debug(f"Elasticsearch domain {domain_name} policy issue was remediated by user") else: if not batch and \ From f2cfa1958e4ac2aba387076fa7f874105b1dae0a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 14 Jun 2019 15:33:46 +0530 Subject: [PATCH 044/193] Revert "Updated with Elasticsearch public access changes." This reverts commit 56c986bb363d52c29cbb99911395cbb9c74859e7. 
--- hammer/library/aws/elasticsearch.py | 71 ++----------------- .../clean_elasticsearch_policy_permissions.py | 3 +- 2 files changed, 5 insertions(+), 69 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 0d36f0ee..6d9163a9 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,6 +1,5 @@ import json import logging -import pathlib from datetime import datetime, timezone from botocore.exceptions import ClientError @@ -67,48 +66,6 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) - def retrieve_loggroup_arn(self, cw_client, domain_name): - """ - - :param cw_client: cloudwatch logs boto3 client - :param domain_name: Elasticsearch domain name - :return: - """ - log_groups = cw_client.describe_log_groups() - domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" - log_group_arn = None - for log_group in log_groups["logGroups"]: - log_group_name = log_group["logGroupName"] - if log_group_name == domain_log_group_name: - log_group_arn = log_group["arn"] - - if not log_group_arn: - cw_client.create_log_group(logGroupName=domain_log_group_name) - self.retrieve_loggroup_arn(cw_client, domain_name) - - return log_group_arn - - def set_domain_logging(self, es_client, cw_client, domain_name): - """ - - :param es_client: elastic search boto3 client - :param cw_client: cloudwatch logs boto3 client - :param domain_name: elastic search domain name - :return: - """ - log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) - es_client.update_elasticsearch_domain_config( - DomainName=domain_name, - LogPublishingOptions={ - 'ES_APPLICATION_LOGS': - { - 'CloudWatchLogsLogGroupArn': log_group_arn, - 'Enabled': True - } - - } - ) - @classmethod def validate_access_policy(cls, policy_details): """ @@ -164,7 +121,6 @@ def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted self.is_logging = 
is_logging self.encrypted = encrypted self._policy = json.loads(policy) if policy else {} - self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @property @@ -212,26 +168,13 @@ def restrict_policy(self): """ restricted_policy = S3Operations.restrict_policy(self._policy) try: - ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) + ElasticSearchOperations.put_domain_policy(self.account.client(""), self.name, restricted_policy) except Exception: logging.exception(f"Failed to put {self.name} restricted policy") return False return True - def set_logging(self): - """ - - :return: - """ - try: - ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) - except Exception: - logging.exception(f"Failed to enable {self.name} logging") - return False - - return True - class ESDomainChecker: """ @@ -251,7 +194,7 @@ def get_domain(self, id): :return: `Elasticsearch Domain` by id """ for domain in self.domains: - if domain.name == id: + if domain.id == id: return domain return None @@ -299,14 +242,8 @@ def check(self, ids=None): logging_details = domain_detail.get("LogPublishingOptions") - if logging_details: - index_logs = logging_details.get("INDEX_SLOW_LOGS") - search_logs = logging_details.get("SEARCH_SLOW_LOGS") - error_logs = logging_details.get("ES_APPLICATION_LOGS") - if (index_logs and index_logs["Enable"]) \ - or (search_logs and search_logs["Enable"]) \ - or (error_logs and error_logs["Enable"]): - is_logging = True + if logging_details and logging_details["Options"]: + is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index d92f4365..08d97c94 100644 --- 
a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -75,7 +75,6 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): try: account = Account(id=account_id, name=account_name, - region=issue.issue_details.region, role_name=self.config.aws.role_name_reporting) if account.session is None: continue @@ -85,7 +84,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): domain_details = checker.get_domain(domain_name) if domain_details is None: logging.debug(f"Elasticsearch domain {domain_name} was removed by user") - elif not domain_details.public: + elif not domain_details.public_by_policy: logging.debug(f"Elasticsearch domain {domain_name} policy issue was remediated by user") else: if not batch and \ From d2de7711e40ae7f763d138fc5458c0a1a11f92db Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 14 Jun 2019 15:50:53 +0530 Subject: [PATCH 045/193] Updated with ES publicaccess issue changes. Updated with ES public access issue changes. 
--- hammer/library/aws/elasticsearch.py | 71 +++++++++++++++++-- .../clean_elasticsearch_policy_permissions.py | 3 +- 2 files changed, 69 insertions(+), 5 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 6d9163a9..0d36f0ee 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,5 +1,6 @@ import json import logging +import pathlib from datetime import datetime, timezone from botocore.exceptions import ClientError @@ -66,6 +67,48 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) + def retrieve_loggroup_arn(self, cw_client, domain_name): + """ + + :param cw_client: cloudwatch logs boto3 client + :param domain_name: Elasticsearch domain name + :return: + """ + log_groups = cw_client.describe_log_groups() + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = None + for log_group in log_groups["logGroups"]: + log_group_name = log_group["logGroupName"] + if log_group_name == domain_log_group_name: + log_group_arn = log_group["arn"] + + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + self.retrieve_loggroup_arn(cw_client, domain_name) + + return log_group_arn + + def set_domain_logging(self, es_client, cw_client, domain_name): + """ + + :param es_client: elastic search boto3 client + :param cw_client: cloudwatch logs boto3 client + :param domain_name: elastic search domain name + :return: + """ + log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + LogPublishingOptions={ + 'ES_APPLICATION_LOGS': + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } + + } + ) + @classmethod def validate_access_policy(cls, policy_details): """ @@ -121,6 +164,7 @@ def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted self.is_logging = 
is_logging self.encrypted = encrypted self._policy = json.loads(policy) if policy else {} + self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @property @@ -168,13 +212,26 @@ def restrict_policy(self): """ restricted_policy = S3Operations.restrict_policy(self._policy) try: - ElasticSearchOperations.put_domain_policy(self.account.client(""), self.name, restricted_policy) + ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) except Exception: logging.exception(f"Failed to put {self.name} restricted policy") return False return True + def set_logging(self): + """ + + :return: + """ + try: + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + except Exception: + logging.exception(f"Failed to enable {self.name} logging") + return False + + return True + class ESDomainChecker: """ @@ -194,7 +251,7 @@ def get_domain(self, id): :return: `Elasticsearch Domain` by id """ for domain in self.domains: - if domain.id == id: + if domain.name == id: return domain return None @@ -242,8 +299,14 @@ def check(self, ids=None): logging_details = domain_detail.get("LogPublishingOptions") - if logging_details and logging_details["Options"]: - is_logging = True + if logging_details: + index_logs = logging_details.get("INDEX_SLOW_LOGS") + search_logs = logging_details.get("SEARCH_SLOW_LOGS") + error_logs = logging_details.get("ES_APPLICATION_LOGS") + if (index_logs and index_logs["Enable"]) \ + or (search_logs and search_logs["Enable"]) \ + or (error_logs and error_logs["Enable"]): + is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index 08d97c94..d92f4365 100644 --- 
a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -75,6 +75,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): try: account = Account(id=account_id, name=account_name, + region=issue.issue_details.region, role_name=self.config.aws.role_name_reporting) if account.session is None: continue @@ -84,7 +85,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): domain_details = checker.get_domain(domain_name) if domain_details is None: logging.debug(f"Elasticsearch domain {domain_name} was removed by user") - elif not domain_details.public_by_policy: + elif not domain_details.public: logging.debug(f"Elasticsearch domain {domain_name} policy issue was remediated by user") else: if not batch and \ From 416727407ae42fb6722f78f6df094cbc457fa5ac Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 14 Jun 2019 16:11:28 +0530 Subject: [PATCH 046/193] Added Elasticsearch logging issue changes. Added Elasticsearch logging issue changes. 
--- deployment/build_packages.sh | 2 +- deployment/cf-templates/ddb.json | 31 ++ deployment/cf-templates/identification.json | 70 +++- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 6 + ...be_elasticsearch_domains_logging_issues.py | 90 +++++ ...sc_elasticsearch_domains_logging_issues.py | 36 ++ hammer/library/aws/elasticsearch.py | 324 ++++++++++++++++++ hammer/library/config.py | 3 + hammer/library/ddb_issues.py | 3 + .../clean_elasticsearch_domain_logging.py | 151 ++++++++ ...sticsearch_domain_logging_issue_tickets.py | 165 +++++++++ 12 files changed, 880 insertions(+), 5 deletions(-) create mode 100644 hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py create mode 100644 hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py create mode 100644 hammer/library/aws/elasticsearch.py create mode 100644 hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py create mode 100644 hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index 2e00c69c..9b913fb1 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification 
rds-unencrypted-instance-identification ami-public-access-issues-identification api" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api elasticsearch-domain-logging-issues-identification" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 9b55f4d5..b9ba3d7d 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -480,6 +480,37 @@ }, "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } + }, + "DynamoDBESLoggingRequests": { + "Type": "AWS::DynamoDB::Table", + "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", + "AttributeType": "S" + } + ], + "KeySchema": [ + { + "AttributeName": "account_id", + "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "10", + "WriteCapacityUnits": "2" + }, + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "es-domain-logging" ] ]} + } } } } diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index d83355db..72984fb7 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,7 +27,8 @@ "SourceIdentificationEBSVolumes", 
"SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationElasticSearchLogging", ] }, { @@ -92,7 +93,11 @@ }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" - } + }, + "SourceIdentificationElasticSearchLogging":{ + "dafault": "Relative path to Elasticsearch domain logging sources" + }, + } } }, @@ -188,7 +193,11 @@ "SourceIdentificationRDSEncryption": { "Type": "String", "Default": "rds-unencrypted-instance-identification.zip" - } + }, + "SourceIdentificationElasticSearchLogging": { + "Type": "String", + "Default": "elasticsearch-domain-logging-issues-identification.zip" + }, }, "Conditions": { "LambdaSubnetsEmpty": { @@ -245,6 +254,9 @@ "IdentificationMetricRDSEncryptionError": { "value": "RDSEncryptionError" }, + "IdentificationMetricESLoggingError": { + "value": "ESLoggingError" + }, "SNSDisplayNameSecurityGroups": { "value": "describe-security-groups-sns" }, @@ -323,6 +335,12 @@ "SNSTopicNameRDSEncryption": { "value": "describe-rds-encryption-lambda" }, + "SNSDisplayNameESLogging": { + "value": "describe-es-logging-sns" + }, + "SNSTopicNameESLogging": { + "value": "describe-es-logging-lambda" + }, "LogsForwarderLambdaFunctionName": { "value": "logs-forwarder" }, @@ -406,6 +424,12 @@ }, "IdentifyRDSEncryptionLambdaFunctionName": { "value": "describe-rds-encryption" + }, + "InitiateESLoggingLambdaFunctionName": { + "value": "initiate-elasticsearch-logging" + }, + "IdentifyESLoggingLambdaFunctionName": { + "value": "describe-elasticsearch-logging" } } }, @@ -1098,6 +1122,46 @@ "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } + }, + "StackEvaluateESLogging": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ 
"arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchLogging" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify Elasticsearch domain logging issues.", + "EvaluateLambdaDescription": "Lambda function to describe Elasticsearch domain logging issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateESLoggingLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyESLoggingLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_elasticsearch_domains_logging_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_elasticsearch_domains_logging_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Elasticsearch domain logging evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationESLogging"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameESLogging", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESLogging", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } + } } }, "Outputs": { diff --git 
a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 1a5b7b6f..50960d62 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -16,7 +16,8 @@ resource "aws_cloudformation_stack" "identification" { "aws_s3_bucket_object.ami-public-access-issues-identification", "aws_s3_bucket_object.sqs-public-policy-identification", "aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification", - "aws_s3_bucket_object.rds-unencrypted-instance-identification" + "aws_s3_bucket_object.rds-unencrypted-instance-identification", + "aws_s3_bucket_object.elasticsearch-domain-logging-issues-identification" ] tags = "${var.tags}" @@ -44,6 +45,7 @@ resource "aws_cloudformation_stack" "identification" { SourceIdentificationSQSPublicPolicy = "${aws_s3_bucket_object.sqs-public-policy-identification.id}" SourceIdentificationS3Encryption = "${aws_s3_bucket_object.s3-unencrypted-bucket-issues-identification.id}" SourceIdentificationRDSEncryption = "${aws_s3_bucket_object.rds-unencrypted-instance-identification.id}" + SourceIdentificationElasticSearchLogging = "${aws_s3_bucket_object.elasticsearch-domain-logging-issues-identification.id}" } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index e5658577..0b52ef08 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -96,3 +96,9 @@ resource "aws_s3_bucket_object" "rds-unencrypted-instance-identification" { key = "lambda/${format("rds-unencrypted-instance-identification-%s.zip", "${md5(file("${path.module}/../../../packages/rds-unencrypted-instance-identification.zip"))}")}" source = 
"${path.module}/../../../packages/rds-unencrypted-instance-identification.zip" } + +resource "aws_s3_bucket_object" "elasticsearch-domain-logging-issues-identification" { + bucket = "${var.s3bucket}" + key = "lambda/${format("elasticsearch-domain-logging-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/elasticsearch-domain-logging-issues-identification.zip"))}")}" + source = "${path.module}/../../../packages/elasticsearch-domain-logging-issues-identification.zip" +} \ No newline at end of file diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py new file mode 100644 index 00000000..eccbd677 --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py @@ -0,0 +1,90 @@ +import json +import logging + + +from library.logger import set_logging +from library.config import Config +from library.aws.elasticsearch import ESDomainChecker +from library.aws.utility import Account +from library.ddb_issues import IssueStatus, ESPublicAccessIssue +from library.ddb_issues import Operations as IssueOperations +from library.aws.utility import DDB, Sns + + +def lambda_handler(event, context): + """ Lambda handler to evaluate Elasticsearch publicly accessible domains """ + set_logging(level=logging.INFO) + + try: + payload = json.loads(event["Records"][0]["Sns"]["Message"]) + account_id = payload['account_id'] + account_name = payload['account_name'] + # get the last region from the list to process + region = payload['regions'].pop() + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) + except Exception: + logging.exception(f"Failed to parse event\n{event}") + return + + try: + 
config = Config() + + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(config.esPublicAccess.ddb_table_name) + + account = Account(id=account_id, + name=account_name, + region=region, + role_name=config.aws.role_name_identification) + if account.session is None: + return + + logging.debug(f"Checking for Elasticsearch publicly accessible domains in {account}") + + # existing open issues for account to check if resolved + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESPublicAccessIssue) + # make dictionary for fast search by id + # and filter by current region + open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} + logging.debug(f"Elasticsearch publicly accessible domains in DDB:\n{open_issues.keys()}") + + checker = ESDomainChecker(account=account) + if checker.check(): + for domain in checker.domains: + if domain.public: + issue = ESPublicAccessIssue(account_id, domain.name) + issue.issue_details.region = domain.account.region + issue.issue_details.id = domain.id + issue.issue_details.arn = domain.arn + issue.issue_details.tags = domain.tags + issue.issue_details.policy = domain.policy + if config.esPublicAccess.in_whitelist(account_id, domain.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {domain.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(domain.name, None) + + logging.debug(f"Elasticsearch publicly accessible domains in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated Elasticsearch domains + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + 
DDB.track_progress(api_table, request_id) + except Exception: + logging.exception(f"Failed to check Elasticsearch publicly accessible domains " + f"in '{region}' for '{account_id} ({account_name})'") + + # push SNS messages until the list with regions to check is empty + if len(payload['regions']) > 0: + try: + Sns.publish(payload["sns_arn"], payload) + except Exception: + logging.exception("Failed to chain Elasticsearch publicly accessible domains checking") + + logging.debug(f"Checked Elasticsearch publicly accessible domains in '{region}' for '{account_id} ({account_name})'") diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py new file mode 100644 index 00000000..f097188e --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py @@ -0,0 +1,36 @@ +import os +import logging + + +from library.logger import set_logging +from library.config import Config +from library.aws.utility import Sns + + +def lambda_handler(event, context): + """ Lambda handler to initiate to find publicly accessible elasticsearch domains """ + set_logging(level=logging.INFO) + logging.debug("Initiating publicly accessible Elasticsearch domains checking") + + try: + sns_arn = os.environ["SNS_ARN"] + config = Config() + + if not config.esPublicAccess.enabled: + logging.debug("Elasticsearch publicly accessible domains checking disabled") + return + + logging.debug("Iterating each account to initiate Elasticsearch publicly accessible domains checking") + for account_id, account_name in config.esPublicAccess.accounts.items(): + payload = {"account_id": account_id, + "account_name": account_name, + "regions": config.aws.regions, + "sns_arn": sns_arn + } + 
logging.debug(f"Initiating Elasticsearch publicly accessible domains checking for '{account_name}'") + Sns.publish(sns_arn, payload) + except Exception: + logging.exception("Error occurred while initiation of Elasticsearch publicly accessible domains checking") + return + + logging.debug("Elasticsearch publicly accessible domains checking initiation done") diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py new file mode 100644 index 00000000..0d36f0ee --- /dev/null +++ b/hammer/library/aws/elasticsearch.py @@ -0,0 +1,324 @@ +import json +import logging +import pathlib + +from datetime import datetime, timezone +from botocore.exceptions import ClientError +from collections import namedtuple +from library.utility import timeit +from library.utility import jsonDumps +from library.aws.utility import convert_tags +from library.aws.s3 import S3Operations + +# structure which describes Elastic search domains +ElasticSearchDomain_Details = namedtuple('ElasticSearchDomain', [ + # domain name + 'domain_name', + # domain arn + 'domain_arn', + # vpc_id + 'vpc_id' +]) + + +class ElasticSearchOperations: + @classmethod + @timeit + def get_elasticsearch_details_of_sg_associated(cls, elasticsearch_client, group_id): + """ Retrieve elastic search details meta data with security group attached + + :param elasticsearch_client: boto3 elastic search client + :param group_id: security group id + + :return: list with elastic search details + """ + # describe elastic search domain details with security group attached. 
+ domains_list = [] + + elasticsearch_response = elasticsearch_client.list_domain_names() + for domain in elasticsearch_response["DomainNames"]: + domain_name = domain["DomainName"] + domain_details = elasticsearch_client.describe_elasticsearch_domain( + DomainName=domain_name + )["DomainStatus"] + if group_id in str(domain_details): + domains_list.append(ElasticSearchDomain_Details( + domain_name=domain_name, + domain_arn=domain_details["ARN"], + vpc_id=domain_details["VPCOptions"]["VPCId"] + )) + + return domains_list + + @staticmethod + def put_domain_policy(es_client, domain_name, policy): + """ + Replaces a policy on a domain. If the domain already has a policy, the one in this request completely replaces it. + + :param es_client: Elasticsearch boto3 client + :param domain_name: Elasticsearch domain where to update policy on + :param policy: `dict` or `str` with policy. `Dict` will be transformed to string using pretty json.dumps(). + + :return: nothing + """ + policy_json = jsonDumps(policy) if isinstance(policy, dict) else policy + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + AccessPolicies=policy_json, + ) + + def retrieve_loggroup_arn(self, cw_client, domain_name): + """ + + :param cw_client: cloudwatch logs boto3 client + :param domain_name: Elasticsearch domain name + :return: + """ + log_groups = cw_client.describe_log_groups() + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = None + for log_group in log_groups["logGroups"]: + log_group_name = log_group["logGroupName"] + if log_group_name == domain_log_group_name: + log_group_arn = log_group["arn"] + + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + self.retrieve_loggroup_arn(cw_client, domain_name) + + return log_group_arn + + def set_domain_logging(self, es_client, cw_client, domain_name): + """ + + :param es_client: elastic search boto3 client + :param cw_client: cloudwatch logs 
boto3 client + :param domain_name: elastic search domain name + :return: + """ + log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + LogPublishingOptions={ + 'ES_APPLICATION_LOGS': + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } + + } + ) + + @classmethod + def validate_access_policy(cls, policy_details): + """ + + :param policy_details: + :return: + """ + public_policy = False + for statement in policy_details.get("Statement", []): + effect = statement['Effect'] + principal = statement.get('Principal', {}) + not_principal = statement.get('NotPrincipal', None) + condition = statement.get('Condition', None) + suffix = "/0" + # check both `Principal` - `{"AWS": "*"}` and `"*"` + # and condition (if exists) to be restricted (not "0.0.0.0/0") + if effect == "Allow" and \ + (principal == "*" or principal.get("AWS") == "*"): + if condition is not None: + if suffix in str(condition.get("IpAddress")): + return True + else: + return True + if effect == "Allow" and \ + not_principal is not None: + # TODO: it is not recommended to use `Allow` with `NotPrincipal`, need to write proper check for such case + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notprincipal.html + logging.error(f"TODO: is this statement public???\n{statement}") + return False + + return public_policy + + +class ESDomainDetails(object): + """ + Basic class for ElasticSearch domain details. + + """ + + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None, policy=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param arn: tags of task definition. + :param is_logging: logging enabled or not. 
+ """ + self.account = account + self.name = name + self.id = id + self.arn = arn + self.is_logging = is_logging + self.encrypted = encrypted + self._policy = json.loads(policy) if policy else {} + self.backup_filename = pathlib.Path(f"{self.name}.json") + self.tags = convert_tags(tags) + + @property + def policy(self): + """ + :return: pretty formatted string with S3 bucket policy + """ + return jsonDumps(self._policy) + + @property + def public(self): + """ + :return: boolean, True - if Elasticsearch domain policy allows public access + False - otherwise + """ + return ElasticSearchOperations.validate_access_policy(self._policy) + + def backup_policy_s3(self, s3_client, bucket): + """ + Backup Elasticsearch policy json to S3. + + :param s3_client: S3 boto3 client + :param bucket: S3 bucket name where to put backup of S3 bucket policy + + :return: S3 path (without bucket name) to saved object with elasticsearch domain policy backup + """ + timestamp = datetime.now(timezone.utc).isoformat('T', 'seconds') + path = (f"queue_policies/" + f"{self.account.id}/" + f"{self.backup_filename.stem}_{timestamp}" + f"{self.backup_filename.suffix}") + if S3Operations.object_exists(s3_client, bucket, path): + raise Exception(f"s3://{bucket}/{path} already exists") + S3Operations.put_object(s3_client, bucket, path, self.policy) + return path + + def restrict_policy(self): + """ + Restrict and replace current policy on domain. + + :return: nothing + + .. note:: This keeps self._policy unchanged. + You need to recheck Elasticsearch domain policy to ensure that it was really restricted. 
+ """ + restricted_policy = S3Operations.restrict_policy(self._policy) + try: + ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) + except Exception: + logging.exception(f"Failed to put {self.name} restricted policy") + return False + + return True + + def set_logging(self): + """ + + :return: + """ + try: + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + except Exception: + logging.exception(f"Failed to enable {self.name} logging") + return False + + return True + + +class ESDomainChecker: + """ + Basic class for checking Elasticsearch unencrypted and logging issues in account/region. + Encapsulates discovered Elasticsearch domains. + """ + + def __init__(self, account): + """ + :param account: `Account` instance with Elasticsearch domains to check + """ + self.account = account + self.domains = [] + + def get_domain(self, id): + """ + :return: `Elasticsearch Domain` by id + """ + for domain in self.domains: + if domain.name == id: + return domain + return None + + def check(self, ids=None): + """ + Walk through Elasticsearch domains in the account/region and put them to `self.domains`. + + :param ids: list with Elasticsearch domain ids to check, if it is not supplied - all Elasticsearch domains must be checked + + :return: boolean. 
True - if check was successful, + False - otherwise + """ + domain_details = [] + try: + es_client = self.account.client("es") + if ids is None: + ids = [] + domain_names_list = es_client.list_domain_names()["DomainNames"] + for domain_name in domain_names_list: + ids.append(domain_name["DomainName"]) + + if ids is not None: + domain_details = es_client.describe_elasticsearch_domains(DomainNames=ids)["DomainStatusList"] + + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(ec2:{err.operation_name})") + else: + logging.exception(f"Failed to describe elasticsearch domains in {self.account}") + return False + + domain_encrypted = False + is_logging = False + for domain_detail in domain_details: + domain_name = domain_detail["DomainName"] + domain_id = domain_detail["DomainId"] + domain_arn = domain_detail["ARN"] + encryption_at_rest = domain_detail.get("EncryptionAtRestOptions") + node_to_node_encryption = domain_detail.get("NodeToNodeEncryptionOptions") + if encryption_at_rest and encryption_at_rest["Enabled"]: + domain_encrypted = True + elif node_to_node_encryption and node_to_node_encryption["Enabled"]: + domain_encrypted = True + + logging_details = domain_detail.get("LogPublishingOptions") + + if logging_details: + index_logs = logging_details.get("INDEX_SLOW_LOGS") + search_logs = logging_details.get("SEARCH_SLOW_LOGS") + error_logs = logging_details.get("ES_APPLICATION_LOGS") + if (index_logs and index_logs["Enable"]) \ + or (search_logs and search_logs["Enable"]) \ + or (error_logs and error_logs["Enable"]): + is_logging = True + + tags = es_client.list_tags(ARN=domain_arn)["TagList"] + + access_policy = domain_detail.get("AccessPolicies") + + domain = ESDomainDetails(self.account, + name=domain_name, + id=domain_id, + arn=domain_arn, + tags=tags, + is_logging=is_logging, + encrypted=domain_encrypted, + policy=access_policy) + 
self.domains.append(domain) + return True \ No newline at end of file diff --git a/hammer/library/config.py b/hammer/library/config.py index 504f1a1d..21ddd61f 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -66,6 +66,9 @@ def __init__(self, # AMI public access issue config self.publicAMIs = ModuleConfig(self._config, "ec2_public_ami") + # Elasticsearch domain logging issue config + self.esLogging = ModuleConfig(self._config, "es_domain_logging") + self.bu_list = self._config.get("bu_list", []) self.whitelisting_procedure_url = self._config.get("whitelisting_procedure_url", None) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index d9ae7de2..0108e56f 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -238,6 +238,9 @@ def __init__(self, *args): super().__init__(*args) +class ESLoggingIssue(Issue): + def __init__(self, *args): + super().__init__(*args) class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py new file mode 100644 index 00000000..5cf107b4 --- /dev/null +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py @@ -0,0 +1,151 @@ +""" +Class to remediate ElasticSearch domain logging issues. 
+""" +import sys +import logging +import argparse + + +from library.logger import set_logging, add_cw_logging +from library.config import Config +from library.jiraoperations import JiraReporting +from library.slack_utility import SlackNotification +from library.ddb_issues import Operations as IssueOperations +from library.ddb_issues import IssueStatus, ESLoggingIssue +from library.aws.elasticsearch import ESDomainChecker +from library.aws.utility import Account +from library.utility import confirm +from library.utility import SingletonInstance, SingletonInstanceException + + +class CleanElasticSearchDomainLogging: + """ Class to remediate ElasticSearch domain logging issues """ + def __init__(self, config): + self.config = config + + def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): + """ Class method to clean ElasticSearch domains which are violating aws best practices """ + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(self.config.esLogging.ddb_table_name) + backup_bucket = config.aws.s3_backup_bucket + + retention_period = self.config.esLogging.remediation_retention_period + + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.esLogging.remediation_accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESLoggingIssue) + for issue in issues: + domain_name = issue.issue_id + + in_whitelist = self.config.esLogging.in_whitelist(account_id, domain_name) + #in_fixlist = self.config.esLogging.in_fixnow(account_id, domain_name) + + if in_whitelist: + logging.debug(f"Skipping {domain_name} (in whitelist)") + + # Adding label with "whitelisted" to jira ticket. 
+ jira.add_label( + ticket_id=issue.jira_details.ticket, + label=IssueStatus.Whitelisted.value + ) + continue + # if not in_fixlist: + # logging.debug(f"Skipping {domain_name} (not in fixlist)") + # continue + + if issue.timestamps.reported is None: + logging.debug(f"Skipping '{domain_name}' (was not reported)") + continue + + if issue.timestamps.remediated is not None: + logging.debug(f"Skipping {domain_name} (has been already remediated)") + continue + + updated_date = issue.timestamp_as_datetime + no_of_days_issue_created = (self.config.now - updated_date).days + + if no_of_days_issue_created >= retention_period: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + try: + account = Account(id=account_id, + name=account_name, + region=issue.issue_details.region, + role_name=self.config.aws.role_name_reporting) + if account.session is None: + continue + + checker = ESDomainChecker(account=account) + checker.check(ids=[domain_name]) + domain_details = checker.get_domain(domain_name) + if domain_details is None: + logging.debug(f"Elasticsearch domain {domain_name} was removed by user") + elif not domain_details.is_logging: + logging.debug(f"Elasticsearch domain {domain_name} logging issue was remediated by user") + else: + if not batch and \ + not confirm(f"Do you want to remediate elasticsearch domain '{domain_name}' logging issue", False): + continue + + logging.debug(f"Remediating '{domain_name}' logging issue") + + remediation_succeed = True + if domain_details.set_logging(): + comment = (f"Domain '{domain_name}' logging issue " + f"in '{account_name} / {account_id}' account " + f"was remediated by hammer") + else: + remediation_succeed = False + comment = (f"Failed to remediate elasticsearch domain '{domain_name}' logging issue " + f"in '{account_name} / {account_id}' account " + f"due to some limitations. 
Please, check manually") + + jira.remediate_issue( + ticket_id=issue.jira_details.ticket, + comment=comment, + reassign=remediation_succeed, + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_remediated(ddb_table, issue) + except Exception: + logging.exception(f"Error occurred while updating domain '{domain_name}' logging " + f"in '{account_name} / {account_id}'") + else: + logging.debug(f"Skipping '{domain_name}' " + f"({retention_period - no_of_days_issue_created} days before remediation)") + + +if __name__ == "__main__": + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + parser = argparse.ArgumentParser() + parser.add_argument('--batch', action='store_true', help='Do not ask confirmation for remediation') + args = parser.parse_args() + + try: + class_object = CleanElasticSearchDomainLogging(config) + class_object.clean_elasticsearch_domain_domain_logging_issues(batch=args.batch) + except Exception: + logging.exception("Failed to clean Elasticsearch domain logging issues") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py new file mode 100644 index 00000000..32af1a8e --- /dev/null +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -0,0 +1,165 @@ +""" +Class to create 
Elasticsearch domain logging issue tickets. +""" +import sys +import logging + + +from library.logger import set_logging, add_cw_logging +from library.aws.utility import Account +from library.config import Config +from library.jiraoperations import JiraReporting, JiraOperations +from library.slack_utility import SlackNotification +from library.ddb_issues import IssueStatus, ESLoggingIssue +from library.ddb_issues import Operations as IssueOperations +from library.utility import SingletonInstance, SingletonInstanceException + + +class CreateElasticSearchDomainLoggingIssueTickets(object): + """ Class to create elasticsearch domain logging issue tickets """ + def __init__(self, config): + self.config = config + + def create_tickets_elasticsearch_domain_logging(self): + """ Class method to create jira tickets """ + table_name = self.config.esLogging.ddb_table_name + + main_account = Account(region=self.config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(table_name) + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.esLogging.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ESLoggingIssue) + for issue in issues: + domain_name = issue.issue_id + region = issue.issue_details.region + tags = issue.issue_details.tags + + # issue has been already reported + if issue.timestamps.reported is not None: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Elasticsearch domain logging " + f"'{domain_name}' issue") + + comment = (f"Closing {issue.status.value} Elasticsearch domain logging '{domain_name}' issue " + f"in '{account_name} / {account_id}' account, '{region}' region") + if issue.status == 
IssueStatus.Whitelisted: + # Adding label with "whitelisted" to jira ticket. + jira.add_label( + ticket_id=issue.jira_details.ticket, + label=IssueStatus.Whitelisted.value + ) + jira.close_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_closed(ddb_table, issue) + # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}") + slack.report_issue( + msg=f"Elasticsearch domain logging '{domain_name}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) + else: + logging.debug(f"No changes for '{domain_name}'") + # issue has not been reported yet + else: + logging.debug(f"Reporting Elasticsearch domain logging '{domain_name}' issue") + + owner = tags.get("owner", None) + bu = tags.get("bu", None) + product = tags.get("product", None) + + issue_description = ( + f"Elasticsearch domain logging is not enabled.\n\n" + f"*Risk*: High\n\n" + f"*Account Name*: {account_name}\n" + f"*Account ID*: {account_id}\n" + f"*Region*: {region}\n" + f"*Domain ID*: {domain_name}\n" + ) + + issue_description += JiraOperations.build_tags_table(tags) + + auto_remediation_date = (self.config.now + self.config.esLogging.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + + issue_description += ( + f"*Recommendation*: " + f"Enable logging 
for Elasticsearch domain. "
+                        f"Elasticsearch logs contain information about errors and warnings raised "
+                        f"during the operation of the service and can be useful for troubleshooting. "
+                    )
+
+                    issue_summary = (f"Elasticsearch domain logging issue '{domain_name}' "
+                                     f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")
+
+                    try:
+                        response = jira.add_issue(
+                            issue_summary=issue_summary, issue_description=issue_description,
+                            priority="Major", labels=["es-domain-logging"],
+                            owner=owner,
+                            account_id=account_id,
+                            bu=bu, product=product,
+                        )
+                    except Exception:
+                        logging.exception("Failed to create jira ticket")
+                        continue
+
+                    if response is not None:
+                        issue.jira_details.ticket = response.ticket_id
+                        issue.jira_details.ticket_assignee_id = response.ticket_assignee_id
+
+                        issue.jira_details.owner = owner
+                        issue.jira_details.business_unit = bu
+                        issue.jira_details.product = product
+
+                        slack.report_issue(
+                            msg=f"Discovered {issue_summary}"
+                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
+                            owner=owner,
+                            account_id=account_id,
+                            bu=bu, product=product,
+                        )
+
+                    IssueOperations.set_status_reported(ddb_table, issue)
+
+
+if __name__ == '__main__':
+    module_name = sys.modules[__name__].__loader__.name
+    set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log")
+    config = Config()
+    add_cw_logging(config.local.log_group,
+                   log_stream=module_name,
+                   level=logging.DEBUG,
+                   region=config.aws.region)
+    try:
+        si = SingletonInstance(module_name)
+    except SingletonInstanceException:
+        logging.error(f"Another instance of '{module_name}' is already running, quitting")
+        sys.exit(1)
+
+    try:
+        obj = CreateElasticSearchDomainLoggingIssueTickets(config)
+        obj.create_tickets_elasticsearch_domain_logging()
+    except Exception:
+        logging.exception("Failed to create Elasticsearch domain logging issue tickets")

From 8599a3d548925ae751fd38c99d82700dd1d3d667 Mon Sep 17 00:00:00 2001
From: 
vigneswararaomacharla Date: Fri, 14 Jun 2019 16:19:46 +0530 Subject: [PATCH 047/193] Updated with deployment issue changes. Updated with deployment issue changes. --- deployment/cf-templates/identification.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 72984fb7..1348c91c 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -28,7 +28,7 @@ "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", "SourceIdentificationAMIPublicAccess", - "SourceIdentificationElasticSearchLogging", + "SourceIdentificationElasticSearchLogging" ] }, { @@ -96,7 +96,7 @@ }, "SourceIdentificationElasticSearchLogging":{ "dafault": "Relative path to Elasticsearch domain logging sources" - }, + } } } @@ -197,7 +197,7 @@ "SourceIdentificationElasticSearchLogging": { "Type": "String", "Default": "elasticsearch-domain-logging-issues-identification.zip" - }, + } }, "Conditions": { "LambdaSubnetsEmpty": { From 0222a1491f88dcf42ae4a9ac0fda0da961e0a707 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 18 Jun 2019 12:25:21 +0530 Subject: [PATCH 048/193] Updated with ES domain logging issue changes. Updated with ES domain logging issue changes. 
--- ...be_elasticsearch_domains_logging_issues.py | 28 ++++---- ...sc_elasticsearch_domains_logging_issues.py | 18 ++--- hammer/library/aws/elasticsearch.py | 70 ++++++++++++++----- .../clean_elasticsearch_domain_logging.py | 4 -- 4 files changed, 74 insertions(+), 46 deletions(-) diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py index eccbd677..0ba5f163 100644 --- a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py @@ -6,13 +6,13 @@ from library.config import Config from library.aws.elasticsearch import ESDomainChecker from library.aws.utility import Account -from library.ddb_issues import IssueStatus, ESPublicAccessIssue +from library.ddb_issues import IssueStatus, ESLoggingIssue from library.ddb_issues import Operations as IssueOperations from library.aws.utility import DDB, Sns def lambda_handler(event, context): - """ Lambda handler to evaluate Elasticsearch publicly accessible domains """ + """ Lambda handler to evaluate Elasticsearch domains logging issue """ set_logging(level=logging.INFO) try: @@ -31,7 +31,7 @@ def lambda_handler(event, context): config = Config() main_account = Account(region=config.aws.region) - ddb_table = main_account.resource("dynamodb").Table(config.esPublicAccess.ddb_table_name) + ddb_table = main_account.resource("dynamodb").Table(config.esLogging.ddb_table_name) account = Account(id=account_id, name=account_name, @@ -40,26 +40,26 @@ def lambda_handler(event, context): if account.session is None: return - logging.debug(f"Checking for Elasticsearch publicly accessible domains in {account}") + 
logging.debug(f"Checking for Elasticsearch domains logging issue in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESPublicAccessIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, ESLoggingIssue) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} - logging.debug(f"Elasticsearch publicly accessible domains in DDB:\n{open_issues.keys()}") + logging.debug(f"Elasticsearch domains logging issue in DDB:\n{open_issues.keys()}") checker = ESDomainChecker(account=account) if checker.check(): for domain in checker.domains: - if domain.public: - issue = ESPublicAccessIssue(account_id, domain.name) + if not domain.is_logging: + issue = ESLoggingIssue(account_id, domain.name) issue.issue_details.region = domain.account.region issue.issue_details.id = domain.id issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags - issue.issue_details.policy = domain.policy - if config.esPublicAccess.in_whitelist(account_id, domain.name): + + if config.esLogging.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open @@ -69,7 +69,7 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(domain.name, None) - logging.debug(f"Elasticsearch publicly accessible domains in DDB:\n{open_issues.keys()}") + logging.debug(f"Elasticsearch domains logging issue in DDB:\n{open_issues.keys()}") # all other unresolved issues in DDB are for removed/remediated Elasticsearch domains for issue in open_issues.values(): IssueOperations.set_status_resolved(ddb_table, issue) @@ -77,7 +77,7 @@ def lambda_handler(event, context): api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) DDB.track_progress(api_table, request_id) except 
Exception: - logging.exception(f"Failed to check Elasticsearch publicly accessible domains " + logging.exception(f"Failed to check Elasticsearch domains logging issue " f"in '{region}' for '{account_id} ({account_name})'") # push SNS messages until the list with regions to check is empty @@ -85,6 +85,6 @@ def lambda_handler(event, context): try: Sns.publish(payload["sns_arn"], payload) except Exception: - logging.exception("Failed to chain Elasticsearch publicly accessible domains checking") + logging.exception("Failed to chain Elasticsearch domains logging issue checking") - logging.debug(f"Checked Elasticsearch publicly accessible domains in '{region}' for '{account_id} ({account_name})'") + logging.debug(f"Checked Elasticsearch domains logging issue in '{region}' for '{account_id} ({account_name})'") diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py index f097188e..f7ccda13 100644 --- a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py @@ -8,29 +8,29 @@ def lambda_handler(event, context): - """ Lambda handler to initiate to find publicly accessible elasticsearch domains """ + """ Lambda handler to initiate to find elasticsearch domains logging issue """ set_logging(level=logging.INFO) - logging.debug("Initiating publicly accessible Elasticsearch domains checking") + logging.debug("Initiating Elasticsearch domains logging issue checking") try: sns_arn = os.environ["SNS_ARN"] config = Config() - if not config.esPublicAccess.enabled: - logging.debug("Elasticsearch publicly accessible domains checking 
disabled") + if not config.esLogging.enabled: + logging.debug("Elasticsearch domains logging issue checking disabled") return - logging.debug("Iterating each account to initiate Elasticsearch publicly accessible domains checking") - for account_id, account_name in config.esPublicAccess.accounts.items(): + logging.debug("Iterating each account to initiate Elasticsearch domains logging issue checking") + for account_id, account_name in config.esLogging.accounts.items(): payload = {"account_id": account_id, "account_name": account_name, "regions": config.aws.regions, "sns_arn": sns_arn } - logging.debug(f"Initiating Elasticsearch publicly accessible domains checking for '{account_name}'") + logging.debug(f"Initiating Elasticsearch domains logging issue checking for '{account_name}'") Sns.publish(sns_arn, payload) except Exception: - logging.exception("Error occurred while initiation of Elasticsearch publicly accessible domains checking") + logging.exception("Error occurred while initiation of Elasticsearch domains logging issue checking") return - logging.debug("Elasticsearch publicly accessible domains checking initiation done") + logging.debug("Elasticsearch domains logging issue checking initiation done") diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 0d36f0ee..52146849 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -67,28 +67,56 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) - def retrieve_loggroup_arn(self, cw_client, domain_name): + @staticmethod + def retrieve_loggroup_arn(cw_client, domain_log_group_name): """ + This method used to retrieve cloud watch log group arn details if log group is available. 
If not, create a + cloudwatch log group and returns arn of newly created log group :param cw_client: cloudwatch logs boto3 client - :param domain_name: Elasticsearch domain name + :param domain_log_group_name: Elasticsearch domain's log group name :return: """ log_groups = cw_client.describe_log_groups() - domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = None for log_group in log_groups["logGroups"]: log_group_name = log_group["logGroupName"] if log_group_name == domain_log_group_name: log_group_arn = log_group["arn"] - if not log_group_arn: - cw_client.create_log_group(logGroupName=domain_log_group_name) - self.retrieve_loggroup_arn(cw_client, domain_name) - + if log_group_arn: + """ + In order to successfully deliver the logs to your CloudWatch Logs log group, + Amazon Elasticsearch Service (AES) will need access to two CloudWatch Logs API calls: + 1. CreateLogStream: Create a CloudWatch Logs log stream for the log group you specified + 2. PutLogEvents: Deliver CloudTrail events to the CloudWatch Logs log stream + + Adding resource policy that grants above access. 
+ """ + policy_name = "AES-"+domain_log_group_name+"-Application-logs" + policy_doc = {} + statement = {} + principal = {} + action = [] + principal["Service"] = "es.amazonaws.com" + action.append("logs:PutLogEvents") + action.append("logs:CreateLogStream") + statement["Effect"] = "Allow" + statement["Principal"] = principal + statement["Action"] = action + statement["Resource"] = log_group_arn + + policy_doc["Statement"] = statement + + cw_client.put_resource_policy( + policyName=policy_name, + policyDocument=str(json.dumps(policy_doc)) + ) return log_group_arn - def set_domain_logging(self, es_client, cw_client, domain_name): + @staticmethod + def set_domain_logging(es_client, cw_client, domain_name): """ :param es_client: elastic search boto3 client @@ -96,16 +124,20 @@ def set_domain_logging(self, es_client, cw_client, domain_name): :param domain_name: elastic search domain name :return: """ - log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = ElasticSearchOperations.retrieve_loggroup_arn(cw_client, domain_log_group_name) + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + log_group_arn = ElasticSearchOperations.retrieve_loggroup_arn(cw_client, domain_log_group_name) + es_client.update_elasticsearch_domain_config( DomainName=domain_name, LogPublishingOptions={ 'ES_APPLICATION_LOGS': - { - 'CloudWatchLogsLogGroupArn': log_group_arn, - 'Enabled': True - } - + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } } ) @@ -284,9 +316,9 @@ def check(self, ids=None): logging.exception(f"Failed to describe elasticsearch domains in {self.account}") return False - domain_encrypted = False - is_logging = False for domain_detail in domain_details: + is_logging = False + domain_encrypted = False domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = 
domain_detail["ARN"] @@ -303,9 +335,9 @@ def check(self, ids=None): index_logs = logging_details.get("INDEX_SLOW_LOGS") search_logs = logging_details.get("SEARCH_SLOW_LOGS") error_logs = logging_details.get("ES_APPLICATION_LOGS") - if (index_logs and index_logs["Enable"]) \ - or (search_logs and search_logs["Enable"]) \ - or (error_logs and error_logs["Enable"]): + if (index_logs and index_logs["Enabled"]) \ + or (search_logs and search_logs["Enabled"]) \ + or (error_logs and error_logs["Enabled"]): is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py index 5cf107b4..aa8cbe78 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py @@ -41,7 +41,6 @@ def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): domain_name = issue.issue_id in_whitelist = self.config.esLogging.in_whitelist(account_id, domain_name) - #in_fixlist = self.config.esLogging.in_fixnow(account_id, domain_name) if in_whitelist: logging.debug(f"Skipping {domain_name} (in whitelist)") @@ -52,9 +51,6 @@ def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): label=IssueStatus.Whitelisted.value ) continue - # if not in_fixlist: - # logging.debug(f"Skipping {domain_name} (not in fixlist)") - # continue if issue.timestamps.reported is None: logging.debug(f"Skipping '{domain_name}' (was not reported)") From 709cfd46cd0c374ca22644c0afb6aeb41d7f1c6c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 19 Jun 2019 11:56:47 +0530 Subject: [PATCH 049/193] Updated with ES logging issue documentation. Updated with ES logging issue documentation. 
--- docs/_data/sidebars/mydoc_sidebar.yml | 6 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 19 +- docs/pages/features.md | 1 + .../pages/playbook23_elasticsearch_logging.md | 181 ++++++++++++++++++ docs/pages/remediation_backup_rollback.md | 1 + .../analytics/security_issues_csv_report.py | 6 +- .../cronjobs/automation_scheduler.py | 1 + 8 files changed, 213 insertions(+), 3 deletions(-) create mode 100644 docs/pages/playbook23_elasticsearch_logging.md diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..f79d4851 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,9 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + + + - title: Elasticsearch Domains Logging issues + url: /playbook23_elasticsearch_logging.html + output: web, pdf + diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index c7331eb7..bbb39464 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. +* **SourceIdentificationElasticSearchLogging**: the relative path to the Lambda package that identifies Elasticsearch domain logging issues. 
The default value is **elasticsearch-domain-logging-issues-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..6691f94e 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,21 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; + +### 2.23. Elasticsearch Domain Logging Issues + +This section describes how to detect whether your Elasticsearch domains have logging enabled. Refer to [issue-specific playbook](playbook23_elasticsearch_logging.html) for further details. + +Edit the **es_domain_logging** section of the `config.json` file to configure the handling of this issue. + +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-es-domain-logging`. +* **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. 
Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); +* **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`; +* **remediation_retention_period**: the amount of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`. + + diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..df614698 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[Elasticsearch Domain Logging Issues](playbook23_elasticsearch_logging.html) |Detects Elasticsearch domains logging issues |Any one of Elasticsearch Domain logging issue | Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. 
\ No newline at end of file diff --git a/docs/pages/playbook23_elasticsearch_logging.md b/docs/pages/playbook23_elasticsearch_logging.md new file mode 100644 index 00000000..ca2e50ee --- /dev/null +++ b/docs/pages/playbook23_elasticsearch_logging.md @@ -0,0 +1,181 @@ +--- +title: Elasticsearch logging issues +keywords: playbook23 +sidebar: mydoc_sidebar +permalink: playbook23_elasticsearch_logging.html +--- + +# Playbook 23: Elasticsearch logging issues + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect whether Elasticsearch domains have logging enabled. + +## 1. Issue Identification + +Dow Jones Hammer identifies Elasticsearch domains that do not have logging enabled. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. + +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/initiate_to_desc_elasticsearch_domains_logging_issues.py`| +|Identification|`hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py` | + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. 
+ +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect person to report issue to by examining `owner` tag on affected Elasticsearch domain. In case when such tag **exists** and is **valid JIRA/Slack user**: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). + +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +``` + + +## 3. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. 
+To identify and report issues of this type, you should add the following parameters in the **es_domain_logging** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-es-domain-logging` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`remediation` |Toggle Dow Jones Hammer remediation functionality for this issue type |`false`| +|`remediation_retention_period` |Toggle Dow Jones Hammer remediation retention period details for this issue type |`21`| + +Sample **config.json** section: +``` +"es_domain_logging": { + "enabled": true, + "ddb.table_name": "hammer-es-domain-logging", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } +``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific Elasticsearch domains. To configure such exceptions, you should edit the **es_domain_logging** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Elasticsearch Domain Names(s)| + +Sample **whitelist.json** section: +``` +"es_domain_logging": { + "__comment__": "Detects Elasticsearch domains which are not enabled logging - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + }, +``` + +### 3.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. 
Account-specific settings precede the global settings in the **ticket_owners.json** configuration file. + +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+ +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/hammer-initiate-elasticsearch-logging`| +|Identification |`/aws/lambda/hammer-describe-elasticsearch-logging`| + +### 4.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_elasticsearch_domain_logging_issue_tickets`| + + +### 4.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 4.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 5. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`name` |string|Elasticsearch domain name |`test-domain` | +|`id` |string|Domain arn |`arn:aws:es:us-east-2:1234567890123:domain/test-domain` | +|`tags` |map |Tags associated with Elasticsearch domain |`{"Name": "TestInstance", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..09380a6f 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[Elasticsearch Domains Logging issues](playbook23_elasticsearch_logging.html#3-issue-remediation) | `Yes` | `No` | ## 2. 
How Remediation Backup Works diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index 321eeee9..0263ddcf 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,9 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, SQSPolicyIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, SQSPolicyIssue, \ + ESLoggingIssue from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -70,6 +72,7 @@ def generate(self): (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), (self.config.sqspolicy.ddb_table_name, "SQS Policy Public Access", SQSPolicyIssue), + (self.config.esLogging.ddb_table_name, "Elasticsearch Logging Issues", ESLoggingIssue) ] open_security_issues_workbook = xlwt.Workbook() @@ -125,7 +128,6 @@ def generate(self): channel=channel) - if __name__ == '__main__': module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index f473312f..89bc6570 100755 --- 
a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,7 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("Elasticsearch Logging Issues", config.esLogging, "create_elasticsearch_domain_logging_issue_tickets", "clean_elasticsearch_domain_logging"), ] for title, module_config, reporting_script, remediation_script in modules: From 76e6addcedc112d373b8210340cba4cb04aa4c16 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 19 Jun 2019 15:50:06 +0530 Subject: [PATCH 050/193] Added ES public policy issue documentation. Added ES public policy issue documentation. --- docs/_data/sidebars/mydoc_sidebar.yml | 4 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 16 +- docs/pages/features.md | 1 + .../playbook22_elasticsearch_public_access.md | 201 ++++++++++++++++++ docs/pages/remediation_backup_rollback.md | 1 + hammer/library/aws/elasticsearch.py | 70 ++++-- .../analytics/security_issues_csv_report.py | 6 +- .../cronjobs/automation_scheduler.py | 2 + 9 files changed, 280 insertions(+), 22 deletions(-) create mode 100644 docs/pages/playbook22_elasticsearch_public_access.md diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..57be2a3e 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,7 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + + - title: Elasticsearch Domains Public Access Policy issues + url: /playbook22_elasticsearch_public_access.html + output: web, pdf diff 
--git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index c7331eb7..e40078ff 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. +* **SourceIdentificationElasticSearchPublicAccess**: the relative path to the Lambda package that identifies Elasticsearch domain public access issues. The default value is **elasticsearch-public-access-domain-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..4e887e58 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,18 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. 
Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +### 2.22. Elasticsearch Domain Public Access Issues + +This section describes how to detect whether your Elasticsearch domains have a public access policy. Refer to [issue-specific playbook](playbook22_elasticsearch_public_access.html) for further details. + +Edit the **es_public_access_domain** section of the `config.json` file to configure the handling of this issue. + +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `es-public-access-domain`. +* **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); +* **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`; +* **remediation_retention_period**: the amount of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`. 
diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..ac0b1ebf 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[Elasticsearch Domain Public Access Issues](playbook22_elasticsearch_public_access.html) |Detects Elasticsearch domain public access issues |Any one of Elasticsearch Domain public access issue | Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. \ No newline at end of file diff --git a/docs/pages/playbook22_elasticsearch_public_access.md b/docs/pages/playbook22_elasticsearch_public_access.md new file mode 100644 index 00000000..8c7b2b5c --- /dev/null +++ b/docs/pages/playbook22_elasticsearch_public_access.md @@ -0,0 +1,201 @@ +--- +title: Elasticsearch Domain public access +keywords: playbook22 +sidebar: mydoc_sidebar +permalink: playbook22_elasticsearch_public_access.html +--- + +# Playbook 22: Elasticsearch publicly accessible domains + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect Elasticsearch domains that are publicly accessible + +## 1. Issue Identification + +Dow Jones Hammer identifies those Elasticsearch domains for public access policy attached. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. 
+ +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/elasticsearch-public-access-domain-identification/initiate_to_desc_elasticsearch_public_access_domains.py`| +|Identification|`hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py` | + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. + +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect person to report issue to by examining `owner` tag on affected Elasticsearch domains. In case when such tag **exists** and is **valid JIRA/Slack user**: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). 
+ +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +``` + +## 3. Issue Remediation + +### 3.1 Automatic + +To reduce the workload of your DevOps engineers and mitigate the threats stemming from this issue, you can configure automatic remediation of issues. It means that in case Dow Jones Hammer has detected and reported an issue, but the assignee of the report has not remediated the issue within a timeframe specified in the configuration, the Dow Jones Hammer remediation job will adjust Elasticsearch Domain policy to eliminate this vulnerability. + +In this specific case, Dow Jones Hammer restricts public statement by adding (or changing) `IpAddress` condition value that allow access only for IP addresses defined in [RFC 1918 - Address Allocation for Private Internets](https://tools.ietf.org/html/rfc1918). + +This Python module implements the issue remediation functionality: +``` +hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +``` + +### 3.2 Manual + +To retain full control on the remediation functionality you can disable automatic remediation in [config.json](#41-the-configjson-file) and launch it manually: +1. Login to Dow Jones Hammer reporting and remediation EC2 via SSH with **centos** user and ssh key you created during [deployment](configuredeploy_overview.html#25-create-ec2-key-pair-for-hammer): `ssh -l centos -i ` +2. Become **root** user: `sudo su -` +3. Change directory to Dow Jones Hammer sources: `cd /hammer-correlation-engine` +4. Launch Dow Jones Hammer remediation script: `python3.6 -m remediation.clean_elasticsearch_policy_permissions` +5. Confirm or refuse remediation of each issue separately + + +## 4. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 4.1. 
The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. +To identify and report issues of this type, you should add the following parameters in the **es_public_access_domain** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-es-public-access-domain` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| + +Sample **config.json** section: +``` +"es_public_access_domain": { + "enabled": true, + "ddb.table_name": "hammer-es-public-access-domain", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + },``` + +### 4.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific Elasticsearch Domains. To configure such exceptions, you should edit the **es_public_access_domain** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Elasticsearch Domain Names(s)| + +Sample **whitelist.json** section: +``` +"es_public_access_domain": { + "__comment__": "Detects publicly accessible Elasticsearch domains - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + }, +``` + +### 4.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. 
Account-specific settings precede the global settings in the **ticket_owners.json** configuration file. + +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 5. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 5.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+ +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/hammer-initiate-elasticsearch-public-access`| +|Identification |`/aws/lambda/hammer-describe-elasticsearch-public-access`| + +### 5.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_elasticsearch_public_access_issue_tickets`| +|Remediation |`remediation.clean_elasticsearch_policy_permissions`| + + +### 5.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 5.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 6. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`name` |string|Elasticsearch domain name |`test-domain` | +|`arn` |string|Elasticsearch Domain Arn |`arn:aws:es:us-east-2:1234567890123:domain/test-domain` | +|`tags` |map |Tags associated with Domain |`{"Name": "TestDomain", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..794d7efc 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[Elasticsearch Domains Public Access issues](playbook22_elasticsearch_public_access.html#3-issue-remediation) | `Yes` | `Yes` | ## 2. How Remediation Backup Works diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 0d36f0ee..52146849 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -67,28 +67,56 @@ def put_domain_policy(es_client, domain_name, policy): AccessPolicies=policy_json, ) - def retrieve_loggroup_arn(self, cw_client, domain_name): + @staticmethod + def retrieve_loggroup_arn(cw_client, domain_log_group_name): """ + This method used to retrieve cloud watch log group arn details if log group is available. 
If not, create a + cloudwatch log group and returns arn of newly created log group :param cw_client: cloudwatch logs boto3 client - :param domain_name: Elasticsearch domain name + :param domain_log_group_name: Elasticsearch domain's log group name :return: """ log_groups = cw_client.describe_log_groups() - domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = None for log_group in log_groups["logGroups"]: log_group_name = log_group["logGroupName"] if log_group_name == domain_log_group_name: log_group_arn = log_group["arn"] - if not log_group_arn: - cw_client.create_log_group(logGroupName=domain_log_group_name) - self.retrieve_loggroup_arn(cw_client, domain_name) - + if log_group_arn: + """ + In order to successfully deliver the logs to your CloudWatch Logs log group, + Amazon Elasticsearch Service (AES) will need access to two CloudWatch Logs API calls: + 1. CreateLogStream: Create a CloudWatch Logs log stream for the log group you specified + 2. PutLogEvents: Deliver CloudTrail events to the CloudWatch Logs log stream + + Adding resource policy that grants above access. 
+ """ + policy_name = "AES-"+domain_log_group_name+"-Application-logs" + policy_doc = {} + statement = {} + principal = {} + action = [] + principal["Service"] = "es.amazonaws.com" + action.append("logs:PutLogEvents") + action.append("logs:CreateLogStream") + statement["Effect"] = "Allow" + statement["Principal"] = principal + statement["Action"] = action + statement["Resource"] = log_group_arn + + policy_doc["Statement"] = statement + + cw_client.put_resource_policy( + policyName=policy_name, + policyDocument=str(json.dumps(policy_doc)) + ) return log_group_arn - def set_domain_logging(self, es_client, cw_client, domain_name): + @staticmethod + def set_domain_logging(es_client, cw_client, domain_name): """ :param es_client: elastic search boto3 client @@ -96,16 +124,20 @@ def set_domain_logging(self, es_client, cw_client, domain_name): :param domain_name: elastic search domain name :return: """ - log_group_arn = self.retrieve_loggroup_arn(cw_client, domain_name) + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = ElasticSearchOperations.retrieve_loggroup_arn(cw_client, domain_log_group_name) + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + log_group_arn = ElasticSearchOperations.retrieve_loggroup_arn(cw_client, domain_log_group_name) + es_client.update_elasticsearch_domain_config( DomainName=domain_name, LogPublishingOptions={ 'ES_APPLICATION_LOGS': - { - 'CloudWatchLogsLogGroupArn': log_group_arn, - 'Enabled': True - } - + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } } ) @@ -284,9 +316,9 @@ def check(self, ids=None): logging.exception(f"Failed to describe elasticsearch domains in {self.account}") return False - domain_encrypted = False - is_logging = False for domain_detail in domain_details: + is_logging = False + domain_encrypted = False domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = 
domain_detail["ARN"] @@ -303,9 +335,9 @@ def check(self, ids=None): index_logs = logging_details.get("INDEX_SLOW_LOGS") search_logs = logging_details.get("SEARCH_SLOW_LOGS") error_logs = logging_details.get("ES_APPLICATION_LOGS") - if (index_logs and index_logs["Enable"]) \ - or (search_logs and search_logs["Enable"]) \ - or (error_logs and error_logs["Enable"]): + if (index_logs and index_logs["Enabled"]) \ + or (search_logs and search_logs["Enabled"]) \ + or (error_logs and error_logs["Enabled"]): is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index 321eeee9..f948d206 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,9 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, SQSPolicyIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, SQSPolicyIssue, \ + ESPublicAccessIssue from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -70,6 +72,7 @@ def generate(self): (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), (self.config.sqspolicy.ddb_table_name, "SQS Policy Public Access", 
SQSPolicyIssue), + (self.config.esPublicAccess.ddb_table_name, "Elasticsearch Public Access", ESPublicAccessIssue) ] open_security_issues_workbook = xlwt.Workbook() @@ -125,7 +128,6 @@ def generate(self): channel=channel) - if __name__ == '__main__': module_name = sys.modules[__name__].__loader__.name set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index f473312f..8c6e6876 100755 --- a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,8 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("Elasticsearch Public Access", config.esPublicAccess, "create_elasticsearch_public_access_issue_tickets", + "clean_elasticsearch_policy_permissions") ] for title, module_config, reporting_script, remediation_script in modules: From ac30cf8e78a857b618f24fc5656ffdf22819362a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 19 Jun 2019 16:27:59 +0530 Subject: [PATCH 051/193] Updated with remediation details for ES logging issue. Updated with remediation details for ES logging issue. 
--- .../pages/playbook23_elasticsearch_logging.md | 41 ++++++++++++++----- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/docs/pages/playbook23_elasticsearch_logging.md b/docs/pages/playbook23_elasticsearch_logging.md index ca2e50ee..72515ca7 100644 --- a/docs/pages/playbook23_elasticsearch_logging.md +++ b/docs/pages/playbook23_elasticsearch_logging.md @@ -5,7 +5,7 @@ sidebar: mydoc_sidebar permalink: playbook23_elasticsearch_logging.html --- -# Playbook 12: Elasticsearch logging issues +# Playbook 23: Elasticsearch logging issues ## Introduction @@ -43,12 +43,31 @@ This Python module implements the issue reporting functionality: hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py ``` +## 3. Issue Remediation -## 3. Setup Instructions For This Issue +### 3.1 Automatic + +To reduce the workload of your DevOps engineers and mitigate the threats stemming from this issue, you can configure automatic remediation of issues. It means that in case Dow Jones Hammer has detected and reported an issue, but the assignee of the report has not remediated the issue within a timeframe specified in the configuration, the Dow Jones Hammer remediation job will add/adjust logging for Elasticsearch Domain to eliminate this vulnerability. + +This Python module implements the issue remediation functionality: +``` +hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py +``` + +### 3.2 Manual + +To retain full control on the remediation functionality you can disable automatic remediation in [config.json](#41-the-configjson-file) and launch it manually: +1. Login to Dow Jones Hammer reporting and remediation EC2 via SSH with **centos** user and ssh key you created during [deployment](configuredeploy_overview.html#25-create-ec2-key-pair-for-hammer): `ssh -l centos -i ` +2. Become **root** user: `sudo su -` +3. Change directory to Dow Jones Hammer sources: `cd /hammer-correlation-engine` +4. 
Launch Dow Jones Hammer remediation script: `python3.6 -m remediation.clean_elasticsearch_domain_logging` +5. Confirm or refuse remediation of each issue separately + +## 4. Setup Instructions For This Issue To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: -### 3.1. The config.json File +### 4.1. The config.json File The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. To identify and report issues of this type, you should add the following parameters in the **es_domain_logging** section of the **config.json** file: @@ -72,7 +91,7 @@ Sample **config.json** section: } ``` -### 3.2. The whitelist.json File +### 4.2. The whitelist.json File You can define exceptions to the general automatic remediation settings for specific Elasticsearch domains. To configure such exceptions, you should edit the **es_domain_logging** section of the **whitelist.json** configuration file as follows: @@ -88,7 +107,7 @@ Sample **whitelist.json** section: }, ``` -### 3.3. The ticket_owners.json File +### 4.3. The ticket_owners.json File You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. @@ -123,13 +142,13 @@ Account-specific settings: } ``` -## 4. Logging +## 5. Logging Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. -### 4.1. Issue Identification Logging +### 5.1. 
Issue Identification Logging Dow Jones Hammer issue identification functionality uses two Lambda functions: @@ -143,7 +162,7 @@ You can see the logs for each of these Lambda functions in the following Log Gro |Initialization |`/aws/lambda/hammer-initiate-elasticsearch-logging`| |Identification |`/aws/lambda/hammer-describe-elasticsearch-logging`| -### 4.2. Issue Reporting Logging +### 5.2. Issue Reporting Logging Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: @@ -152,13 +171,13 @@ Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting |Reporting |`reporting.create_elasticsearch_domain_logging_issue_tickets`| -### 4.3. Slack Reports +### 5.3. Slack Reports In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. -### 4.4. Using CloudWatch Logs for Dow Jones Hammer +### 5.4. Using CloudWatch Logs for Dow Jones Hammer To access Dow Jones Hammer logs, proceed as follows: @@ -170,7 +189,7 @@ To access Dow Jones Hammer logs, proceed as follows: Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. -## 5. Issue specific details in DynamoDB +## 6. Issue specific details in DynamoDB Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
From 463847e3657fde5dab70dfb7438ac0cea50dfb70 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 19 Jun 2019 16:42:25 +0530 Subject: [PATCH 052/193] Added ES unencrypted domain issue documentation Added ES unencrypted domain issue documentation --- docs/_data/sidebars/mydoc_sidebar.yml | 5 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 15 +- docs/pages/features.md | 3 +- .../playbook21_elasticsearch_unencryption.md | 177 ++++++++++++ docs/pages/remediation_backup_rollback.md | 1 + hammer/library/aws/elasticsearch.py | 272 +++++++++++++++--- .../analytics/security_issues_csv_report.py | 5 +- .../cronjobs/automation_scheduler.py | 1 + 9 files changed, 443 insertions(+), 37 deletions(-) create mode 100644 docs/pages/playbook21_elasticsearch_unencryption.md diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..b52fed0f 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,8 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + + - title: Elasticsearch Domains Unencryption issues + url: /playbook21_elasticsearch_unencryption.html + output: web, pdf + diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index c7331eb7..b2dec8d4 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. 
* **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. +* **SourceIdentificationElasticSearchEncryption**: the relative path to the Lambda package that identifies Elasticsearch domain encryption issues. The default value is **elasticsearch-unencrypted-domain-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..e7e319e2 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,17 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; + +### 2.21. Elasticsearch Domain Encryption Issues + +This section describes how to detect whether you have Elasticsearch Domains encrypted or not. Refer to [issue-specific playbook](playbook21_elasticsearch_unencryption.html) for further details. + +Edit the **es_unencrypted_domain** section of the `config.json` file to configure the handling of this issue. 
+ +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-es-unencrypted-domain`. +* **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); +* **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..d5ad8075 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[Elasticsearch Domain Encryption Issues](playbook21_elasticsearch_unencryption.html) |Detects Elasticsearch domains encryption issues |Any one of Elasticsearch Domain unencryption issue | -Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. 
\ No newline at end of file +Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues**, **RDS Unencrypted instances** and **Elasticsearch Domain Encryption Issues**. \ No newline at end of file diff --git a/docs/pages/playbook21_elasticsearch_unencryption.md b/docs/pages/playbook21_elasticsearch_unencryption.md new file mode 100644 index 00000000..3fd5c58a --- /dev/null +++ b/docs/pages/playbook21_elasticsearch_unencryption.md @@ -0,0 +1,177 @@ +--- +title: Elasticsearch unencrypted instances +keywords: playbook21 +sidebar: mydoc_sidebar +permalink: playbook21_elasticsearch_unencryption.html +--- + +# Playbook 21: Elasticsearch unencrypted instances + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect Elasticsearch domains that are not encrypted (either data-at-rest or node-to-node encryption). + +## 1. Issue Identification + +Dow Jones Hammer identifies those Elasticsearch domains for which ```StorageEncrypted``` parameter value is ```false```. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. + +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/initiate_to_desc_elasticsearch_unencrypted_domains.py`| +|Identification|`hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py` | + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. 
Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. + +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect person to report issue to by examining `owner` tag on affected Elasticsearch domain. In case when such tag **exists** and is **valid JIRA/Slack user**: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). + +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +``` + + +## 3. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. 
+To identify and report issues of this type, you should add the following parameters in the **es_unencrypted_domain** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-es-unencrypted-domain` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| + +Sample **config.json** section: +``` +"es_unencrypted_domain": { + "enabled": true, + "ddb.table_name": "djif-hammer-es-unencrypted-domain", + "reporting": true + }, +``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific Elasticsearch domains. To configure such exceptions, you should edit the **es_unencrypted_domain** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Elasticsearch Domain Names(s)| + +Sample **whitelist.json** section: +``` +"es_unencrypted_domain": { + "__comment__": "Detects Unencrypted Elasticsearch domains - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + }, +``` + +### 3.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. 
+ +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+
+You can see the logs for each of these Lambda functions in the following Log Groups:
+
+|Lambda Function|CloudWatch Log Group Name                   |
+|---------------|--------------------------------------------|
+|Initialization |`/aws/lambda/hammer-initiate-rds-encryption`|
+|Identification |`/aws/lambda/hammer-describe-rds-encryption`|
+
+### 4.2. Issue Reporting Logging
+
+Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows:
+
+|Designation|CloudWatch Log Stream Name                                |
+|-----------|----------------------------------------------------------|
+|Reporting  |`reporting.create_elasticsearch_unencrypted_issue_tickets`|
+
+
+### 4.3. Slack Reports
+
+In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s).
+
+Check [ticket_owners.json](#33-the-ticket_ownersjson-file) configuration for further guidance.
+
+### 4.4. Using CloudWatch Logs for Dow Jones Hammer
+
+To access Dow Jones Hammer logs, proceed as follows:
+
+1. Open **AWS Management Console**.
+2. Select **CloudWatch** service.
+3. Select **Logs** from the CloudWatch sidebar.
+4. Select the log group you want to explore. The log group will open.
+5. Select the log stream you want to explore.
+
+Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance.
+
+## 5. Issue specific details in DynamoDB
+
+Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules.
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`name` |string|RDS instance name |`test-rds-instances` | +|`engine` |string|Name of the database engine |`mysql` | +|`tags` |map |Tags associated with RDS instance |`{"Name": "TestInstance", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..2b5068f7 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[Elasticsearch Domains Unencrypted Domains](playbook21_elasticsearch_unencryption.html#3-issue-remediation) | `No` | `No` | ## 2. How Remediation Backup Works diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index da08a174..52146849 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -1,9 +1,14 @@ +import json import logging +import pathlib +from datetime import datetime, timezone from botocore.exceptions import ClientError from collections import namedtuple from library.utility import timeit +from library.utility import jsonDumps from library.aws.utility import convert_tags +from library.aws.s3 import S3Operations # structure which describes Elastic search domains ElasticSearchDomain_Details = namedtuple('ElasticSearchDomain', [ @@ -16,30 +21,6 @@ ]) -class ESDomainDetails(object): - """ - Basic class for ElasticSearch domain details. 
- - """ - - def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None): - """ - :param account: `Account` instance where ECS task definition is present - - :param name: name of the task definition - :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. - """ - self.account = account - self.name = name - self.id = id - self.arn = arn - self.is_logging = is_logging - self.encrypted = encrypted - self.tags = convert_tags(tags) - - class ElasticSearchOperations: @classmethod @timeit @@ -69,11 +50,225 @@ def get_elasticsearch_details_of_sg_associated(cls, elasticsearch_client, group_ return domains_list + @staticmethod + def put_domain_policy(es_client, domain_name, policy): + """ + Replaces a policy on a domain. If the domain already has a policy, the one in this request completely replaces it. + + :param es_client: Elasticsearch boto3 client + :param domain_name: Elasticsearch domain where to update policy on + :param policy: `dict` or `str` with policy. `Dict` will be transformed to string using pretty json.dumps(). + + :return: nothing + """ + policy_json = jsonDumps(policy) if isinstance(policy, dict) else policy + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + AccessPolicies=policy_json, + ) + + @staticmethod + def retrieve_loggroup_arn(cw_client, domain_log_group_name): + """ + This method used to retrieve cloud watch log group arn details if log group is available. 
If not, create a + cloudwatch log group and returns arn of newly created log group + + :param cw_client: cloudwatch logs boto3 client + :param domain_log_group_name: Elasticsearch domain's log group name + :return: + """ + log_groups = cw_client.describe_log_groups() + + log_group_arn = None + for log_group in log_groups["logGroups"]: + log_group_name = log_group["logGroupName"] + if log_group_name == domain_log_group_name: + log_group_arn = log_group["arn"] + + if log_group_arn: + """ + In order to successfully deliver the logs to your CloudWatch Logs log group, + Amazon Elasticsearch Service (AES) will need access to two CloudWatch Logs API calls: + 1. CreateLogStream: Create a CloudWatch Logs log stream for the log group you specified + 2. PutLogEvents: Deliver CloudTrail events to the CloudWatch Logs log stream + + Adding resource policy that grants above access. + """ + policy_name = "AES-"+domain_log_group_name+"-Application-logs" + policy_doc = {} + statement = {} + principal = {} + action = [] + principal["Service"] = "es.amazonaws.com" + action.append("logs:PutLogEvents") + action.append("logs:CreateLogStream") + statement["Effect"] = "Allow" + statement["Principal"] = principal + statement["Action"] = action + statement["Resource"] = log_group_arn + + policy_doc["Statement"] = statement + + cw_client.put_resource_policy( + policyName=policy_name, + policyDocument=str(json.dumps(policy_doc)) + ) + return log_group_arn + + @staticmethod + def set_domain_logging(es_client, cw_client, domain_name): + """ + + :param es_client: elastic search boto3 client + :param cw_client: cloudwatch logs boto3 client + :param domain_name: elastic search domain name + :return: + """ + domain_log_group_name = "/aws/aes/domains/" + domain_name + "/application-logs" + log_group_arn = ElasticSearchOperations.retrieve_loggroup_arn(cw_client, domain_log_group_name) + if not log_group_arn: + cw_client.create_log_group(logGroupName=domain_log_group_name) + log_group_arn = 
ElasticSearchOperations.retrieve_loggroup_arn(cw_client, domain_log_group_name) + + es_client.update_elasticsearch_domain_config( + DomainName=domain_name, + LogPublishingOptions={ + 'ES_APPLICATION_LOGS': + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } + } + ) + + @classmethod + def validate_access_policy(cls, policy_details): + """ + + :param policy_details: + :return: + """ + public_policy = False + for statement in policy_details.get("Statement", []): + effect = statement['Effect'] + principal = statement.get('Principal', {}) + not_principal = statement.get('NotPrincipal', None) + condition = statement.get('Condition', None) + suffix = "/0" + # check both `Principal` - `{"AWS": "*"}` and `"*"` + # and condition (if exists) to be restricted (not "0.0.0.0/0") + if effect == "Allow" and \ + (principal == "*" or principal.get("AWS") == "*"): + if condition is not None: + if suffix in str(condition.get("IpAddress")): + return True + else: + return True + if effect == "Allow" and \ + not_principal is not None: + # TODO: it is not recommended to use `Allow` with `NotPrincipal`, need to write proper check for such case + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notprincipal.html + logging.error(f"TODO: is this statement public???\n{statement}") + return False + + return public_policy + + +class ESDomainDetails(object): + """ + Basic class for ElasticSearch domain details. + + """ + + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None, policy=None): + """ + :param account: `Account` instance where ECS task definition is present + + :param name: name of the task definition + :param arn: arn of the task definition + :param arn: tags of task definition. + :param is_logging: logging enabled or not. 
+ """ + self.account = account + self.name = name + self.id = id + self.arn = arn + self.is_logging = is_logging + self.encrypted = encrypted + self._policy = json.loads(policy) if policy else {} + self.backup_filename = pathlib.Path(f"{self.name}.json") + self.tags = convert_tags(tags) + + @property + def policy(self): + """ + :return: pretty formatted string with S3 bucket policy + """ + return jsonDumps(self._policy) + + @property + def public(self): + """ + :return: boolean, True - if Elasticsearch domain policy allows public access + False - otherwise + """ + return ElasticSearchOperations.validate_access_policy(self._policy) + + def backup_policy_s3(self, s3_client, bucket): + """ + Backup Elasticsearch policy json to S3. + + :param s3_client: S3 boto3 client + :param bucket: S3 bucket name where to put backup of S3 bucket policy + + :return: S3 path (without bucket name) to saved object with elasticsearch domain policy backup + """ + timestamp = datetime.now(timezone.utc).isoformat('T', 'seconds') + path = (f"queue_policies/" + f"{self.account.id}/" + f"{self.backup_filename.stem}_{timestamp}" + f"{self.backup_filename.suffix}") + if S3Operations.object_exists(s3_client, bucket, path): + raise Exception(f"s3://{bucket}/{path} already exists") + S3Operations.put_object(s3_client, bucket, path, self.policy) + return path + + def restrict_policy(self): + """ + Restrict and replace current policy on domain. + + :return: nothing + + .. note:: This keeps self._policy unchanged. + You need to recheck Elasticsearch domain policy to ensure that it was really restricted. 
+ """ + restricted_policy = S3Operations.restrict_policy(self._policy) + try: + ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) + except Exception: + logging.exception(f"Failed to put {self.name} restricted policy") + return False + + return True + + def set_logging(self): + """ + + :return: + """ + try: + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + except Exception: + logging.exception(f"Failed to enable {self.name} logging") + return False + + return True + class ESDomainChecker: """ - Basic class for checking EBS snapshots in account/region. - Encapsulates discovered EBS snapshots. + Basic class for checking Elasticsearch unencrypted and logging issues in account/region. + Encapsulates discovered Elasticsearch domains. """ def __init__(self, account): @@ -85,10 +280,10 @@ def __init__(self, account): def get_domain(self, id): """ - :return: `EBSSnapshot` by id + :return: `Elasticsearch Domain` by id """ for domain in self.domains: - if domain.id == id: + if domain.name == id: return domain return None @@ -121,9 +316,9 @@ def check(self, ids=None): logging.exception(f"Failed to describe elasticsearch domains in {self.account}") return False - domain_encrypted = False - is_logging = False for domain_detail in domain_details: + is_logging = False + domain_encrypted = False domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = domain_detail["ARN"] @@ -136,17 +331,26 @@ def check(self, ids=None): logging_details = domain_detail.get("LogPublishingOptions") - if logging_details and logging_details["Options"]: - is_logging = True + if logging_details: + index_logs = logging_details.get("INDEX_SLOW_LOGS") + search_logs = logging_details.get("SEARCH_SLOW_LOGS") + error_logs = logging_details.get("ES_APPLICATION_LOGS") + if (index_logs and index_logs["Enabled"]) \ + or (search_logs and search_logs["Enabled"]) \ + or 
(error_logs and error_logs["Enabled"]): + is_logging = True tags = es_client.list_tags(ARN=domain_arn)["TagList"] + access_policy = domain_detail.get("AccessPolicies") + domain = ESDomainDetails(self.account, name=domain_name, id=domain_id, arn=domain_arn, tags=tags, is_logging=is_logging, - encrypted=domain_encrypted) + encrypted=domain_encrypted, + policy=access_policy) self.domains.append(domain) - return True + return True \ No newline at end of file diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index 321eeee9..5443e1f2 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,9 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, SQSPolicyIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, SQSPolicyIssue, \ + ESEncryptionIssue from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -70,6 +72,7 @@ def generate(self): (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), (self.config.sqspolicy.ddb_table_name, "SQS Policy Public Access", SQSPolicyIssue), + (self.config.esEncrypt.ddb_table_name, "Elasticsearch Encryption Issue", ESEncryptionIssue) ] 
open_security_issues_workbook = xlwt.Workbook() diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index f473312f..5f95f1f6 100755 --- a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,7 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("Elasticsearch Unencrypted Domains", config.esEncrypt, "create_elasticsearch_unencrypted_issue_tickets", None) ] for title, module_config, reporting_script, remediation_script in modules: From 25300816ac53576d31aaf103cd13231f761d6a29 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 12:16:41 +0530 Subject: [PATCH 053/193] Updated with ECS image source review comment changes. Updated with ECS image source review comment changes. 
--- deployment/cf-templates/ddb.json | 5 +- .../cf-templates/identification-nested.json | 267 ++ deployment/cf-templates/identification.json | 3402 +++-------------- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 7 +- ...scribe_ecs_external_image_source_issues.py | 21 +- hammer/library/aws/ecs.py | 26 +- ...ecs_external_image_source_issue_tickets.py | 11 +- 8 files changed, 838 insertions(+), 2905 deletions(-) create mode 100644 deployment/cf-templates/identification-nested.json diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index de1739a1..e87ad9b4 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "10", + "ReadCapacityUnits": "25", "WriteCapacityUnits": "2" }, "SSESpecification": { @@ -330,7 +330,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-public-snapshots" ] ]} } }, - "DynamoDBSQSPublicPolicy": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -363,7 +362,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "sqs-public-access" ] ]} } }, - "DynamoDBS3Unencrypted": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -396,7 +394,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "s3-unencrypted" ] ]} } }, - "DynamoDBRDSUnencrypted": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json new file mode 100644 index 00000000..53d2fd81 --- /dev/null +++ b/deployment/cf-templates/identification-nested.json @@ -0,0 +1,267 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer identification child stack", + "Parameters": { + "SourceS3Bucket": { + "Type": "String", + "Default": "" + }, + "IdentificationIAMRole": { + "Type": "String", + "Default": 
"cloudsec-master-id" + }, + "IdentificationCheckRateExpression": { + "Type": "String" + }, + "LambdaSubnets": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas in default system-managed VPC (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "LambdaSecurityGroups": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas with default access rules (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "IdentificationLambdaSource": { + "Type": "String", + "Default": "sg-issues-identification.zip" + }, + "InitiateLambdaDescription": { + "Type": "String", + "Default": "Lambda that triggers the process of issues identification" + }, + "EvaluateLambdaDescription": { + "Type": "String", + "Default": "Lambda that performs issues identification" + }, + "InitiateLambdaName": { + "Type": "String" + }, + "EvaluateLambdaName": { + "Type": "String" + }, + "InitiateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaMemorySize": { + "Type": "String", + "Default": "256" + }, + "LambdaLogsForwarderArn": { + "Type": "String" + }, + "EventRuleDescription": { + "Type": "String", + "Default": "Triggers initiate lambda" + }, + "EventRuleName": { + "Type": "String" + }, + "SNSDisplayName": { + "Type": "String" + }, + "SNSTopicName": { + "Type": "String" + }, + "SNSIdentificationErrors": { + "Type": "String" + } + }, + "Conditions": { + "LambdaSubnetsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSubnets"}, "" ] + }, + "LambdaSecurityGroupsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSecurityGroups"}, "" ] + } + }, + "Resources": { + "LambdaInitiateEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LogGroupLambdaInitiateEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { 
"Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "Environment": { + "Variables": { + "SNS_ARN": { "Ref": "SNSNotifyLambdaEvaluate" } + } + }, + "Description": { "Ref": "InitiateLambdaDescription" }, + "FunctionName": { "Ref": "InitiateLambdaName" }, + "Handler": {"Ref": "InitiateLambdaHandler"}, + "MemorySize": 128, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "InitiateLambdaName" } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaInitiateEvaluation"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEvaluation" } + } + }, + "LambdaEvaluate": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "VpcConfig": { + "SecurityGroupIds": { + "Fn::If": [ + "LambdaSecurityGroupsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } + ] + }, + "SubnetIds": { + "Fn::If": [ + "LambdaSubnetsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } + ] + } + }, + "Description": {"Ref": "EvaluateLambdaDescription"}, + "FunctionName": { "Ref": "EvaluateLambdaName" }, + "Handler": {"Ref": "EvaluateLambdaHandler"}, + "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluate": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ 
"/aws/lambda/", { "Ref": "EvaluateLambdaName"} ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaLambdaEvaluate": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluate" } + } + }, + "EventInitiateEvaluation": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "Description": {"Ref": "EventRuleDescription"}, + "Name": {"Ref": "EventRuleName"}, + "ScheduleExpression": { "Ref": "IdentificationCheckRateExpression" }, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEvaluation", "Arn"] }, + "Id": {"Ref": "LambdaInitiateEvaluation"} + } + ] + } + }, + "PermissionToInvokeLambdaInitiateEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateEvaluation", "EventInitiateEvaluation"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluation", "Arn"] } + } + }, + "SNSNotifyLambdaEvaluate": { + "Type": "AWS::SNS::Topic", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "DisplayName": { "Ref": "SNSDisplayName" }, + "TopicName": { "Ref": "SNSTopicName" }, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluate", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, + "PermissionToInvokeLambdaEvaluateSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LambdaEvaluate"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluate" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluate", "Arn"] } + } + }, + 
"AlarmErrorsLambdaInitiateEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluate" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluate" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + } + } +} diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 0650ee75..40ec92e3 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -110,6 +110,10 @@ "Type": "String", "Default": "" }, + "NestedStackTemplate": { + "Type": "String", + "Default": "" + }, "IdentificationIAMRole": { "Type": "String", "Default": "cloudsec-master-id" @@ -331,7 +335,8 @@ "value": "describe-rds-encryption-lambda" }, "SNSDisplayNameECSExternalImageSource": { - "value": "describe-ecs-external-image-source-sns"}, + "value": 
"describe-ecs-external-image-source-sns" + }, "SNSTopicNameECSExternalImageSource": { "value": "describe-ecs-external-image-source-lambda" }, @@ -511,3007 +516,654 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - "LambdaInitiateSGEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "Environment": { - "Variables": { - "SNS_SG_ARN": { "Ref": "SNSNotifyLambdaEvaluateSG" } - } - }, - "Description": "Lambda function for initiate to identify bad security groups", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_sec_grps.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] } }, - "LogGroupLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], 
+ "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} } }, - "SubscriptionFilterLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSGEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } } }, - "LambdaEvaluateSG": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSG"], + "SNSIdentificationErrors": { + "Type": "AWS::SNS::Topic", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe security groups unrestricted access.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] - ]}, - 
"Handler": "describe_sec_grps_unrestricted_access.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] + ]} } }, - "LogGroupLambdaEvaluateSG": { - "Type" : "AWS::Logs::LogGroup", + "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { + "Type" : "AWS::SNS::Subscription", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "Protocol" : "lambda", + "TopicArn" : { "Ref": "SNSIdentificationErrors" } } }, - "SubscriptionFilterLambdaLambdaEvaluateSG": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSG"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } + "PermissionToInvokeLambdaLogsForwarderSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSIdentificationErrors" }, + "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - "LambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": 
["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], + "AlarmErrorsLambdaBackupDDB": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "Environment": { - "Variables": { - "SNS_CLOUDTRAILS_ARN": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" } + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaBackupDDB" } } - }, - "Description": "Lambda function for initiate identification of CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_cloudtrails.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + ], + "Period": 86400, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "StackEvaluateSG": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" 
}, - { "Fn::FindInMap": ["NamingStandards", - "InitiateCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateCloudTrailsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateCloudTrailsEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, + "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", + "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", + "EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Security Groups evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { 
"Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateCloudTrails": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateCloudTrails"], + "StackEvaluateCloudTrails": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_cloudtrails.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - 
"SubscriptionFilterLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateCloudTrails"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateCloudTrails" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, + "InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", + "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3ACLEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3ACL", "LogGroupLambdaInitiateS3ACLEvaluation"], + "StackEvaluateS3ACL": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "Environment": { - "Variables": { - "SNS_S3_ACL_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3ACLEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ 
"LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3ACLEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3ACL" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 ACL evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3ACL"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3ACL": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3ACL"], + "StackEvaluateS3Policy": { + "Type": 
"AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3ACL"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": 
"IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 Policy evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Policy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], + "StackEvaluateIAMUserKeysRotation": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "Environment": { - "Variables": { 
- "SNS_S3_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3PolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserKeysRotation" }, + "InitiateLambdaDescription": "Lambda function for 
initiate to identify IAM user keys which to be rotate.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserKeysRotation evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserKeysRotation"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaEvaluateS3Policy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], + "StackEvaluateIAMUserInactiveKeys": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] 
} - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Policy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which last used.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys 
last used.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_access_keys.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserInactiveKeys evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserInactiveKeys"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], + "StackEvaluateEBSVolumes": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_KEYS_ROTATION_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which to be rotate.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", + "EvaluateLambdaDescription": "Lambda function to describe unencrypted ebs volumes.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserKeysRotation"], + "StackEvaluateEBSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys to be rotated.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserKeysRotation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserKeysRotation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", 
"InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], + "StackEvaluateRDSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_INACTIVE_KEYS_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_access_keys.lambda_handler", - 
"MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ 
{ "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserInactiveKeys"], + "StackEvaluateSQSPublicPolicy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_accesskey_details.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": 
{"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS queues.", + "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate SQS queue evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] 
] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "SubscriptionFilterLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserInactiveKeys"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserInactiveKeys" } + "StackEvaluateS3Encryption": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": 
{"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Encryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LogGroupLambdaInitiateEBSVolumesEvaluation"], + "StackEvaluateRDSEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "Environment": { - "Variables": { - "SNS_EBS_VOLUMES_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted EBS volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + 
"SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted RDS instances.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_instance_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - 
"LogGroupLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSVolumesEvaluation" } - } - }, - - "LambdaEvaluateEBSVolumes": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSVolumes"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe unencrypted ebs volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::LogGroup", 
- "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSVolumes"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } - } - }, - "LambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], + "StackEvaluateAmiPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_EBS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public EBS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - 
"LogGroupLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } - } - }, - "LambdaEvaluateEBSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public ebs snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, + "InitiateLambdaDescription": "Lambda function for 
initiate to identify public AMI access issues.", + "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_ebs_public_snapshots.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } - } - }, - "LambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_RDS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public RDS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_rds_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } + "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { 
"Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaEvaluateRDSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], + "StackEvaluateECSExternalImageSource": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public rds snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_public_snapshots.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - 
"SubscriptionFilterLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } - } - }, - "LambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "Environment": { - "Variables": { - "SNS_SQS_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" } - } - }, - "Description": "Lambda function for initiate to identify public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSExternalImageSource" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify ECS image source is internal or external.", + "EvaluateLambdaDescription": "Lambda function to describe ECS image source is internal or external.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateECSExternalImageSourceLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_sqs_public_policy.lambda_handler", - "MemorySize": 128, - 
"Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } - } - }, - "LambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", 
"IdentifyECSExternalImageSourceLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_sqs_public_policy.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } - } - }, - "LambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Environment": { - "Variables": { - "SNS_S3_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" } - } - }, - "Description": "Lambda function for initiate to identify S3 unencrypted buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": 
{"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3EncryptionEvaluation" } - } - }, - "LambdaEvaluateS3Encryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Description": "Lambda function to describe un-encrypted S3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": 
"ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Encryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } - } - }, - "LambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Environment": { - "Variables": { - "SNS_RDS_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_instance_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - 
"SubscriptionFilterLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } - } - }, - "LambdaEvaluateRDSEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Description": "Lambda function to describe un-encrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_instance_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" 
: "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSEncryption" } - } - }, - "LambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_PUBLIC_AMI_ARN": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" } - } - }, - "Description": "Lambda function for initiate to identify public AMI access issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_public_ami_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": 
"LogGroupLambdaInitiateAMIPublicAccessEvaluation" } - } - }, - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, - "LambdaInitiateECSExternalImageSourceEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateECSExternalImageSource", "LogGroupLambdaInitiateECSExternalImageSourceEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": 
"SourceIdentificationECSExternalImageSource" } - }, - "Environment": { - "Variables": { - "SNS_ECS_EXTERNAL_IMAGE_ARN": { "Ref": "SNSNotifyLambdaEvaluateECSExternalImageSource" } - } - }, - "Description": "Lambda function for initiate to identify ECS task definition image source.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateECSExternalImageSourceLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ecs_external_image_source_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateECSExternalImageSourceEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateECSExternalImageSourceLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateECSExternalImageSourceEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateECSExternalImageSourceEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateECSExternalImageSourceEvaluation" } - } - }, - "LambdaEvaluateECSExternalImageSource": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateECSExternalImageSource"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationECSExternalImageSource" } - }, - "Description": "Lambda 
function to describe ECS task definitions image source ", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyECSExternalImageSourceLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ecs_external_image_source_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateECSExternalImageSource": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyECSExternalImageSourceLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateECSExternalImageSource": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateECSExternalImageSource"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSExternalImageSource" } - } - }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - 
"LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate 
EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": 
{"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "EventInitiateEvaluationAMIPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate public AMI access evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - 
"State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateAMIPublicAccessEvaluation" - } - ] - } - }, - "EventInitiateEvaluationECSExternalImageSource": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateECSExternalImageSourceEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate ECS task definition image source evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSExternalImageSource"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateECSExternalImageSourceEvaluation", "Arn"] }, - "Id": "LambdaInitiateECSExternalImageSourceEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": 
"lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] - } - } - }, - 
"PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation", "EventInitiateEvaluationEBSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation", "EventInitiateEvaluationRDSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSQSPublicPolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": 
["LambdaInitiateSQSPublicPolicyEvaluation", "EventInitiateEvaluationSQSPublicPolicy"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3EncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3EncryptionEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation", "EventInitiateEvaluationRDSEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateECSExternalImageSourceEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateECSExternalImageSourceEvaluation", "EventInitiateEvaluationECSExternalImageSource"], - "Properties": { 
- "FunctionName": { "Ref": "LambdaInitiateECSExternalImageSourceEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSExternalImageSource", "Arn"] } - } - }, - "SNSNotifyLambdaEvaluateSG": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateSG"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateCloudTrails": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateCloudTrails"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3ACL": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3ACL"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Policy": { - "Type": 
"AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3Policy"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSVolumes": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSVolumes"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"SNSDisplayNameEBSVolumes", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSSnapshots"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSSnapshots", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateSQSPublicPolicy", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - 
"Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Encryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateS3Encryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateAMIPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateECSExternalImageSource": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateECSExternalImageSource", - 
"Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSExternalImageSource", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateECSExternalImageSource", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "PermissionToInvokeLambdaEvaluateSgSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSG" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateCloudTrailsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LambdaEvaluateCloudTrails"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3AclSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": "SNSNotifyLambdaEvaluateS3ACL", - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3PolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LambdaEvaluateS3Policy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" }, - "FunctionName": { "Fn::GetAtt": 
["LambdaEvaluateS3Policy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserKeysRotationSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserInactiveKeysSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSVolumesSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LambdaEvaluateEBSVolumes"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "Action": 
"lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateSQSPublicPolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3EncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LambdaEvaluateS3Encryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LambdaEvaluateRDSEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } - - } - }, - "PermissionToInvokeLambdaEvaluateECSExternalImageSourceSNS": { - "Type": 
"AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateECSExternalImageSource", "LambdaEvaluateECSExternalImageSource"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSExternalImageSource" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSExternalImageSource", "Arn"] } - } - }, - "SNSIdentificationErrors": { - "Type": "AWS::SNS::Topic", - "Properties": { - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] - ]} - } - }, - "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { - "Type" : "AWS::SNS::Subscription", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties" : { - "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "Protocol" : "lambda", - "TopicArn" : { "Ref": "SNSIdentificationErrors" } - } - }, - "PermissionToInvokeLambdaLogsForwarderSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSIdentificationErrors" }, - "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } - } - }, - "AlarmErrorsLambdaBackupDDB": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaBackupDDB" } - } - ], - "Period": 86400, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - 
"TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSGEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSGEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSGEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSG"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSG" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSG" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": 
"FunctionName", - "Value": { "Ref": "LambdaInitiateCloudTrailsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEvaluateCloudTrails": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateCloudTrails"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateCloudTrails" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateCloudTrails" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3ACLEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3ACLEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3ACLEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3ACL"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - 
"AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3ACL" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3ACL" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3PolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3PolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Policy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Policy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Policy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": 
"AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserKeysRotation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserKeysRotation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - 
"MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserInactiveKeys" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserInactiveKeys" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSVolumesEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSVolumes"], - 
"Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSVolumes" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSVolumes" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : 
"GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" 
}, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3EncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSQSPublicPolicy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSQSPublicPolicy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": 
["SNSIdentificationErrors", "LambdaEvaluateS3Encryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Encryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Encryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSEncryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSEncryption" } - } - ], - 
"Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateECSExternalImageSourceEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateECSExternalImageSourceEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": 
"SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateECSExternalImageSourceEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateECSExternalImageSourceEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaECSExternalImageSourceEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSExternalImageSource"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSExternalImageSource" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateECSExternalImageSource" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" + "InitiateLambdaHandler": "initiate_to_desc_ecs_external_image_source_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_ecs_external_image_source_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate ECS image source evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSExternalImageSource"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSExternalImageSource", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { 
"Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } } }, "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 9ec1cb9e..42f87319 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -1,7 +1,8 @@ resource "aws_cloudformation_stack" "identification" { - name = "hammer-identification" + name = "hammer-identification-main" depends_on = [ "aws_s3_bucket_object.identification-cfn", + "aws_s3_bucket_object.identification-nested-cfn", "aws_s3_bucket_object.logs-forwarder", "aws_s3_bucket_object.ddb-tables-backup", "aws_s3_bucket_object.sg-issues-identification", @@ -23,6 +24,7 @@ resource "aws_cloudformation_stack" "identification" { parameters { SourceS3Bucket = "${var.s3bucket}" + NestedStackTemplate = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-nested-cfn.id}" ResourcesPrefix = "${var.resources-prefix}" IdentificationIAMRole = "${var.identificationIAMRole}" IdentificationCheckRateExpression = "${var.identificationCheckRateExpression}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index e4539e91..3ac63507 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -4,6 +4,12 @@ resource "aws_s3_bucket_object" "identification-cfn" { source = "${path.module}/../../../cf-templates/identification.json" } +resource "aws_s3_bucket_object" "identification-nested-cfn" { + bucket = "${var.s3bucket}" + key = 
"cfn/${format("identification-nested-%s.json", "${md5(file("${path.module}/../../../cf-templates/identification-nested.json"))}")}" + source = "${path.module}/../../../cf-templates/identification-nested.json" +} + resource "aws_s3_bucket_object" "logs-forwarder" { bucket = "${var.s3bucket}" key = "lambda/${format("logs-forwarder-%s.zip", "${md5(file("${path.module}/../../../packages/logs-forwarder.zip"))}")}" @@ -79,7 +85,6 @@ resource "aws_s3_bucket_object" "sqs-public-policy-identification" { key = "lambda/${format("sqs-public-policy-identification-%s.zip", "${md5(file("${path.module}/../../../packages/sqs-public-policy-identification.zip"))}")}" source = "${path.module}/../../../packages/sqs-public-policy-identification.zip" } - resource "aws_s3_bucket_object" "s3-unencrypted-bucket-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("s3-unencrypted-bucket-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/s3-unencrypted-bucket-issues-identification.zip"))}")}" diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py index 82fa662e..2a93a799 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -4,7 +4,7 @@ from library.logger import set_logging from library.config import Config from library.aws.ecs import ECSChecker -from library.aws.utility import Account +from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, ECSExternalImageSourceIssue from library.ddb_issues import Operations as IssueOperations from library.aws.utility import Sns @@ -20,7 +20,8 @@ def 
lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # region = payload['region'] + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -55,8 +56,7 @@ def lambda_handler(event, context): issue = ECSExternalImageSourceIssue(account_id, task_definition.name) issue.issue_details.arn = task_definition.arn issue.issue_details.tags = task_definition.tags - issue.issue_details.container_name = task_definition.container_name - issue.issue_details.image_url = task_definition.image_url + issue.issue_details.container_image_details = task_definition.container_image_details issue.issue_details.region = task_definition.account.region if config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted @@ -68,10 +68,15 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(task_definition.name, None) - logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") - # all other unresolved issues in DDB are for removed/remediated task definitions - for issue in open_issues.values(): - IssueOperations.set_status_resolved(ddb_table, issue) + logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated task definitions + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + + # track the progress of API request to scan specific account/region/feature + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) except Exception: logging.exception(f"Failed to check ECS task definitions for '{account_id} ({account_name})'") return diff --git 
a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index fd80c5d2..e9bb275f 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -64,8 +64,8 @@ class ECSTaskDefinitions(object): """ - def __init__(self, account, name, arn, tags, container_name=None, image_url= None, is_logging=None, is_privileged=None, - external_image=None): + def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_container_names=None, + is_privileged=None, privileged_container_names=None, external_image=None, container_image_details=None): """ :param account: `Account` instance where ECS task definition is present @@ -79,10 +79,11 @@ def __init__(self, account, name, arn, tags, container_name=None, image_url= Non self.arn = arn self.tags = convert_tags(tags) self.is_logging = is_logging + self.disabled_logging_container_names = disabled_logging_container_names self.is_privileged = is_privileged + self.privileged_container_names = privileged_container_names self.external_image = external_image - self.container_name = container_name - self.image_url = image_url + self.container_image_details = container_image_details class ECSChecker(object): @@ -124,7 +125,9 @@ def check(self): logging_enabled = False external_image = False is_privileged = False - container_name = None + container_image_details = [] + disabled_logging_container_names = [] + privileged_container_names = [] try: task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name @@ -135,6 +138,7 @@ def check(self): container_name = container_definition["name"] if container_definition.get('logConfiguration') is None: logging_enabled = False + disabled_logging_container_names.append(container_name) else: logging_enabled = True @@ -142,12 +146,17 @@ def check(self): if container_privileged_details is not None: if container_definition['privileged']: is_privileged = True + privileged_container_names.append(container_name) else: is_privileged = False 
image = container_definition.get('image') + image_details = {} if image is not None: if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + image_details["container_name"] = container_name + image_details["image_url"] = image + container_image_details.append(image_details) external_image = True else: external_image = False @@ -158,11 +167,12 @@ def check(self): name=task_definition_name, arn=task_definition_arn, tags=tags, - container_name=container_name, - image_url=image, is_logging=logging_enabled, + disabled_logging_container_names=disabled_logging_container_names, is_privileged=is_privileged, - external_image=external_image + privileged_container_names=privileged_container_names, + external_image=external_image, + container_image_details=container_image_details, ) self.task_definitions.append(task_definition_details) except ClientError as err: diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 4ac1c6c0..9117cf59 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -36,8 +36,7 @@ def create_tickets_ecs_external_images(self): task_definition_name = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags - container_name = issue.issue_details.container_name - image_url = issue.issue_details.image_url + container_image_details = issue.issue_details.container_image_details # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner @@ -53,7 +52,7 @@ def create_tickets_ecs_external_images(self): # Adding label with "whitelisted" to jira ticket. 
jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + label=IssueStatus.Whitelisted.value ) jira.close_issue( ticket_id=issue.jira_details.ticket, @@ -99,14 +98,10 @@ def create_tickets_ecs_external_images(self): f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*ECS Task Definition*: {task_definition_name}\n" - f"*ECS Task definition's Container Name*: {container_name}\n" f"*ECS container image Source*: External \n" - f"*ECS container image url*: {image_url} \n" + f"*ECS container image details*: {container_image_details} \n" ) - auto_remediation_date = (self.config.now + self.config.ecs_external_image_source.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" - issue_description += JiraOperations.build_tags_table(tags) issue_description += "\n" From 152205da3b82fa01df74b161a4ec65e10a7163ed Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 12:19:27 +0530 Subject: [PATCH 054/193] Updated with ECS image source review comments changes. Updated with ECS image source review comments changes. 
--- .../initiate_to_desc_ecs_external_image_source_issues.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py index 34e15859..7cf338ec 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/initiate_to_desc_ecs_external_image_source_issues.py @@ -12,7 +12,7 @@ def lambda_handler(event, context): logging.debug("Initiating ECS task definitions' image source checking") try: - sns_arn = os.environ["SNS_ECS_EXTERNAL_IMAGE_ARN"] + sns_arn = os.environ["SNS_ARN"] config = Config() if not config.ecs_external_image_source.enabled: From 391d4c452c3a9cf82044e6bff1f16814ec6ebeb8 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 12:22:15 +0530 Subject: [PATCH 055/193] Updated with ECS image source review comments changes. Updated with ECS image source review comments changes. 
--- .../reporting/create_ecs_external_image_source_issue_tickets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 9117cf59..67cace2e 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -92,7 +92,7 @@ def create_tickets_ecs_external_images(self): f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") issue_description = ( - f"The ECS image source taken from external.\n\n" + f"The ECS image source taken from external source.\n\n" f"*Risk*: High\n\n" f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" From 0829e66fd853549119b7685640b276a11c6fc09b Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 13:17:40 +0530 Subject: [PATCH 056/193] Updated ECS issues review comments. Updated ECS issues review comments. 
--- hammer/library/aws/ecs.py | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index e9bb275f..ac7f05d7 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -122,9 +122,6 @@ def check(self): if "families" in response: for task_definition_name in response["families"]: tags = {} - logging_enabled = False - external_image = False - is_privileged = False container_image_details = [] disabled_logging_container_names = [] privileged_container_names = [] @@ -137,18 +134,11 @@ def check(self): for container_definition in task_definition['containerDefinitions']: container_name = container_definition["name"] if container_definition.get('logConfiguration') is None: - logging_enabled = False disabled_logging_container_names.append(container_name) - else: - logging_enabled = True - container_privileged_details = container_definition.get('privileged') - if container_privileged_details is not None: - if container_definition['privileged']: - is_privileged = True + if container_definition.get('privileged') is not None \ + and container_definition['privileged']: privileged_container_names.append(container_name) - else: - is_privileged = False image = container_definition.get('image') image_details = {} @@ -157,9 +147,21 @@ def check(self): image_details["container_name"] = container_name image_details["image_url"] = image container_image_details.append(image_details) - external_image = True - else: - external_image = False + + if len(disabled_logging_container_names) > 0: + logging_enabled = False + else: + logging_enabled = True + + if len(privileged_container_names) > 0: + is_privileged = True + else: + is_privileged = False + + if len(container_image_details) > 0: + external_image = True + else: + external_image = False if "Tags" in task_definition: tags = task_definition["Tags"] From 16ababe3b7dab28056db1ea49bd3528e98d9b271 Mon Sep 17 00:00:00 2001 From: 
vigneswararaomacharla Date: Tue, 25 Jun 2019 17:17:30 +0530 Subject: [PATCH 057/193] Updated with ECS privileged access issue review comments changes. Updated with ECS privileged access issue review comments changes. --- deployment/cf-templates/ddb.json | 3 +- .../cf-templates/identification-nested.json | 267 ++ deployment/cf-templates/identification.json | 3400 +++-------------- .../modules/identification/identification.tf | 6 +- .../modules/identification/sources.tf | 7 +- .../describe_ecs_privileged_access_issues.py | 19 +- ...te_to_desc_ecs_privileged_access_issues.py | 2 +- hammer/library/aws/ecs.py | 57 +- ...ate_ecs_privileged_access_issue_tickets.py | 16 +- 9 files changed, 859 insertions(+), 2918 deletions(-) create mode 100644 deployment/cf-templates/identification-nested.json diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 0c41eaec..f58d3bc8 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "10", + "ReadCapacityUnits": "25", "WriteCapacityUnits": "2" }, "SSESpecification": { @@ -330,7 +330,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-public-snapshots" ] ]} } }, - "DynamoDBSQSPublicPolicy": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json new file mode 100644 index 00000000..53d2fd81 --- /dev/null +++ b/deployment/cf-templates/identification-nested.json @@ -0,0 +1,267 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer identification child stack", + "Parameters": { + "SourceS3Bucket": { + "Type": "String", + "Default": "" + }, + "IdentificationIAMRole": { + "Type": "String", + "Default": "cloudsec-master-id" + }, + "IdentificationCheckRateExpression": { + "Type": "String" + }, + "LambdaSubnets": { + "Type" : "String", + 
"Description" : "Comma-separated list, without spaces. Leave empty to run lambdas in default system-managed VPC (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "LambdaSecurityGroups": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas with default access rules (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "IdentificationLambdaSource": { + "Type": "String", + "Default": "sg-issues-identification.zip" + }, + "InitiateLambdaDescription": { + "Type": "String", + "Default": "Lambda that triggers the process of issues identification" + }, + "EvaluateLambdaDescription": { + "Type": "String", + "Default": "Lambda that performs issues identification" + }, + "InitiateLambdaName": { + "Type": "String" + }, + "EvaluateLambdaName": { + "Type": "String" + }, + "InitiateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaMemorySize": { + "Type": "String", + "Default": "256" + }, + "LambdaLogsForwarderArn": { + "Type": "String" + }, + "EventRuleDescription": { + "Type": "String", + "Default": "Triggers initiate lambda" + }, + "EventRuleName": { + "Type": "String" + }, + "SNSDisplayName": { + "Type": "String" + }, + "SNSTopicName": { + "Type": "String" + }, + "SNSIdentificationErrors": { + "Type": "String" + } + }, + "Conditions": { + "LambdaSubnetsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSubnets"}, "" ] + }, + "LambdaSecurityGroupsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSecurityGroups"}, "" ] + } + }, + "Resources": { + "LambdaInitiateEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LogGroupLambdaInitiateEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "Environment": { + "Variables": { + "SNS_ARN": { 
"Ref": "SNSNotifyLambdaEvaluate" } + } + }, + "Description": { "Ref": "InitiateLambdaDescription" }, + "FunctionName": { "Ref": "InitiateLambdaName" }, + "Handler": {"Ref": "InitiateLambdaHandler"}, + "MemorySize": 128, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "InitiateLambdaName" } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaInitiateEvaluation"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEvaluation" } + } + }, + "LambdaEvaluate": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "VpcConfig": { + "SecurityGroupIds": { + "Fn::If": [ + "LambdaSecurityGroupsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } + ] + }, + "SubnetIds": { + "Fn::If": [ + "LambdaSubnetsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } + ] + } + }, + "Description": {"Ref": "EvaluateLambdaDescription"}, + "FunctionName": { "Ref": "EvaluateLambdaName" }, + "Handler": {"Ref": "EvaluateLambdaHandler"}, + "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluate": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "EvaluateLambdaName"} ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaLambdaEvaluate": { + 
"Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluate" } + } + }, + "EventInitiateEvaluation": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "Description": {"Ref": "EventRuleDescription"}, + "Name": {"Ref": "EventRuleName"}, + "ScheduleExpression": { "Ref": "IdentificationCheckRateExpression" }, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEvaluation", "Arn"] }, + "Id": {"Ref": "LambdaInitiateEvaluation"} + } + ] + } + }, + "PermissionToInvokeLambdaInitiateEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateEvaluation", "EventInitiateEvaluation"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluation", "Arn"] } + } + }, + "SNSNotifyLambdaEvaluate": { + "Type": "AWS::SNS::Topic", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "DisplayName": { "Ref": "SNSDisplayName" }, + "TopicName": { "Ref": "SNSTopicName" }, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluate", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, + "PermissionToInvokeLambdaEvaluateSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LambdaEvaluate"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluate" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluate", "Arn"] } + } + }, + "AlarmErrorsLambdaInitiateEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "AlarmActions": [ 
{ "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluate" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluate" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + } + } +} diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index a9fcf63f..a74fdc5f 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -110,6 +110,10 @@ "Type": "String", "Default": "" }, + "NestedStackTemplate": { + "Type": "String", + "Default": "" + }, "IdentificationIAMRole": { "Type": "String", "Default": "cloudsec-master-id" @@ -512,3008 +516,654 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - "LambdaInitiateSGEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], "Properties": { - "Code": { - 
"S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "Environment": { - "Variables": { - "SNS_SG_ARN": { "Ref": "SNSNotifyLambdaEvaluateSG" } - } - }, - "Description": "Lambda function for initiate to identify bad security groups", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_sec_grps.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] } }, - "LogGroupLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} } }, - "SubscriptionFilterLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - 
"PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSGEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } } }, - "LambdaEvaluateSG": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSG"], + "SNSIdentificationErrors": { + "Type": "AWS::SNS::Topic", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe security groups unrestricted access.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sec_grps_unrestricted_access.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] + ]} } }, - 
"LogGroupLambdaEvaluateSG": { - "Type" : "AWS::Logs::LogGroup", + "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { + "Type" : "AWS::SNS::Subscription", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "Protocol" : "lambda", + "TopicArn" : { "Ref": "SNSIdentificationErrors" } } }, - "SubscriptionFilterLambdaLambdaEvaluateSG": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSG"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } + "PermissionToInvokeLambdaLogsForwarderSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSIdentificationErrors" }, + "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - "LambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], + "AlarmErrorsLambdaBackupDDB": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "Environment": { - "Variables": { - "SNS_CLOUDTRAILS_ARN": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" } + 
"AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaBackupDDB" } } - }, - "Description": "Lambda function for initiate identification of CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_cloudtrails.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + ], + "Period": 86400, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "StackEvaluateSG": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateCloudTrailsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ 
"LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateCloudTrailsEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, + "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", + "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", + "EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Security Groups evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateCloudTrails": { - "Type": "AWS::Lambda::Function", - "DependsOn": 
["LogGroupLambdaEvaluateCloudTrails"], + "StackEvaluateCloudTrails": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_cloudtrails.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateCloudTrails"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": 
"LogGroupLambdaEvaluateCloudTrails" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, + "InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", + "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3ACLEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3ACL", "LogGroupLambdaInitiateS3ACLEvaluation"], + "StackEvaluateS3ACL": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - 
"S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "Environment": { - "Variables": { - "SNS_S3_ACL_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3ACLEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3ACLEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": 
"SourceIdentificationS3ACL" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 ACL evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3ACL"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3ACL": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3ACL"], + "StackEvaluateS3Policy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to 
describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 Policy evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Policy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3ACL"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } - } - }, - "LambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], + "StackEvaluateIAMUserKeysRotation": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "Environment": { - "Variables": { - "SNS_S3_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + 
"TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3PolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserKeysRotation" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which to be rotate.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", 
"IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserKeysRotation evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserKeysRotation"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaEvaluateS3Policy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], + "StackEvaluateIAMUserInactiveKeys": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + 
"Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Policy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which last used.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys last used.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": 
"initiate_to_desc_iam_access_keys.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserInactiveKeys evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserInactiveKeys"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], + "StackEvaluateEBSVolumes": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_KEYS_ROTATION_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which to be rotate.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : 
["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", + "EvaluateLambdaDescription": "Lambda function to describe unencrypted ebs volumes.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", + 
"EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserKeysRotation"], + "StackEvaluateEBSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys to be rotated.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { 
"Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserKeysRotation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserKeysRotation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_public_snapshots.lambda_handler", + 
"EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], + "StackEvaluateRDSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_INACTIVE_KEYS_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_access_keys.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - 
}, - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + 
"EventRuleDescription": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserInactiveKeys"], + "StackEvaluateSQSPublicPolicy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_accesskey_details.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - 
"LogGroupLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS queues.", + "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate SQS queue evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": 
"SNSIdentificationErrors"} + } } }, - "SubscriptionFilterLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserInactiveKeys"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserInactiveKeys" } + "StackEvaluateS3Encryption": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": 
["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Encryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LogGroupLambdaInitiateEBSVolumesEvaluation"], + "StackEvaluateRDSEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "Environment": { - "Variables": { - "SNS_EBS_VOLUMES_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted EBS volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + 
"LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted RDS instances.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_instance_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSVolumesEvaluation": { - 
"Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSVolumesEvaluation" } - } - }, - - "LambdaEvaluateEBSVolumes": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSVolumes"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe unencrypted ebs volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - 
"PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSVolumes"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } - } - }, - "LambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], + "StackEvaluateAmiPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_EBS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public EBS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : 
"AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } - } - }, - "LambdaEvaluateEBSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public ebs snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public AMI access issues.", + "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_ebs_public_snapshots.lambda_handler", - "MemorySize": 512, - "Timeout": 
"300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } - } - }, - "LambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_RDS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public RDS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_rds_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": 
{"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } + "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaEvaluateRDSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": 
["LogGroupLambdaEvaluateRDSSnapshots"], + "StackEvaluateECSPrivilegedAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public rds snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_public_snapshots.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - 
"LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } - } - }, - "LambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "Environment": { - "Variables": { - "SNS_SQS_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" } - } - }, - "Description": "Lambda function for initiate to identify public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSPrivilegedAccess" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify ECS privileged access issues.", + "EvaluateLambdaDescription": "Lambda function to describe ECS privileged access issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateECSPrivilegedAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_sqs_public_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", - "InitiateSQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } - } - }, - "LambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyECSPrivilegedAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_sqs_public_policy.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::LogGroup", - 
"Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } - } - }, - "LambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Environment": { - "Variables": { - "SNS_S3_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" } - } - }, - "Description": "Lambda function for initiate to identify S3 unencrypted buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - 
"InitiateS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3EncryptionEvaluation" } - } - }, - "LambdaEvaluateS3Encryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Description": "Lambda function to describe un-encrypted S3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Encryption"], - "Properties" : { - 
"DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } - } - }, - "LambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Environment": { - "Variables": { - "SNS_RDS_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_instance_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, 
...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } - } - }, - "LambdaEvaluateRDSEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Description": "Lambda function to describe un-encrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_instance_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSEncryption" } - } - }, - "LambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { 
"Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_PUBLIC_AMI_ARN": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" } - } - }, - "Description": "Lambda function for initiate to identify public AMI access issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_public_ami_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } - } - }, - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, - "LambdaInitiateECSPrivilegedAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateECSPrivilegedAccess", "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationECSPrivilegedAccess" } - }, - "Environment": { - "Variables": { - "SNS_ECS_PRIVILEGED_ACCESS_ARN": { "Ref": "SNSNotifyLambdaEvaluateECSPrivilegedAccess" } - } - }, - "Description": "Lambda function for initiate to identify privileged access enabled of ECS task definition.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateECSPrivilegedAccessLambdaFunctionName", 
"value"] } ] - ]}, - "Handler": "initiate_to_desc_ecs_privileged_access_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateECSPrivilegedAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateECSPrivilegedAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateECSPrivilegedAccessEvaluation" } - } - }, - "LambdaEvaluateECSPrivilegedAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateECSPrivilegedAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationECSPrivilegedAccess" } - }, - "Description": "Lambda function to describe priviled access enabled ECS task difinitions.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyECSPrivilegedAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ecs_privileged_access_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" 
}, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateECSPrivilegedAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyECSPrivilegedAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateECSPrivilegedAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateECSPrivilegedAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSPrivilegedAccess" } - } - }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { 
"Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": 
["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", 
[{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "EventInitiateEvaluationAMIPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate public AMI access evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateAMIPublicAccessEvaluation" - } - ] - } - }, - "EventInitiateEvaluationECSPrivilegedAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateECSPrivilegedAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate privileged access issue ECS task definition evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, 
"InitiateEvaluationECSPrivilegedAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateECSPrivilegedAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateECSPrivilegedAccessEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { 
"Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] - } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - 
"DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation", "EventInitiateEvaluationEBSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation", "EventInitiateEvaluationRDSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSQSPublicPolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation", "EventInitiateEvaluationSQSPublicPolicy"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3EncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3EncryptionEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": 
"LambdaInitiateS3EncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation", "EventInitiateEvaluationRDSEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateECSPrivilegedAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateECSPrivilegedAccessEvaluation", "EventInitiateEvaluationECSPrivilegedAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateECSPrivilegedAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSPrivilegedAccess", "Arn"] } - } - }, - "SNSNotifyLambdaEvaluateSG": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateSG"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { 
"Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateCloudTrails": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateCloudTrails"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3ACL": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3ACL"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Policy": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3Policy"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::SNS::Topic", - "DependsOn": 
["LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSVolumes": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSVolumes"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSSnapshots"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } 
] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSSnapshots", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateSQSPublicPolicy", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Encryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateS3Encryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": 
["LambdaEvaluateS3Encryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateAMIPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] - - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateECSPrivilegedAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateECSPrivilegedAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSPrivilegedAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSPrivilegedAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateECSPrivilegedAccess", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "PermissionToInvokeLambdaEvaluateSgSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": 
["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSG" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateCloudTrailsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LambdaEvaluateCloudTrails"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3AclSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": "SNSNotifyLambdaEvaluateS3ACL", - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3PolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LambdaEvaluateS3Policy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserKeysRotationSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserInactiveKeysSNS": { - "Type": "AWS::Lambda::Permission", - 
"DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSVolumesSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LambdaEvaluateEBSVolumes"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateSQSPublicPolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" }, - "FunctionName": { "Fn::GetAtt": 
["LambdaEvaluateSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3EncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LambdaEvaluateS3Encryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LambdaEvaluateRDSEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } - - } - }, - "PermissionToInvokeLambdaEvaluateECSPrivilegedAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateECSPrivilegedAccess", "LambdaEvaluateECSPrivilegedAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSPrivilegedAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSPrivilegedAccess", "Arn"] } - } - }, - "SNSIdentificationErrors": { - "Type": "AWS::SNS::Topic", - "Properties": { - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", 
"value"] } ] - ]} - } - }, - "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { - "Type" : "AWS::SNS::Subscription", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties" : { - "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "Protocol" : "lambda", - "TopicArn" : { "Ref": "SNSIdentificationErrors" } - } - }, - "PermissionToInvokeLambdaLogsForwarderSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSIdentificationErrors" }, - "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } - } - }, - "AlarmErrorsLambdaBackupDDB": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaBackupDDB" } - } - ], - "Period": 86400, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSGEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSGEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSGEvaluation" } - } 
- ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSG"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSG" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSG" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateCloudTrailsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEvaluateCloudTrails": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateCloudTrails"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateCloudTrails" }, 
"LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateCloudTrails" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3ACLEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3ACLEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3ACLEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3ACL"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3ACL" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3ACL" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3PolicyEvaluation"], - "Properties": 
{ - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3PolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3PolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Policy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Policy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Policy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - 
"ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserKeysRotation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserKeysRotation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": 
{"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserInactiveKeys" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserInactiveKeys" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSVolumesEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSVolumes"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSVolumes" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSVolumes" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSSnapshotsEvaluation": { - "Type": 
"AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": 
{ "Ref": "LambdaInitiateRDSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - 
"OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3EncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSQSPublicPolicy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSQSPublicPolicy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Encryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Encryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Encryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, 
- "AlarmErrorsLambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSEncryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSEncryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - 
"MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateECSPrivilegedAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateECSPrivilegedAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateECSPrivilegedAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateECSPrivilegedAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaECSPrivilegedAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", 
"LambdaEvaluateECSPrivilegedAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSPrivilegedAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateECSPrivilegedAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" + "InitiateLambdaHandler": "initiate_to_desc_ecs_privileged_access_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_ecs_privileged_access_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate ECS privileged access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSPrivilegedAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSPrivilegedAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSPrivilegedAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } } }, "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 8d977168..36f95541 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -1,7 +1,8 @@ resource 
"aws_cloudformation_stack" "identification" { - name = "hammer-identification" + name = "hammer-identification-main" depends_on = [ "aws_s3_bucket_object.identification-cfn", + "aws_s3_bucket_object.identification-nested-cfn", "aws_s3_bucket_object.logs-forwarder", "aws_s3_bucket_object.ddb-tables-backup", "aws_s3_bucket_object.sg-issues-identification", @@ -23,6 +24,7 @@ resource "aws_cloudformation_stack" "identification" { parameters { SourceS3Bucket = "${var.s3bucket}" + NestedStackTemplate = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-nested-cfn.id}" ResourcesPrefix = "${var.resources-prefix}" IdentificationIAMRole = "${var.identificationIAMRole}" IdentificationCheckRateExpression = "${var.identificationCheckRateExpression}" @@ -47,4 +49,4 @@ resource "aws_cloudformation_stack" "identification" { } template_url = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-cfn.id}" -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index e07b2acb..9f2b1c66 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -4,6 +4,12 @@ resource "aws_s3_bucket_object" "identification-cfn" { source = "${path.module}/../../../cf-templates/identification.json" } +resource "aws_s3_bucket_object" "identification-nested-cfn" { + bucket = "${var.s3bucket}" + key = "cfn/${format("identification-nested-%s.json", "${md5(file("${path.module}/../../../cf-templates/identification-nested.json"))}")}" + source = "${path.module}/../../../cf-templates/identification-nested.json" +} + resource "aws_s3_bucket_object" "logs-forwarder" { bucket = "${var.s3bucket}" key = "lambda/${format("logs-forwarder-%s.zip", "${md5(file("${path.module}/../../../packages/logs-forwarder.zip"))}")}" @@ -79,7 +85,6 @@ resource "aws_s3_bucket_object" 
"sqs-public-policy-identification" { key = "lambda/${format("sqs-public-policy-identification-%s.zip", "${md5(file("${path.module}/../../../packages/sqs-public-policy-identification.zip"))}")}" source = "${path.module}/../../../packages/sqs-public-policy-identification.zip" } - resource "aws_s3_bucket_object" "s3-unencrypted-bucket-issues-identification" { bucket = "${var.s3bucket}" key = "lambda/${format("s3-unencrypted-bucket-issues-identification-%s.zip", "${md5(file("${path.module}/../../../packages/s3-unencrypted-bucket-issues-identification.zip"))}")}" diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py index ca6f693a..edaf5e1a 100644 --- a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py @@ -4,7 +4,7 @@ from library.logger import set_logging from library.config import Config from library.aws.ecs import ECSChecker -from library.aws.utility import Account +from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, ECSPrivilegedAccessIssue from library.ddb_issues import Operations as IssueOperations from library.aws.utility import Sns @@ -20,7 +20,8 @@ def lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # region = payload['region'] + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -55,7 +56,7 @@ def lambda_handler(event, context): issue = ECSPrivilegedAccessIssue(account_id, task_definition.name) 
issue.issue_details.arn = task_definition.arn issue.issue_details.tags = task_definition.tags - issue.issue_details.container_name = task_definition.container_name + issue.issue_details.privileged_container_names = task_definition.privileged_container_names issue.issue_details.region = task_definition.account.region if config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted @@ -67,10 +68,14 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(task_definition.name, None) - logging.debug(f"ECS privileged access issues in DDB:\n{open_issues.keys()}") - # all other unresolved issues in DDB are for removed/remediated task definitions - for issue in open_issues.values(): - IssueOperations.set_status_resolved(ddb_table, issue) + logging.debug(f"ECS privileged access issues in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated task definitions + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + # track the progress of API request to scan specific account/region/feature + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) except Exception: logging.exception(f"Failed to check ECS privileged access issues for '{account_id} ({account_name})'") return diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py index 983334d3..3555df60 100644 --- a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/initiate_to_desc_ecs_privileged_access_issues.py @@ -12,7 +12,7 @@ def 
lambda_handler(event, context): logging.debug("Initiating ECS privileged access checking") try: - sns_arn = os.environ["SNS_ECS_PRIVILEGED_ACCESS_ARN"] + sns_arn = os.environ["SNS_ARN"] config = Config() if not config.ecs_privileged_access.enabled: diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index e693ae48..f837c1ab 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,4 +1,3 @@ -import json import logging from botocore.exceptions import ClientError @@ -64,8 +63,8 @@ class ECSTaskDefinitions(object): """ - def __init__(self, account, name, arn, tags, container_name=None, is_logging=None, is_privileged=None, - external_image=None): + def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_container_names=None, + is_privileged=None, privileged_container_names=None, external_image=None, container_image_details=None): """ :param account: `Account` instance where ECS task definition is present @@ -79,9 +78,11 @@ def __init__(self, account, name, arn, tags, container_name=None, is_logging=Non self.arn = arn self.tags = convert_tags(tags) self.is_logging = is_logging + self.disabled_logging_container_names = disabled_logging_container_names self.is_privileged = is_privileged + self.privileged_container_names = privileged_container_names self.external_image = external_image - self.container_name = container_name + self.container_image_details = container_image_details class ECSChecker(object): @@ -120,10 +121,9 @@ def check(self): if "families" in response: for task_definition_name in response["families"]: tags = {} - logging_enabled = False - external_image = False - is_privileged = False - container_name = None + container_image_details = [] + disabled_logging_container_names = [] + privileged_container_names = [] try: task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name @@ -133,23 +133,34 @@ def check(self): for container_definition in 
task_definition['containerDefinitions']: container_name = container_definition["name"] if container_definition.get('logConfiguration') is None: - logging_enabled = False - else: - logging_enabled = True + disabled_logging_container_names.append(container_name) - container_privileged_details = container_definition.get('privileged') - if container_privileged_details is not None: - if container_definition['privileged']: - is_privileged = True - else: - is_privileged = False + if container_definition.get('privileged') is not None \ + and container_definition['privileged']: + privileged_container_names.append(container_name) image = container_definition.get('image') + image_details = {} if image is not None: if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: - external_image = True - else: - external_image = False + image_details["container_name"] = container_name + image_details["image_url"] = image + container_image_details.append(image_details) + + if len(disabled_logging_container_names) > 0: + logging_enabled = False + else: + logging_enabled = True + + if len(privileged_container_names) > 0: + is_privileged = True + else: + is_privileged = False + + if len(container_image_details) > 0: + external_image = True + else: + external_image = False if "Tags" in task_definition: tags = task_definition["Tags"] @@ -157,10 +168,12 @@ def check(self): name=task_definition_name, arn=task_definition_arn, tags=tags, - container_name=container_name, is_logging=logging_enabled, + disabled_logging_container_names=disabled_logging_container_names, is_privileged=is_privileged, - external_image=external_image + privileged_container_names=privileged_container_names, + external_image=external_image, + container_image_details=container_image_details, ) self.task_definitions.append(task_definition_details) except ClientError as err: diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py 
b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index 40773735..13f83504 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -34,7 +34,7 @@ def create_tickets_ecs_privileged(self): issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSPrivilegedAccessIssue) for issue in issues: task_definition_name = issue.issue_id - container_name = issue.issue_details.container_name + privileged_container_names = issue.issue_details.privileged_container_names region = issue.issue_details.region tags = issue.issue_details.tags # issue has been already reported @@ -44,9 +44,11 @@ def create_tickets_ecs_privileged(self): product = issue.jira_details.product if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_name}' issue") + logging.debug(f"Closing {issue.status.value} ECS privileged access disabled " + f"'{task_definition_name}' issue") - comment = (f"Closing {issue.status.value} ECS privileged access disabled '{task_definition_name}' issue " + comment = (f"Closing {issue.status.value} ECS privileged access disabled " + f"'{task_definition_name}' issue " f"in '{account_name} / {account_id}' account, '{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. 
@@ -98,19 +100,17 @@ def create_tickets_ecs_privileged(self): f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*ECS Task Definition Name*: {task_definition_name}\n" - f"*ECS Task definition's Container Name*: {container_name}\n" + f"*ECS Task definition's privileged container names*: {privileged_container_names}\n" f"*Container has privileged access*: True \n" ) - auto_remediation_date = (self.config.now + self.config.ecs_privileged_access.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" - issue_description += JiraOperations.build_tags_table(tags) issue_description += "\n" issue_description += ( f"*Recommendation*: " - f"By default, containers are unprivileged and cannot. To disable ECS privileged access, follow below steps:" + f"By default, containers are unprivileged and cannot. To disable ECS privileged access, " + f"follow below steps:" f"1. Open the Amazon ECS console at https://console.aws.amazon.com/ecs/. \n" f"2. From the navigation bar, " f"choose region that contains your task definition and choose Task Definitions.\n" From 3007cdad7ca0ea6fda1d86f7c885656461ff73de Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 17:24:36 +0530 Subject: [PATCH 058/193] updated with ECS privileged access issue review comment changes. updated with ECS privileged access issue review comment changes. 
--- hammer/library/aws/ecs.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index f837c1ab..1361e3ce 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -66,13 +66,19 @@ class ECSTaskDefinitions(object): def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_container_names=None, is_privileged=None, privileged_container_names=None, external_image=None, container_image_details=None): """ + :param account: `Account` instance where ECS task definition is present - :param name: name of the task definition :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. + :param tags: tags of task definition. + :param is_logging: boolean. Task definition's container logging is enabled or not + :param disabled_logging_container_names: List of containers which logging disabled. + :param is_privileged: boolean + :param privileged_container_names: List of containers which privileged access enabled + :param external_image: boolean + :param container_image_details: List of containers which image source is taken from external """ + self.account = account self.name = name self.arn = arn From e47f8e9073499cadc6c42c74517cad7bdfeae231 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 17:42:43 +0530 Subject: [PATCH 059/193] Updated with ECS logging issue review comment changes. Updated with ECS logging issue review comment changes. 
--- deployment/cf-templates/ddb.json | 5 +- .../cf-templates/identification-nested.json | 267 ++ deployment/cf-templates/identification.json | 3399 +++-------------- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 6 + .../describe_ecs_logging_issues.py | 19 +- .../initiate_to_desc_ecs_logging_issues.py | 2 +- hammer/library/aws/ecs.py | 79 +- .../create_ecs_logging_issue_tickets.py | 9 +- 9 files changed, 869 insertions(+), 2921 deletions(-) create mode 100644 deployment/cf-templates/identification-nested.json diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 1c6cf8b1..5881dc64 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "10", + "ReadCapacityUnits": "25", "WriteCapacityUnits": "2" }, "SSESpecification": { @@ -330,7 +330,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-public-snapshots" ] ]} } }, - "DynamoDBSQSPublicPolicy": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -363,7 +362,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "sqs-public-access" ] ]} } }, - "DynamoDBS3Unencrypted": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", @@ -396,7 +394,6 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "s3-unencrypted" ] ]} } }, - "DynamoDBRDSUnencrypted": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json new file mode 100644 index 00000000..53d2fd81 --- /dev/null +++ b/deployment/cf-templates/identification-nested.json @@ -0,0 +1,267 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer identification child stack", + "Parameters": { + "SourceS3Bucket": { + "Type": "String", + "Default": "" + }, + "IdentificationIAMRole": { + "Type": 
"String", + "Default": "cloudsec-master-id" + }, + "IdentificationCheckRateExpression": { + "Type": "String" + }, + "LambdaSubnets": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas in default system-managed VPC (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "LambdaSecurityGroups": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas with default access rules (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "IdentificationLambdaSource": { + "Type": "String", + "Default": "sg-issues-identification.zip" + }, + "InitiateLambdaDescription": { + "Type": "String", + "Default": "Lambda that triggers the process of issues identification" + }, + "EvaluateLambdaDescription": { + "Type": "String", + "Default": "Lambda that performs issues identification" + }, + "InitiateLambdaName": { + "Type": "String" + }, + "EvaluateLambdaName": { + "Type": "String" + }, + "InitiateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaMemorySize": { + "Type": "String", + "Default": "256" + }, + "LambdaLogsForwarderArn": { + "Type": "String" + }, + "EventRuleDescription": { + "Type": "String", + "Default": "Triggers initiate lambda" + }, + "EventRuleName": { + "Type": "String" + }, + "SNSDisplayName": { + "Type": "String" + }, + "SNSTopicName": { + "Type": "String" + }, + "SNSIdentificationErrors": { + "Type": "String" + } + }, + "Conditions": { + "LambdaSubnetsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSubnets"}, "" ] + }, + "LambdaSecurityGroupsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSecurityGroups"}, "" ] + } + }, + "Resources": { + "LambdaInitiateEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LogGroupLambdaInitiateEvaluation"], + "Properties": { + "Code": 
{ + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "Environment": { + "Variables": { + "SNS_ARN": { "Ref": "SNSNotifyLambdaEvaluate" } + } + }, + "Description": { "Ref": "InitiateLambdaDescription" }, + "FunctionName": { "Ref": "InitiateLambdaName" }, + "Handler": {"Ref": "InitiateLambdaHandler"}, + "MemorySize": 128, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "InitiateLambdaName" } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaInitiateEvaluation"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEvaluation" } + } + }, + "LambdaEvaluate": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "VpcConfig": { + "SecurityGroupIds": { + "Fn::If": [ + "LambdaSecurityGroupsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } + ] + }, + "SubnetIds": { + "Fn::If": [ + "LambdaSubnetsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } + ] + } + }, + "Description": {"Ref": "EvaluateLambdaDescription"}, + "FunctionName": { "Ref": "EvaluateLambdaName" }, + "Handler": {"Ref": "EvaluateLambdaHandler"}, + "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluate": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": 
{"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "EvaluateLambdaName"} ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaLambdaEvaluate": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluate" } + } + }, + "EventInitiateEvaluation": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "Description": {"Ref": "EventRuleDescription"}, + "Name": {"Ref": "EventRuleName"}, + "ScheduleExpression": { "Ref": "IdentificationCheckRateExpression" }, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEvaluation", "Arn"] }, + "Id": {"Ref": "LambdaInitiateEvaluation"} + } + ] + } + }, + "PermissionToInvokeLambdaInitiateEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateEvaluation", "EventInitiateEvaluation"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluation", "Arn"] } + } + }, + "SNSNotifyLambdaEvaluate": { + "Type": "AWS::SNS::Topic", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "DisplayName": { "Ref": "SNSDisplayName" }, + "TopicName": { "Ref": "SNSTopicName" }, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluate", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, + "PermissionToInvokeLambdaEvaluateSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LambdaEvaluate"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluate" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluate", "Arn"] } + } + }, + 
"AlarmErrorsLambdaInitiateEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluate" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluate" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + } + } +} diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 39cf2bf4..45f7c304 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -110,6 +110,10 @@ "Type": "String", "Default": "" }, + "NestedStackTemplate": { + "Type": "String", + "Default": "" + }, "IdentificationIAMRole": { "Type": "String", "Default": "cloudsec-master-id" @@ -512,3007 +516,654 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - "LambdaInitiateSGEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", 
"LogGroupLambdaInitiateSGEvaluation"], + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "Environment": { - "Variables": { - "SNS_SG_ARN": { "Ref": "SNSNotifyLambdaEvaluateSG" } - } - }, - "Description": "Lambda function for initiate to identify bad security groups", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_sec_grps.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] } }, - "LogGroupLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} } }, - 
"SubscriptionFilterLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSGEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } } }, - "LambdaEvaluateSG": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSG"], + "SNSIdentificationErrors": { + "Type": "AWS::SNS::Topic", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe security groups unrestricted access.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sec_grps_unrestricted_access.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "TopicName": {"Fn::Join" : ["", [ { 
"Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] + ]} } }, - "LogGroupLambdaEvaluateSG": { - "Type" : "AWS::Logs::LogGroup", + "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { + "Type" : "AWS::SNS::Subscription", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "Protocol" : "lambda", + "TopicArn" : { "Ref": "SNSIdentificationErrors" } } }, - "SubscriptionFilterLambdaLambdaEvaluateSG": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSG"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } + "PermissionToInvokeLambdaLogsForwarderSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSIdentificationErrors" }, + "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - "LambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], + "AlarmErrorsLambdaBackupDDB": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" 
} - }, - "Environment": { - "Variables": { - "SNS_CLOUDTRAILS_ARN": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" } + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaBackupDDB" } } - }, - "Description": "Lambda function for initiate identification of CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_cloudtrails.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + ], + "Period": 86400, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "StackEvaluateSG": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", 
- "LogGroupLambdaInitiateCloudTrailsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateCloudTrailsEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, + "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", + "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", + "EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Security Groups evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } 
}, - - "LambdaEvaluateCloudTrails": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateCloudTrails"], + "StackEvaluateCloudTrails": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_cloudtrails.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateCloudTrails"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != 
START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateCloudTrails" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, + "InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", + "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3ACLEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3ACL", "LogGroupLambdaInitiateS3ACLEvaluation"], + 
"StackEvaluateS3ACL": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "Environment": { - "Variables": { - "SNS_S3_ACL_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3ACLEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3ACLEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + 
"LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3ACL" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 ACL evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3ACL"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3ACL": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3ACL"], + "StackEvaluateS3Policy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { 
"Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 Policy evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" 
}, "InitiateEvaluationS3Policy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3ACL"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } - } - }, - "LambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], + "StackEvaluateIAMUserKeysRotation": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "Environment": { - "Variables": { - "SNS_S3_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_policy.lambda_handler", - 
"MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3PolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserKeysRotation" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which to be rotate.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserKeysRotation evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserKeysRotation"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaEvaluateS3Policy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], + "StackEvaluateIAMUserInactiveKeys": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ 
"arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Policy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which last used.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys last used.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, 
+ "InitiateLambdaHandler": "initiate_to_desc_iam_access_keys.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserInactiveKeys evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserInactiveKeys"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], + "StackEvaluateEBSVolumes": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_KEYS_ROTATION_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which to be rotate.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + 
"IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", + "EvaluateLambdaDescription": "Lambda function to describe unencrypted ebs volumes.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": 
"initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserKeysRotation"], + "StackEvaluateEBSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys to be rotated.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + 
"IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserKeysRotation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserKeysRotation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": 
"describe_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], + "StackEvaluateRDSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_INACTIVE_KEYS_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_access_keys.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": 
"IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 256, + 
"LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserInactiveKeys"], + "StackEvaluateSQSPublicPolicy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_accesskey_details.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] 
]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS queues.", + "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate SQS queue evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] + 
]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "SubscriptionFilterLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserInactiveKeys"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserInactiveKeys" } + "StackEvaluateS3Encryption": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + 
"LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Encryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LogGroupLambdaInitiateEBSVolumesEvaluation"], + "StackEvaluateRDSEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "Environment": { - "Variables": { - "SNS_EBS_VOLUMES_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted EBS volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": 
"IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted RDS instances.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_instance_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - 
"SubscriptionFilterLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSVolumesEvaluation" } - } - }, - - "LambdaEvaluateEBSVolumes": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSVolumes"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe unencrypted ebs volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSVolumes": { - "Type" : 
"AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSVolumes"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } - } - }, - "LambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], + "StackEvaluateAmiPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_EBS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public EBS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - 
"SubscriptionFilterLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } - } - }, - "LambdaEvaluateEBSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public ebs snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public AMI access issues.", + "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": 
"describe_ebs_public_snapshots.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } - } - }, - "LambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_RDS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public RDS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": 
"initiate_to_desc_rds_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } + "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - 
"LambdaEvaluateRDSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], + "StackEvaluateECSLogging": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public rds snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_public_snapshots.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : 
"[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } - } - }, - "LambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "Environment": { - "Variables": { - "SNS_SQS_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" } - } - }, - "Description": "Lambda function for initiate to identify public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSLogging" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify ECS logging enabled or not.", + "EvaluateLambdaDescription": "Lambda function to describe ECS logging enabled or not.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateECSLoggingLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_sqs_public_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": 
"ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } - } - }, - "LambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyECSLoggingLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_sqs_public_policy.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateSQSPublicPolicy": { - 
"Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } - } - }, - "LambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Environment": { - "Variables": { - "SNS_S3_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" } - } - }, - "Description": "Lambda function for initiate to identify S3 unencrypted buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
- "InitiateS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3EncryptionEvaluation" } - } - }, - "LambdaEvaluateS3Encryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Description": "Lambda function to describe un-encrypted S3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Encryption"], - "Properties" : { - 
"DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } - } - }, - "LambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Environment": { - "Variables": { - "SNS_RDS_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_instance_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, 
...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } - } - }, - "LambdaEvaluateRDSEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Description": "Lambda function to describe un-encrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_instance_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSEncryption" } - } - }, - "LambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { 
"Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_PUBLIC_AMI_ARN": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" } - } - }, - "Description": "Lambda function for initiate to identify public AMI access issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_public_ami_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } - } - }, - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, - "LambdaInitiateECSLoggingEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LogGroupLambdaInitiateECSLoggingEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationECSLogging" } - }, - "Environment": { - "Variables": { - "SNS_ECS_LOGGING_ARN": { "Ref": "SNSNotifyLambdaEvaluateECSLogging" } - } - }, - "Description": "Lambda function for initiate to identify disabled loging of ECS task definition.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateECSLoggingLambdaFunctionName", "value"] } ] - ]}, - "Handler": 
"initiate_to_desc_ecs_logging_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateECSLoggingEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateECSLoggingLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateECSLoggingEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateECSLoggingEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateECSLoggingEvaluation" } - } - }, - "LambdaEvaluateECSLogging": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateECSLogging"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationECSLogging" } - }, - "Description": "Lambda function to describe disabled ECS logging.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyECSLoggingLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ecs_logging_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateECSLogging": { - "Type" : "AWS::Logs::LogGroup", - 
"Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyECSLoggingLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateECSLogging": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateECSLogging"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateECSLogging" } - } - }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": 
["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ 
"Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": 
"IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "EventInitiateEvaluationAMIPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate public AMI access evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateAMIPublicAccessEvaluation" - } - ] - } - }, - "EventInitiateEvaluationECSLogging": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateECSLoggingEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate logging issue ECS task definition evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSLogging"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": 
"ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateECSLoggingEvaluation", "Arn"] }, - "Id": "LambdaInitiateECSLoggingEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": 
["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] - } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, - "Action": 
"lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation", "EventInitiateEvaluationEBSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation", "EventInitiateEvaluationRDSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSQSPublicPolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation", "EventInitiateEvaluationSQSPublicPolicy"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3EncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3EncryptionEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } 
- }, - "PermissionToInvokeLambdaInitiateRDSEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation", "EventInitiateEvaluationRDSEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateECSLoggingEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateECSLoggingEvaluation", "EventInitiateEvaluationECSLogging"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateECSLoggingEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationECSLogging", "Arn"] } - } - }, - "SNSNotifyLambdaEvaluateSG": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateSG"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateCloudTrails": 
{ - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateCloudTrails"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3ACL": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3ACL"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Policy": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3Policy"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { 
"Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSVolumes": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSVolumes"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSSnapshots"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] - }, - 
"Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSSnapshots", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateSQSPublicPolicy", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Encryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateS3Encryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { 
"Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateAMIPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] - - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateECSLogging": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateECSLogging", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSLogging", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSLogging", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "PermissionToInvokeLambdaEvaluateSgSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSG" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateCloudTrailsSNS": { - "Type": 
"AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LambdaEvaluateCloudTrails"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3AclSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": "SNSNotifyLambdaEvaluateS3ACL", - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3PolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LambdaEvaluateS3Policy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserKeysRotationSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserInactiveKeysSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] } - 
} - }, - "PermissionToInvokeLambdaEvaluateEBSVolumesSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LambdaEvaluateEBSVolumes"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateSQSPublicPolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3EncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LambdaEvaluateS3Encryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": 
"SNSNotifyLambdaEvaluateS3Encryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LambdaEvaluateRDSEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateECSLoggingSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateECSLogging", "LambdaEvaluateECSLogging"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateECSLogging" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateECSLogging", "Arn"] } - } - }, - "SNSIdentificationErrors": { - "Type": "AWS::SNS::Topic", - "Properties": { - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] - ]} - } - }, - "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { - "Type" : "AWS::SNS::Subscription", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties" : { - "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "Protocol" : "lambda", - "TopicArn" : { "Ref": "SNSIdentificationErrors" } - } - }, - 
"PermissionToInvokeLambdaLogsForwarderSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSIdentificationErrors" }, - "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } - } - }, - "AlarmErrorsLambdaBackupDDB": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaBackupDDB" } - } - ], - "Period": 86400, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSGEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSGEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSGEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSG"], - "Properties": { - "AlarmActions": [ { "Ref": 
"SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSG" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSG" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateCloudTrailsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEvaluateCloudTrails": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateCloudTrails"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateCloudTrails" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateCloudTrails" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" 
- } - }, - "AlarmErrorsLambdaInitiateS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3ACLEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3ACLEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3ACLEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3ACL"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3ACL" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3ACL" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3PolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - 
"Value": { "Ref": "LambdaInitiateS3PolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Policy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Policy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Policy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - 
"OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserKeysRotation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserKeysRotation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserInactiveKeys" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserInactiveKeys" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : 
"GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSVolumesEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSVolumes"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSVolumes" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSVolumes" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, "LambdaError" ] ]}, - 
"EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", 
"LambdaEvaluateRDSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3EncryptionEvaluation" 
} - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSQSPublicPolicy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSQSPublicPolicy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Encryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Encryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Encryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { 
"Ref": "LambdaInitiateRDSEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSEncryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSEncryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaAMIPublicAccessEvaluation": 
{ - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateECSLoggingEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateECSLoggingEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateECSLoggingEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateECSLoggingEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaECSLoggingEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateECSLogging"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateECSLogging" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": 
"LambdaEvaluateECSLogging" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" + "InitiateLambdaHandler": "initiate_to_desc_ecs_logging_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_ecs_logging_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate ECS logging enabled or not evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSLogging"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSLogging", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSLogging", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } } }, "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 9030a8b0..86d30d56 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -1,7 +1,8 @@ resource "aws_cloudformation_stack" "identification" { - name = "hammer-identification" + name = "hammer-identification-main" depends_on = [ "aws_s3_bucket_object.identification-cfn", + "aws_s3_bucket_object.identification-nested-cfn", "aws_s3_bucket_object.logs-forwarder", "aws_s3_bucket_object.ddb-tables-backup", "aws_s3_bucket_object.sg-issues-identification", @@ -23,6 +24,7 @@ resource "aws_cloudformation_stack" "identification" { parameters { SourceS3Bucket = "${var.s3bucket}" + 
NestedStackTemplate = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-nested-cfn.id}" ResourcesPrefix = "${var.resources-prefix}" IdentificationIAMRole = "${var.identificationIAMRole}" IdentificationCheckRateExpression = "${var.identificationCheckRateExpression}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index b30d552a..31a46ad8 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -4,6 +4,12 @@ resource "aws_s3_bucket_object" "identification-cfn" { source = "${path.module}/../../../cf-templates/identification.json" } +resource "aws_s3_bucket_object" "identification-nested-cfn" { + bucket = "${var.s3bucket}" + key = "cfn/${format("identification-nested-%s.json", "${md5(file("${path.module}/../../../cf-templates/identification-nested.json"))}")}" + source = "${path.module}/../../../cf-templates/identification-nested.json" +} + resource "aws_s3_bucket_object" "logs-forwarder" { bucket = "${var.s3bucket}" key = "lambda/${format("logs-forwarder-%s.zip", "${md5(file("${path.module}/../../../packages/logs-forwarder.zip"))}")}" diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index 4bcf6c08..04fa3281 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -4,7 +4,7 @@ from library.logger import set_logging from library.config import Config from library.aws.ecs import ECSChecker -from library.aws.utility import Account +from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, ECSLoggingIssue from library.ddb_issues import Operations as 
IssueOperations from library.aws.utility import Sns @@ -20,7 +20,8 @@ def lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # region = payload['region'] + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -55,7 +56,7 @@ def lambda_handler(event, context): issue = ECSLoggingIssue(account_id, task_definition.name) issue.issue_details.region = task_definition.account.region issue.issue_details.task_definition_arn = task_definition.arn - issue.issue_details.container_name = task_definition.container_name + issue.issue_details.disabled_logging_container_names = task_definition.disabled_logging_container_names issue.issue_details.tags = task_definition.tags if config.ecs_logging.in_whitelist(account_id, task_definition.name): @@ -68,10 +69,14 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(task_definition.name, None) - logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") - # all other unresolved issues in DDB are for removed/remediated task definitions - for issue in open_issues.values(): - IssueOperations.set_status_resolved(ddb_table, issue) + logging.debug(f"ECS task definitions in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated task definitions + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + # track the progress of API request to scan specific account/region/feature + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) except Exception: logging.exception(f"Failed to check ECS task definitions for '{account_id} ({account_name})'") return diff --git 
a/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py index 166ce9a6..99079bdb 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py @@ -12,7 +12,7 @@ def lambda_handler(event, context): logging.debug("Initiating ECS Cluster logging checking") try: - sns_arn = os.environ["SNS_ECS_LOGGING_ARN"] + sns_arn = os.environ["SNS_ARN"] config = Config() if not config.ecs_logging.enabled: diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 7b2eb30f..6ff6c7ca 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,4 +1,3 @@ -import json import logging from botocore.exceptions import ClientError @@ -12,7 +11,7 @@ 'cluster_arn', # subnet_group_id 'cluster_instance_arn' - ]) +]) class ECSClusterOperations(object): @@ -20,7 +19,7 @@ class ECSClusterOperations(object): @timeit def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): """ Retrieve ecs clusters meta data with security group attached - + :param ec2_client: boto3 ec2 client :param ecs_client: boto3 ECS client :param group_id: security group id @@ -46,7 +45,8 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ) ec2_instance_id = container_instance[0]["ec2InstanceId"] - ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + ec2_instance = \ + ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( @@ -60,25 +60,36 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): class 
ECSTaskDefinitions(object): """ Basic class for ECS task definitions. - + """ - def __init__(self, account, name, arn, tags, container_name=None, is_logging=None, is_privileged=None, external_image=None): + + def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_container_names=None, + is_privileged=None, privileged_container_names=None, external_image=None, + container_image_details=None): """ + :param account: `Account` instance where ECS task definition is present - :param name: name of the task definition :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. + :param tags: tags of task definition. + :param is_logging: boolean. Task definition's container logging is enabled or not + :param disabled_logging_container_names: List of containers which logging disabled. + :param is_privileged: boolean + :param privileged_container_names: List of containers which privileged access enabled + :param external_image: boolean + :param container_image_details: List of containers which image source is taken from external """ + self.account = account self.name = name self.arn = arn self.tags = convert_tags(tags) self.is_logging = is_logging + self.disabled_logging_container_names = disabled_logging_container_names self.is_privileged = is_privileged + self.privileged_container_names = privileged_container_names self.external_image = external_image - self.container_name = container_name + self.container_image_details = container_image_details class ECSChecker(object): @@ -117,10 +128,9 @@ def check(self): if "families" in response: for task_definition_name in response["families"]: tags = {} - logging_enabled = False - external_image = False - is_privileged = False - container_name = None + container_image_details = [] + disabled_logging_container_names = [] + privileged_container_names = [] try: task_definition = self.account.client("ecs").describe_task_definition( 
taskDefinition=task_definition_name @@ -130,23 +140,34 @@ def check(self): for container_definition in task_definition['containerDefinitions']: container_name = container_definition["name"] if container_definition.get('logConfiguration') is None: - logging_enabled = False - else: - logging_enabled = True + disabled_logging_container_names.append(container_name) - container_privileged_details = container_definition.get('privileged') - if container_privileged_details is not None: - if container_definition['privileged']: - is_privileged = True - else: - is_privileged = False + if container_definition.get('privileged') is not None \ + and container_definition['privileged']: + privileged_container_names.append(container_name) image = container_definition.get('image') + image_details = {} if image is not None: if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: - external_image = True - else: - external_image = False + image_details["container_name"] = container_name + image_details["image_url"] = image + container_image_details.append(image_details) + + if len(disabled_logging_container_names) > 0: + logging_enabled = False + else: + logging_enabled = True + + if len(privileged_container_names) > 0: + is_privileged = True + else: + is_privileged = False + + if len(container_image_details) > 0: + external_image = True + else: + external_image = False if "Tags" in task_definition: tags = task_definition["Tags"] @@ -154,10 +175,12 @@ def check(self): name=task_definition_name, arn=task_definition_arn, tags=tags, - container_name=container_name, is_logging=logging_enabled, + disabled_logging_container_names=disabled_logging_container_names, is_privileged=is_privileged, - external_image=external_image + privileged_container_names=privileged_container_names, + external_image=external_image, + container_image_details=container_image_details, ) self.task_definitions.append(task_definition_details) except ClientError as err: diff --git 
a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 130add86..595938dc 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -34,7 +34,7 @@ def create_tickets_ecs_logging(self): issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, ECSLoggingIssue) for issue in issues: task_definition_name = issue.issue_id - container_name = issue.issue_details.container_name + disabled_logging_container_names = issue.issue_details.disabled_logging_container_names region = issue.issue_details.region tags = issue.issue_details.tags # issue has been already reported @@ -52,7 +52,7 @@ def create_tickets_ecs_logging(self): # Adding label with "whitelisted" to jira ticket. jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + label=IssueStatus.Whitelisted.value ) jira.close_issue( ticket_id=issue.jira_details.ticket, @@ -98,13 +98,10 @@ def create_tickets_ecs_logging(self): f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*ECS Task Definition*: {task_definition_name}\n" - f"*ECS Task definition's Container Name*: {container_name}\n", + f"*ECS Task definition disabled logging container names*: {disabled_logging_container_names}\n", f"*Container's logging enabled*: False \n" ) - auto_remediation_date = (self.config.now + self.config.ecs_logging.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" - issue_description += JiraOperations.build_tags_table(tags) issue_description += "\n" From 58b991d4c7c8d3fc948baa15a445e79dbf9cec1c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 18:16:51 +0530 Subject: [PATCH 060/193] Updated with ES encryption issue changes. Updated with ES encryption issue changes. 
--- .../create_elasticsearch_unencrypted_issue_tickets.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index cd6ad97f..df6358d7 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -98,9 +98,6 @@ def create_tickets_elasticsearch_unencryption(self): issue_description += JiraOperations.build_tags_table(tags) - auto_remediation_date = (self.config.now + self.config.esEncrypt.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" - issue_description += ( f"*Recommendation*: Encrypt Elasticsearch domain. To enable encryption follow below steps: \n" f"1. Choose to create new domain. \n" From aba993c5d883d6a621348d17f8f4b4f4fa4e92bf Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 25 Jun 2019 21:23:16 +0530 Subject: [PATCH 061/193] Updated ECS logging testing issue changes. Updated ECS logging testing issue changes. 
--- .../reporting/create_ecs_logging_issue_tickets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 595938dc..b3ef1ab6 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -98,7 +98,7 @@ def create_tickets_ecs_logging(self): f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*ECS Task Definition*: {task_definition_name}\n" - f"*ECS Task definition disabled logging container names*: {disabled_logging_container_names}\n", + f"*ECS Task definition disabled logging container names*: {disabled_logging_container_names}\n" f"*Container's logging enabled*: False \n" ) From a7b7eda1ac8b6d900c8f455a83f03332b968e298 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 12:29:41 +0530 Subject: [PATCH 062/193] Updated with nested stack deployment changes. Updated with nested stack deployment changes. 
--- deployment/cf-templates/ddb.json | 40 +- .../cf-templates/identification-nested.json | 267 ++ deployment/cf-templates/identification.json | 3403 +++-------------- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 6 + .../describe_redshift_encryption.py | 18 +- .../initiate_to_desc_redshift_encryption.py | 2 +- ...shift_unencrypted_cluster_issue_tickets.py | 2 +- 8 files changed, 829 insertions(+), 2913 deletions(-) create mode 100644 deployment/cf-templates/identification-nested.json diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 22ffa9d2..801e0397 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "10", + "ReadCapacityUnits": "25", "WriteCapacityUnits": "2" }, "SSESpecification": { @@ -458,59 +458,59 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} } }, - "DynamoDBRedshiftClusterEncryption": { + "DynamoDBApiRequests": { "Type": "AWS::DynamoDB::Table", - "DeletionPolicy": "Retain", - "DependsOn": ["DynamoDBCredentials"], + "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], "Properties": { "AttributeDefinitions": [ { - "AttributeName": "account_id", - "AttributeType": "S" - }, - { - "AttributeName": "issue_id", + "AttributeName": "request_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "account_id", + "AttributeName": "request_id", "KeyType": "HASH" - }, - { - "AttributeName": "issue_id", - "KeyType": "RANGE" } ], "ProvisionedThroughput": { "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-unencrypted" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } }, - "DynamoDBApiRequests": { + "DynamoDBRedshiftClusterEncryption": { "Type": "AWS::DynamoDB::Table", - "DependsOn": 
["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], "Properties": { "AttributeDefinitions": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" } ], "ProvisionedThroughput": { "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-unencrypted" ] ]} } } } diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json new file mode 100644 index 00000000..53d2fd81 --- /dev/null +++ b/deployment/cf-templates/identification-nested.json @@ -0,0 +1,267 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer identification child stack", + "Parameters": { + "SourceS3Bucket": { + "Type": "String", + "Default": "" + }, + "IdentificationIAMRole": { + "Type": "String", + "Default": "cloudsec-master-id" + }, + "IdentificationCheckRateExpression": { + "Type": "String" + }, + "LambdaSubnets": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas in default system-managed VPC (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "LambdaSecurityGroups": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas with default access rules (recommended). 
All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "IdentificationLambdaSource": { + "Type": "String", + "Default": "sg-issues-identification.zip" + }, + "InitiateLambdaDescription": { + "Type": "String", + "Default": "Lambda that triggers the process of issues identification" + }, + "EvaluateLambdaDescription": { + "Type": "String", + "Default": "Lambda that performs issues identification" + }, + "InitiateLambdaName": { + "Type": "String" + }, + "EvaluateLambdaName": { + "Type": "String" + }, + "InitiateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaMemorySize": { + "Type": "String", + "Default": "256" + }, + "LambdaLogsForwarderArn": { + "Type": "String" + }, + "EventRuleDescription": { + "Type": "String", + "Default": "Triggers initiate lambda" + }, + "EventRuleName": { + "Type": "String" + }, + "SNSDisplayName": { + "Type": "String" + }, + "SNSTopicName": { + "Type": "String" + }, + "SNSIdentificationErrors": { + "Type": "String" + } + }, + "Conditions": { + "LambdaSubnetsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSubnets"}, "" ] + }, + "LambdaSecurityGroupsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSecurityGroups"}, "" ] + } + }, + "Resources": { + "LambdaInitiateEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LogGroupLambdaInitiateEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "Environment": { + "Variables": { + "SNS_ARN": { "Ref": "SNSNotifyLambdaEvaluate" } + } + }, + "Description": { "Ref": "InitiateLambdaDescription" }, + "FunctionName": { "Ref": "InitiateLambdaName" }, + "Handler": {"Ref": "InitiateLambdaHandler"}, + "MemorySize": 128, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::LogGroup", + 
"Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "InitiateLambdaName" } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaInitiateEvaluation"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEvaluation" } + } + }, + "LambdaEvaluate": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "VpcConfig": { + "SecurityGroupIds": { + "Fn::If": [ + "LambdaSecurityGroupsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } + ] + }, + "SubnetIds": { + "Fn::If": [ + "LambdaSubnetsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } + ] + } + }, + "Description": {"Ref": "EvaluateLambdaDescription"}, + "FunctionName": { "Ref": "EvaluateLambdaName" }, + "Handler": {"Ref": "EvaluateLambdaHandler"}, + "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluate": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "EvaluateLambdaName"} ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaLambdaEvaluate": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluate" } + } + }, + "EventInitiateEvaluation": { + "Type": "AWS::Events::Rule", + "DependsOn": 
["LambdaInitiateEvaluation"], + "Properties": { + "Description": {"Ref": "EventRuleDescription"}, + "Name": {"Ref": "EventRuleName"}, + "ScheduleExpression": { "Ref": "IdentificationCheckRateExpression" }, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEvaluation", "Arn"] }, + "Id": {"Ref": "LambdaInitiateEvaluation"} + } + ] + } + }, + "PermissionToInvokeLambdaInitiateEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateEvaluation", "EventInitiateEvaluation"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluation", "Arn"] } + } + }, + "SNSNotifyLambdaEvaluate": { + "Type": "AWS::SNS::Topic", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "DisplayName": { "Ref": "SNSDisplayName" }, + "TopicName": { "Ref": "SNSTopicName" }, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluate", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, + "PermissionToInvokeLambdaEvaluateSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LambdaEvaluate"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluate" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluate", "Arn"] } + } + }, + "AlarmErrorsLambdaInitiateEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateEvaluation" } + } + ], + 
"Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluate" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluate" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + } + } +} diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 893942af..ec769b39 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,8 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationRedshiftClusterEncryption", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationRedshiftClusterEncryption" ] }, { @@ -91,11 +91,11 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, - "SourceIdentificationRedshiftClusterEncryption":{ - "default": "Relative path to unencrypted Redshift Cluster sources" - }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" + }, + "SourceIdentificationRedshiftClusterEncryption":{ + "default": "Relative path to unencrypted Redshift Cluster sources" } } } @@ -110,6 +110,10 @@ "Type": "String", "Default": "" }, + "NestedStackTemplate": { + "Type": "String", + "Default": "" + }, "IdentificationIAMRole": { 
"Type": "String", "Default": "cloudsec-master-id" @@ -465,7 +469,6 @@ "RetentionInDays": "7" } }, - "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -513,3022 +516,654 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - - "LambdaInitiateSGEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "Environment": { - "Variables": { - "SNS_SG_ARN": { "Ref": "SNSNotifyLambdaEvaluateSG" } - } - }, - "Description": "Lambda function for initiate to identify bad security groups", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_sec_grps.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] } }, - "LogGroupLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": 
"AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} } }, - "SubscriptionFilterLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSGEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } } }, - - "LambdaEvaluateSG": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSG"], + "SNSIdentificationErrors": { + "Type": "AWS::SNS::Topic", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe security groups unrestricted access.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sec_grps_unrestricted_access.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] + ]} } }, - "LogGroupLambdaEvaluateSG": { - "Type" : "AWS::Logs::LogGroup", + "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { + "Type" : "AWS::SNS::Subscription", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "Protocol" : "lambda", + "TopicArn" : { "Ref": "SNSIdentificationErrors" } } }, - "SubscriptionFilterLambdaLambdaEvaluateSG": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSG"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } + "PermissionToInvokeLambdaLogsForwarderSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSIdentificationErrors" }, + "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - - 
"LambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], + "AlarmErrorsLambdaBackupDDB": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "Environment": { - "Variables": { - "SNS_CLOUDTRAILS_ARN": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" } + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaBackupDDB" } } - }, - "Description": "Lambda function for initiate identification of CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_cloudtrails.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + ], + "Period": 86400, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "StackEvaluateSG": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - 
"Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateCloudTrailsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateCloudTrailsEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, + "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", + "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", + "EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Security Groups evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ 
"Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateCloudTrails": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateCloudTrails"], + "StackEvaluateCloudTrails": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_cloudtrails.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - 
"IdentifyCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateCloudTrails"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateCloudTrails" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, + "InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", + "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", 
"SNSDisplayNameCloudTrails", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3ACLEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3ACL", "LogGroupLambdaInitiateS3ACLEvaluation"], + "StackEvaluateS3ACL": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "Environment": { - "Variables": { - "SNS_S3_ACL_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - 
"LogGroupLambdaInitiateS3ACLEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3ACLEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3ACL" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 ACL evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3ACL"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3ACL": { - "Type": "AWS::Lambda::Function", - 
"DependsOn": ["LogGroupLambdaEvaluateS3ACL"], + "StackEvaluateS3Policy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3ACL"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } + 
"IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 Policy evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Policy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], + "StackEvaluateIAMUserKeysRotation": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": 
{ "Ref": "SourceIdentificationS3Policy" } - }, - "Environment": { - "Variables": { - "SNS_S3_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3PolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": 
"SourceIdentificationIAMUserKeysRotation" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which to be rotate.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserKeysRotation evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserKeysRotation"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3Policy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], + "StackEvaluateIAMUserInactiveKeys": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": 
{ - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Policy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user 
keys which last used.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys last used.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_access_keys.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserInactiveKeys evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserInactiveKeys"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], + "StackEvaluateEBSVolumes": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_KEYS_ROTATION_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which to be 
rotate.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", + "EvaluateLambdaDescription": "Lambda 
function to describe unencrypted ebs volumes.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserKeysRotation"], + "StackEvaluateEBSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys to be rotated.", - "FunctionName": 
{"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserKeysRotation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserKeysRotation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", 
[ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], + "StackEvaluateRDSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_INACTIVE_KEYS_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - 
"Handler": "initiate_to_desc_iam_access_keys.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", 
"value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserInactiveKeys"], + "StackEvaluateSQSPublicPolicy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": 
"describe_iam_accesskey_details.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserInactiveKeys"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserInactiveKeys" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS queues.", + "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": 
{"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate SQS queue evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LogGroupLambdaInitiateEBSVolumesEvaluation"], + "StackEvaluateS3Encryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "Environment": { - "Variables": { - "SNS_EBS_VOLUMES_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted EBS volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + 
"SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Encryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaInitiateEBSVolumesEvaluation": { - 
"Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSVolumesEvaluation" } - } - }, - - "LambdaEvaluateEBSVolumes": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSVolumes"], + "StackEvaluateRDSEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe unencrypted ebs volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" 
}, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSVolumes"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } - } - }, - - "LambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_EBS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public EBS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify 
unencrypted RDS instances.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } - } - }, - - "LambdaEvaluateEBSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - 
}, - "Description": "Lambda function to describe public ebs snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_ebs_public_snapshots.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } + "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_instance_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + 
"SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], + "StackEvaluateAmiPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_RDS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public RDS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - 
"SubscriptionFilterLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } - } - }, - - "LambdaEvaluateRDSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public rds snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public AMI access issues.", + "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": 
"describe_rds_public_snapshots.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } - } - }, - "LambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "Environment": { - "Variables": { - "SNS_SQS_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" } - } - }, - "Description": "Lambda function for initiate to identify public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": 
"initiate_to_desc_sqs_public_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } + "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": 
"SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], + "StackEvaluateRedshiftClusterEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sqs_public_policy.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties" : { - "DestinationArn" : 
{ "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } - } - }, - - "LambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Environment": { - "Variables": { - "SNS_S3_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" } - } - }, - "Description": "Lambda function for initiate to identify S3 unencrypted buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": 
"LogGroupLambdaInitiateS3EncryptionEvaluation" } - } - }, - "LambdaEvaluateS3Encryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Description": "Lambda function to describe un-encrypted S3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Encryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } - } - }, - - "LambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - 
"Environment": { - "Variables": { - "SNS_RDS_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_instance_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } - } - }, - - "LambdaEvaluateRDSEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Description": "Lambda function to describe un-encrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_instance_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSEncryption" } - } - }, - "LambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_PUBLIC_AMI_ARN": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" } - } - }, - "Description": "Lambda function for initiate to identify public AMI access issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_public_ami_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - 
"Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } - } - }, - - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": 
["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, - "LambdaInitiateRedshiftClusterEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftClusterEncryption", "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRedshiftClusterEncryption" } - }, - "Environment": { - "Variables": { - "SNS_REDSHIFT_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRedshiftClusterEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted Redshift clusters.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftClusterEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify Redshift cluster is encrypted or not.", + "EvaluateLambdaDescription": "Lambda function to describe Redshift cluster is encrypted or not.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": 
["NamingStandards", "InitiateRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_redshift_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRedshiftClusterEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRedshiftClusterEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRedshiftClusterEncryptionEvaluation" } - } - }, - "LambdaEvaluateRedshiftClusterEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRedshiftClusterEncryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRedshiftClusterEncryption" } - }, - "Description": "Lambda function to describe unencrypted Redshift clusters.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] ]}, - "Handler": 
"describe_redshift_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRedshiftClusterEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRedshiftClusterEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRedshiftClusterEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRedshiftClusterEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftClusterEncryption" } - } - }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : 
["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - 
"Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - 
"EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "EventInitiateEvaluationAMIPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate public AMI access evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateAMIPublicAccessEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRedshiftClusterEncryption": { - "Type": 
"AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRedshiftClusterEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate unencrypted Redshift cluster evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftClusterEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftClusterEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRedshiftClusterEncryptionEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": 
"AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] + "InitiateLambdaHandler": ".initiate_to_desc_redshift_encryptionlambda_handler", + "EvaluateLambdaHandler": "describe_redshift_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer 
ScheduledRule to initiate Redshift Cluster evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftClusterEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftClusterEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftClusterEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } - }, - "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation", "EventInitiateEvaluationEBSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSSnapshots", "Arn"] } - } - }, - 
"PermissionToInvokeLambdaInitiateRDSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation", "EventInitiateEvaluationRDSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSQSPublicPolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation", "EventInitiateEvaluationSQSPublicPolicy"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3EncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3EncryptionEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation", "EventInitiateEvaluationRDSEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": 
["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRedshiftClusterEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRedshiftClusterEncryptionEvaluation", "EventInitiateEvaluationRedshiftClusterEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRedshiftClusterEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftClusterEncryption", "Arn"] } - } - }, - "SNSNotifyLambdaEvaluateSG": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateSG"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateCloudTrails": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateCloudTrails"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] - }, - "Protocol": "lambda" - }] - 
} - }, - "SNSNotifyLambdaEvaluateS3ACL": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3ACL"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Policy": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3Policy"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"SNSDisplayNameIAMUserInactiveKeys", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSVolumes": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSVolumes"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSSnapshots"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSSnapshots", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - 
"Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateSQSPublicPolicy", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Encryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateS3Encryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateAMIPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", - "Properties": { - 
"DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRedshiftClusterEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRedshiftClusterEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftClusterEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftClusterEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRedshiftClusterEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "PermissionToInvokeLambdaEvaluateSgSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSG" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateCloudTrailsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LambdaEvaluateCloudTrails"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3AclSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": 
"SNSNotifyLambdaEvaluateS3ACL", - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3PolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LambdaEvaluateS3Policy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserKeysRotationSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserInactiveKeysSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSVolumesSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LambdaEvaluateEBSVolumes"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] } - } - }, - 
"PermissionToInvokeLambdaEvaluateEBSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateSQSPublicPolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3EncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LambdaEvaluateS3Encryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LambdaEvaluateRDSEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": 
"SNSNotifyLambdaEvaluateRDSEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRedshiftClusterEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftClusterEncryption", "LambdaEvaluateRedshiftClusterEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRedshiftClusterEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftClusterEncryption", "Arn"] } - } - }, - "SNSIdentificationErrors": { - "Type": "AWS::SNS::Topic", - "Properties": { - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] - ]} - } - }, - "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { - "Type" : "AWS::SNS::Subscription", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties" : { - "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "Protocol" : "lambda", - "TopicArn" : { "Ref": "SNSIdentificationErrors" } - } - }, - "PermissionToInvokeLambdaLogsForwarderSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSIdentificationErrors" }, - "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } - } - }, 
- "AlarmErrorsLambdaBackupDDB": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaBackupDDB" } - } - ], - "Period": 86400, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSGEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSGEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSGEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSG"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSG" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSG" } - } - ], - "Period": 3600, - "Statistic": 
"Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateCloudTrailsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEvaluateCloudTrails": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateCloudTrails"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateCloudTrails" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateCloudTrails" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3ACLEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3ACLEvaluation" }, 
"LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3ACLEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3ACL"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3ACL" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3ACL" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3PolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3PolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Policy"], - "Properties": { 
- "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Policy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Policy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserKeysRotation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserKeysRotation" } - } - ], - "Period": 3600, - "Statistic": 
"Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserInactiveKeys" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserInactiveKeys" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - 
"AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSVolumesEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSVolumes"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSVolumes" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSVolumes" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - 
"AlarmErrorsLambdaEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - 
"Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3EncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "AlarmActions": [ 
{ "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSQSPublicPolicy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSQSPublicPolicy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Encryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Encryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Encryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - 
"Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSEncryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSEncryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, - 
"EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRedshiftClusterEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftClusterEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftClusterEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRedshiftClusterEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRedshiftClusterEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftClusterEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftClusterEncryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRedshiftClusterEncryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } } }, - "Outputs": { "LambdaLogsForwarderArn": 
{"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 5c61813e..7eb193b4 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -1,7 +1,8 @@ resource "aws_cloudformation_stack" "identification" { - name = "hammer-identification" + name = "hammer-identification-main" depends_on = [ "aws_s3_bucket_object.identification-cfn", + "aws_s3_bucket_object.identification-nested-cfn", "aws_s3_bucket_object.logs-forwarder", "aws_s3_bucket_object.ddb-tables-backup", "aws_s3_bucket_object.sg-issues-identification", @@ -23,6 +24,7 @@ resource "aws_cloudformation_stack" "identification" { parameters { SourceS3Bucket = "${var.s3bucket}" + NestedStackTemplate = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-nested-cfn.id}" ResourcesPrefix = "${var.resources-prefix}" IdentificationIAMRole = "${var.identificationIAMRole}" IdentificationCheckRateExpression = "${var.identificationCheckRateExpression}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index 7fb3a64b..975ba0b5 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -4,6 +4,12 @@ resource "aws_s3_bucket_object" "identification-cfn" { source = "${path.module}/../../../cf-templates/identification.json" } +resource "aws_s3_bucket_object" "identification-nested-cfn" { + bucket = "${var.s3bucket}" + key = "cfn/${format("identification-nested-%s.json", "${md5(file("${path.module}/../../../cf-templates/identification-nested.json"))}")}" + source = "${path.module}/../../../cf-templates/identification-nested.json" +} + resource "aws_s3_bucket_object" "logs-forwarder" { bucket = 
"${var.s3bucket}" key = "lambda/${format("logs-forwarder-%s.zip", "${md5(file("${path.module}/../../../packages/logs-forwarder.zip"))}")}" diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py index a3001955..7f99775a 100644 --- a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py @@ -4,7 +4,7 @@ from library.logger import set_logging from library.config import Config from library.aws.redshift import RedshiftEncryptionChecker -from library.aws.utility import Account +from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, RedshiftEncryptionIssue from library.ddb_issues import Operations as IssueOperations from library.aws.utility import Sns @@ -20,7 +20,8 @@ def lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # region = payload['region'] + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -65,10 +66,15 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(cluster.name, None) - logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") - # all other unresolved issues in DDB are for removed/remediated clusters - for issue in open_issues.values(): - IssueOperations.set_status_resolved(ddb_table, issue) + logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated clusters + for issue in open_issues.values(): + 
IssueOperations.set_status_resolved(ddb_table, issue) + + # track the progress of API request to scan specific account/region/feature + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) except Exception: logging.exception(f"Failed to check Redshift clusters for '{account_id} ({account_name})'") return diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py index d6cc8606..cc499bf0 100644 --- a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py @@ -12,7 +12,7 @@ def lambda_handler(event, context): logging.debug("Initiating Redshift Clusters checking") try: - sns_arn = os.environ["SNS_REDSHIFT_ENCRYPT_ARN"] + sns_arn = os.environ["SNS_ARN"] config = Config() if not config.redshiftEncrypt.enabled: diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 4c36e39a..0c57db30 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -51,7 +51,7 @@ def create_tickets_redshift_unencrypted_cluster(self): # Adding label with "whitelisted" to jira ticket. 
jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + label=IssueStatus.Whitelisted.value ) jira.close_issue( ticket_id=issue.jira_details.ticket, From 129811dcd2ccdca741bd265054d550d4b9865a85 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 13:12:02 +0530 Subject: [PATCH 063/193] Updated with Redshift deployment changes and docs. Updated with Redshift deployment changes and docs. --- deployment/cf-templates/ddb.json | 44 +- .../cf-templates/identification-nested.json | 267 ++ deployment/cf-templates/identification.json | 3456 +++-------------- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 6 + docs/_data/sidebars/mydoc_sidebar.yml | 3 + docs/pages/deployment_cloudformation.md | 1 + docs/pages/editconfig.md | 15 +- docs/pages/features.md | 3 +- .../playbook17_redshift_audit_logging.md | 178 + docs/pages/remediation_backup_rollback.md | 1 + .../describe_redshift_logging_issues.py | 17 +- ...nitiate_to_desc_redshift_logging_issues.py | 2 +- hammer/library/aws/redshift.py | 1 - .../create_redshift_logging_issue_tickets.py | 2 +- 15 files changed, 1055 insertions(+), 2945 deletions(-) create mode 100644 deployment/cf-templates/identification-nested.json create mode 100644 docs/pages/playbook17_redshift_audit_logging.md diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 0e87b36f..b426054a 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "10", + "ReadCapacityUnits": "25", "WriteCapacityUnits": "2" }, "SSESpecification": { @@ -426,7 +426,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } }, - "DynamoDBRedshiftLogging": { + "DynamoDBAMIPublicAccess": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", "DependsOn": ["DynamoDBCredentials"], @@ -455,62 +455,62 @@ 
"ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-logging" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} } }, - "DynamoDBAMIPublicAccess": { + "DynamoDBApiRequests": { "Type": "AWS::DynamoDB::Table", - "DeletionPolicy": "Retain", - "DependsOn": ["DynamoDBCredentials"], + "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], "Properties": { "AttributeDefinitions": [ { - "AttributeName": "account_id", - "AttributeType": "S" - }, - { - "AttributeName": "issue_id", + "AttributeName": "request_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "account_id", + "AttributeName": "request_id", "KeyType": "HASH" - }, - { - "AttributeName": "issue_id", - "KeyType": "RANGE" } ], "ProvisionedThroughput": { "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } }, - "DynamoDBApiRequests": { + "DynamoDBRedshiftLogging": { "Type": "AWS::DynamoDB::Table", - "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], "Properties": { "AttributeDefinitions": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" } ], "ProvisionedThroughput": { "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-logging" ] ]} } } } diff --git 
a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json new file mode 100644 index 00000000..53d2fd81 --- /dev/null +++ b/deployment/cf-templates/identification-nested.json @@ -0,0 +1,267 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer identification child stack", + "Parameters": { + "SourceS3Bucket": { + "Type": "String", + "Default": "" + }, + "IdentificationIAMRole": { + "Type": "String", + "Default": "cloudsec-master-id" + }, + "IdentificationCheckRateExpression": { + "Type": "String" + }, + "LambdaSubnets": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas in default system-managed VPC (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "LambdaSecurityGroups": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas with default access rules (recommended). 
All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "IdentificationLambdaSource": { + "Type": "String", + "Default": "sg-issues-identification.zip" + }, + "InitiateLambdaDescription": { + "Type": "String", + "Default": "Lambda that triggers the process of issues identification" + }, + "EvaluateLambdaDescription": { + "Type": "String", + "Default": "Lambda that performs issues identification" + }, + "InitiateLambdaName": { + "Type": "String" + }, + "EvaluateLambdaName": { + "Type": "String" + }, + "InitiateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaMemorySize": { + "Type": "String", + "Default": "256" + }, + "LambdaLogsForwarderArn": { + "Type": "String" + }, + "EventRuleDescription": { + "Type": "String", + "Default": "Triggers initiate lambda" + }, + "EventRuleName": { + "Type": "String" + }, + "SNSDisplayName": { + "Type": "String" + }, + "SNSTopicName": { + "Type": "String" + }, + "SNSIdentificationErrors": { + "Type": "String" + } + }, + "Conditions": { + "LambdaSubnetsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSubnets"}, "" ] + }, + "LambdaSecurityGroupsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSecurityGroups"}, "" ] + } + }, + "Resources": { + "LambdaInitiateEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LogGroupLambdaInitiateEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "Environment": { + "Variables": { + "SNS_ARN": { "Ref": "SNSNotifyLambdaEvaluate" } + } + }, + "Description": { "Ref": "InitiateLambdaDescription" }, + "FunctionName": { "Ref": "InitiateLambdaName" }, + "Handler": {"Ref": "InitiateLambdaHandler"}, + "MemorySize": 128, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::LogGroup", + 
"Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "InitiateLambdaName" } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaInitiateEvaluation"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEvaluation" } + } + }, + "LambdaEvaluate": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "VpcConfig": { + "SecurityGroupIds": { + "Fn::If": [ + "LambdaSecurityGroupsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } + ] + }, + "SubnetIds": { + "Fn::If": [ + "LambdaSubnetsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } + ] + } + }, + "Description": {"Ref": "EvaluateLambdaDescription"}, + "FunctionName": { "Ref": "EvaluateLambdaName" }, + "Handler": {"Ref": "EvaluateLambdaHandler"}, + "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluate": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "EvaluateLambdaName"} ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaLambdaEvaluate": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluate" } + } + }, + "EventInitiateEvaluation": { + "Type": "AWS::Events::Rule", + "DependsOn": 
["LambdaInitiateEvaluation"], + "Properties": { + "Description": {"Ref": "EventRuleDescription"}, + "Name": {"Ref": "EventRuleName"}, + "ScheduleExpression": { "Ref": "IdentificationCheckRateExpression" }, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEvaluation", "Arn"] }, + "Id": {"Ref": "LambdaInitiateEvaluation"} + } + ] + } + }, + "PermissionToInvokeLambdaInitiateEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateEvaluation", "EventInitiateEvaluation"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluation", "Arn"] } + } + }, + "SNSNotifyLambdaEvaluate": { + "Type": "AWS::SNS::Topic", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "DisplayName": { "Ref": "SNSDisplayName" }, + "TopicName": { "Ref": "SNSTopicName" }, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluate", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, + "PermissionToInvokeLambdaEvaluateSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LambdaEvaluate"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluate" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluate", "Arn"] } + } + }, + "AlarmErrorsLambdaInitiateEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateEvaluation" } + } + ], + 
"Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluate" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluate" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + } + } +} diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index d8a2ca1e..9e8f5a3e 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,8 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationRedshiftLogging", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationRedshiftLogging" ] }, { @@ -91,11 +91,11 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, - "SourceIdentificationRedshiftLogging": { - "default": "Relative path to disabled logging Redshift Cluster sources" - }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative path to Public AMI sources" + }, + "SourceIdentificationRedshiftLogging": { + "default": "Relative path to disabled logging Redshift Cluster sources" } } } @@ -110,6 +110,10 @@ "Type": "String", "Default": "" }, + "NestedStackTemplate": { + "Type": "String", + "Default": "" + }, "IdentificationIAMRole": { "Type": "String", "Default": 
"cloudsec-master-id" @@ -426,7 +430,7 @@ "IdentifyRedshiftLoggingLambdaFunctionName": { "value": "describe-redshift-logging" } - } + } }, "Resources": { "LambdaLogsForwarder": { @@ -465,7 +469,6 @@ "RetentionInDays": "7" } }, - "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -513,3023 +516,654 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - - "LambdaInitiateSGEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "Environment": { - "Variables": { - "SNS_SG_ARN": { "Ref": "SNSNotifyLambdaEvaluateSG" } - } - }, - "Description": "Lambda function for initiate to identify bad security groups", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_sec_grps.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] } }, - "LogGroupLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - 
"InitiateSecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} } }, - "SubscriptionFilterLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSGEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } } }, - - "LambdaEvaluateSG": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSG"], + "SNSIdentificationErrors": { + "Type": "AWS::SNS::Topic", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda 
function to describe security groups unrestricted access.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sec_grps_unrestricted_access.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] + ]} } }, - "LogGroupLambdaEvaluateSG": { - "Type" : "AWS::Logs::LogGroup", + "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { + "Type" : "AWS::SNS::Subscription", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "Protocol" : "lambda", + "TopicArn" : { "Ref": "SNSIdentificationErrors" } } }, - "SubscriptionFilterLambdaLambdaEvaluateSG": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSG"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } + "PermissionToInvokeLambdaLogsForwarderSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": 
"sns.amazonaws.com", + "SourceArn": { "Ref": "SNSIdentificationErrors" }, + "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - - "LambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], + "AlarmErrorsLambdaBackupDDB": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "Environment": { - "Variables": { - "SNS_CLOUDTRAILS_ARN": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" } + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaBackupDDB" } } - }, - "Description": "Lambda function for initiate identification of CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_cloudtrails.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + ], + "Period": 86400, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "StackEvaluateSG": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": 
"IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateCloudTrailsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateCloudTrailsEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, + "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", + "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", + "EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": 
["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Security Groups evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateCloudTrails": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateCloudTrails"], + "StackEvaluateCloudTrails": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, + 
"InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_cloudtrails.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", + "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } + } + }, + "StackEvaluateS3ACL": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", 
{ "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3ACL" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 ACL evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3ACL"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "StackEvaluateS3Policy": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": 
"NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 Policy evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Policy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "SubscriptionFilterLambdaEvaluateCloudTrails": { - 
"Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateCloudTrails"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateCloudTrails" } + "StackEvaluateIAMUserKeysRotation": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserKeysRotation" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which to be rotate.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + 
"EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserKeysRotation evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserKeysRotation"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3ACLEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3ACL", "LogGroupLambdaInitiateS3ACLEvaluation"], + "StackEvaluateIAMUserInactiveKeys": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "Environment": { - "Variables": { - "SNS_S3_ACL_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + 
"LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which last used.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys last used.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_access_keys.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserInactiveKeys evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserInactiveKeys"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3ACLEvaluation": { - "Type" : 
"AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3ACLEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3ACLEvaluation" } - } - }, - - "LambdaEvaluateS3ACL": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3ACL"], + "StackEvaluateEBSVolumes": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3ACLLambdaFunctionName", - "value"] - } ] ] }, - 
"RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3ACL"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", + "EvaluateLambdaDescription": "Lambda function to describe unencrypted ebs volumes.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", 
[ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], + "StackEvaluateEBSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "Environment": { - "Variables": { - "SNS_S3_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3PolicyEvaluation"], - "Properties" : { - 
"DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3PolicyEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3Policy": { - "Type": "AWS::Lambda::Function", 
- "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], + "StackEvaluateRDSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Policy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": 
"LogGroupLambdaEvaluateS3Policy" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], + "StackEvaluateSQSPublicPolicy": { + "Type": 
"AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_KEYS_ROTATION_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which to be rotate.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ 
"cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS queues.", + "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate SQS queue evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserKeysRotation"], + "StackEvaluateS3Encryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } 
- }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys to be rotated.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserKeysRotation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserKeysRotation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + 
"LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Encryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], + "StackEvaluateRDSEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - 
"Environment": { - "Variables": { - "SNS_IAM_USER_INACTIVE_KEYS_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_access_keys.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + 
"IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted RDS instances.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_instance_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserInactiveKeys"], + "StackEvaluateAmiPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": 
"LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_accesskey_details.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public AMI access issues.", + "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": 
["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "SubscriptionFilterLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserInactiveKeys"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserInactiveKeys" } - } - }, - - "LambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "Environment": { - "Variables": { - "SNS_EBS_VOLUMES_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted EBS volumes.", - "FunctionName": {"Fn::Join" : 
["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSVolumesEvaluation" } - } - }, - - "LambdaEvaluateEBSVolumes": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSVolumes"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe unencrypted ebs volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSVolumes"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } - } - }, - - "LambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_EBS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public EBS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": 
{"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } - } - }, - - "LambdaEvaluateEBSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public ebs snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_ebs_public_snapshots.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": 
"AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } - } - }, - - "LambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], + "StackEvaluateECSExternalImageSource": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_RDS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public RDS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + 
"IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } - } - }, - - "LambdaEvaluateRDSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public rds snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": 
"LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSExternalImageSource" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify ECS image source is internal or external.", + "EvaluateLambdaDescription": "Lambda function to describe ECS image source is internal or external.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateECSExternalImageSourceLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_rds_public_snapshots.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } - } - }, - "LambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": 
"SourceIdentificationSQSPublicPolicy" } - }, - "Environment": { - "Variables": { - "SNS_SQS_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" } - } - }, - "Description": "Lambda function for initiate to identify public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyECSExternalImageSourceLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_sqs_public_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } - } - }, - - "LambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": 
"SourceIdentificationSQSPublicPolicy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sqs_public_policy.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } - } - }, - - "LambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": 
"SourceIdentificationS3Encryption" } - }, - "Environment": { - "Variables": { - "SNS_S3_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" } - } - }, - "Description": "Lambda function for initiate to identify S3 unencrypted buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3EncryptionEvaluation" } - } - }, - "LambdaEvaluateS3Encryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Description": "Lambda function to describe un-encrypted S3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Encryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } - } - }, - - "LambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Environment": { - "Variables": { - "SNS_RDS_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_instance_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - 
"Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } - } - }, - - "LambdaEvaluateRDSEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Description": "Lambda function to describe un-encrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_instance_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", 
[ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSEncryption" } - } - }, - "LambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_PUBLIC_AMI_ARN": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" } - } - }, - "Description": "Lambda function for initiate to identify public AMI access issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_public_ami_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] 
}, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } - } - }, - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ 
"LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, - "LambdaInitiateRedshiftLoggingEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftLogging", "LogGroupLambdaInitiateRedshiftLoggingEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRedshiftLogging" } - }, - "Environment": { - "Variables": { - "SNS_REDSHIFT_LOGGING_ARN": { "Ref": "SNSNotifyLambdaEvaluateRedshiftLogging" } - } - }, - "Description": "Lambda function for initiate to identify disabled audit logging Redshift clusters.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftLoggingLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_redshift_logging_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRedshiftLoggingEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRedshiftLoggingLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRedshiftLoggingEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRedshiftLoggingEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != 
DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRedshiftLoggingEvaluation" } - } - }, - "LambdaEvaluateRedshiftLogging": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRedshiftLogging"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRedshiftLogging" } - }, - "Description": "Lambda function to describe disabled audit logging Redshift clusters.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftLoggingLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_redshift_logging_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRedshiftLogging": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRedshiftLoggingLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRedshiftLogging": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRedshiftLogging"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftLogging" } - } - }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, 
"BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - "LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": 
{ "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - 
"EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRedshiftLogging": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluaion"], - 
"Properties": { - "Description": "Hammer ScheduledRule to initiate audit logging issue Redshift cluster evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftLogging"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENALED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftLoggingEvaluation", "Arn"] }, - "Id": "LambdaInitiateRedshiftLoggingEvaluation" - } - ] - } - }, - "EventInitiateEvaluationAMIPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate public AMI access evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateAMIPublicAccessEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } 
- }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { 
"Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] - } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation", "EventInitiateEvaluationEBSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation", "EventInitiateEvaluationRDSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { 
"Fn::GetAtt": ["EventInitiateEvaluationRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSQSPublicPolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation", "EventInitiateEvaluationSQSPublicPolicy"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3EncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3EncryptionEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation", "EventInitiateEvaluationRDSEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRedshiftLoggingEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRedshiftLoggingEvaluation", "EventInitiateEvaluationRedshiftLoggig"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftLogging", "Arn"] } - } - }, - 
"PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } - } - }, - "SNSNotifyLambdaEvaluateSG": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateSG"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateCloudTrails": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateCloudTrails"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3ACL": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3ACL"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"SNSTopicNameS3ACL", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Policy": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3Policy"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - 
"SNSNotifyLambdaEvaluateEBSVolumes": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSVolumes"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSSnapshots"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSSnapshots", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateSQSPublicPolicy", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"SNSDisplayNameSQSPublicPolicy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Encryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateS3Encryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRedshiftLogging": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRedshiftLoggig", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftLogging", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftLogging", "value"] } ] - ]}, - "Subscription": [{ - 
"Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRedshiftLogging", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateAMIPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "PermissionToInvokeLambdaEvaluateSgSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSG" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateCloudTrailsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LambdaEvaluateCloudTrails"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3AclSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": "SNSNotifyLambdaEvaluateS3ACL", - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3PolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", 
"LambdaEvaluateS3Policy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserKeysRotationSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserInactiveKeysSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSVolumesSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LambdaEvaluateEBSVolumes"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] } - } - }, - 
"PermissionToInvokeLambdaEvaluateRDSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateSQSPublicPolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3EncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LambdaEvaluateS3Encryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LambdaEvaluateRDSEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } - } - }, - - "PermissionToInvokeLambdaEvaluateRedshiftLoggingSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftLogging", "LambdaEvaluateRedshiftLogging"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": 
"SNSNotifyLambdaEvaluateRedshiftLogging" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftLogging", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } - } - }, - "SNSIdentificationErrors": { - "Type": "AWS::SNS::Topic", - "Properties": { - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] - ]} - } - }, - "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { - "Type" : "AWS::SNS::Subscription", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties" : { - "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "Protocol" : "lambda", - "TopicArn" : { "Ref": "SNSIdentificationErrors" } - } - }, - "PermissionToInvokeLambdaLogsForwarderSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSIdentificationErrors" }, - "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } - } - }, - "AlarmErrorsLambdaBackupDDB": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": 
"FunctionName", - "Value": { "Ref": "LambdaBackupDDB" } - } - ], - "Period": 86400, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSGEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSGEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSGEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSG"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSG" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSG" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { 
"Ref": "LambdaInitiateCloudTrailsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateCloudTrailsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEvaluateCloudTrails": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateCloudTrails"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateCloudTrails" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateCloudTrails" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3ACLEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3ACLEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3ACLEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": 
["SNSIdentificationErrors", "LambdaEvaluateS3ACL"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3ACL" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3ACL" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3PolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3PolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Policy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Policy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Policy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - 
"ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserKeysRotation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserKeysRotation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], 
- "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserInactiveKeys" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserInactiveKeys" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSVolumesEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } 
- }, - "AlarmErrorsLambdaEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSVolumes"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSVolumes" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSVolumes" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - 
"Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": 
"SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3EncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSQSPublicPolicy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSQSPublicPolicy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : 
"GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Encryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Encryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Encryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSEncryption" }, "LambdaError" ] ]}, - 
"EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSEncryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRedshiftLoggingEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": 
["SNSIdentificationErrors", "LambdaEvaluateRedshiftLogging"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftLogging" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRedshiftLogging" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRedshiftLoggingEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftLoggingEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRedshiftLoggingEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" + "InitiateLambdaHandler": "initiate_to_desc_ecs_external_image_source_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_ecs_external_image_source_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate ECS image source evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSExternalImageSource"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + 
{ "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSExternalImageSource", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } } - }, - "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 65d6c01d..899f3693 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -1,7 +1,8 @@ resource "aws_cloudformation_stack" "identification" { - name = "hammer-identification" + name = "hammer-identification-main" depends_on = [ "aws_s3_bucket_object.identification-cfn", + "aws_s3_bucket_object.identification-nested-cfn", "aws_s3_bucket_object.logs-forwarder", "aws_s3_bucket_object.ddb-tables-backup", "aws_s3_bucket_object.sg-issues-identification", @@ -23,6 +24,7 @@ resource "aws_cloudformation_stack" "identification" { parameters { SourceS3Bucket = "${var.s3bucket}" + NestedStackTemplate = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-nested-cfn.id}" ResourcesPrefix = "${var.resources-prefix}" IdentificationIAMRole = "${var.identificationIAMRole}" IdentificationCheckRateExpression = "${var.identificationCheckRateExpression}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index 19e55097..9d486b3c 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -4,6 +4,12 @@ resource "aws_s3_bucket_object" "identification-cfn" { source = 
"${path.module}/../../../cf-templates/identification.json" } +resource "aws_s3_bucket_object" "identification-nested-cfn" { + bucket = "${var.s3bucket}" + key = "cfn/${format("identification-nested-%s.json", "${md5(file("${path.module}/../../../cf-templates/identification-nested.json"))}")}" + source = "${path.module}/../../../cf-templates/identification-nested.json" +} + resource "aws_s3_bucket_object" "logs-forwarder" { bucket = "${var.s3bucket}" key = "lambda/${format("logs-forwarder-%s.zip", "${md5(file("${path.module}/../../../packages/logs-forwarder.zip"))}")}" diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..afdd05d0 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,6 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + - title: Redshift Audit Logging + url: /playbook17_redshift_audit_logging.html + output: web, pdf diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index c7331eb7..9a470b22 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. 
+* **SourceIdentificationRedshiftLogging**: the relative path to the Lambda package that identifies Redshift audit logging issues. The default value is **redshift-audit-logging-issues-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..38896a04 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,17 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; + +### 2.17. Redshift logging issues + +This section describes how to detect whether Redshift audit logging is enabled for your clusters. Refer to [issue-specific playbook](playbook17_redshift_audit_logging.html) for further details. + +Edit the **redshift_logging** section of the `config.json` file to configure the handling of this issue. + +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-redshift-logging`. 
+* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`; +* **remediation_retention_period**: the amount of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`. diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..8d08417d 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[Redshift Logging Issues](playbook17_redshift_audit_logging.html) |Detects Redshift logging issues |Any one of Redshift cluster logging is not enabled | -Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. \ No newline at end of file +Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues**, **RDS Unencrypted instances** and **Redshift Audit Logging**. 
\ No newline at end of file diff --git a/docs/pages/playbook17_redshift_audit_logging.md b/docs/pages/playbook17_redshift_audit_logging.md new file mode 100644 index 00000000..d91df3bd --- /dev/null +++ b/docs/pages/playbook17_redshift_audit_logging.md @@ -0,0 +1,178 @@ +--- +title: Redshift audit logging issues +keywords: playbook17 +sidebar: mydoc_sidebar +permalink: playbook17_redshift_audit_logging.html +--- + +# Playbook 17: Redshift audit logging issues + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect Redshift audit logging issues. + +## 1. Issue Identification + +Dow Jones Hammer identifies Redshift clusters that do not have audit logging enabled. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. + +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py`| +|Identification|`hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py`| + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration.
+ +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect the person to report the issue to by examining the Redshift cluster logging status. In case the cluster logging is not enabled and a **valid JIRA/Slack user** is detected: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). + +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +``` + + +## 3. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`.
+To identify and report issues of this type, you should add the following parameters in the **redshift_logging** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-redshift-logging` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| + +Sample **config.json** section: +``` +"redshift_logging": { + "enabled": true, + "ddb.table_name": "hammer-redshift-logging", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } +``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific Redshift clusters. To configure such exceptions, you should edit the **redshift_logging** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Redshift cluster id(s)| + +Sample **whitelist.json** section: +``` +"redshift_logging": { + "123456789012": ["redshift_id1", "redshift_id2"] +} +``` + +### 3.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. Account-specific settings precede the global settings in the **ticket_owners.json** configuration file.
+ +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+ +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/initiate-redshift-logging`| +|Identification |`/aws/lambda/describe-redshift-logging`| + +### 4.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_redshift_logging_issue_tickets`| + + +### 4.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 4.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 5. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`id` |string|redshift id |`redshift-id` | +|`tags` |map |Tags associated with Redshift id |`{"Name": "TestKey", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..1675f2d7 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[Redshift Logging issues](playbook17_redshift_audit_logging.html#3-issue-remediation) | `No` | `No` | ## 2. 
How Remediation Backup Works diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py index ea2180c9..bb33f81a 100644 --- a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py @@ -4,7 +4,7 @@ from library.logger import set_logging from library.config import Config from library.aws.redshift import RedshiftLoggingChecker -from library.aws.utility import Account +from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, RedshiftLoggingIssue from library.ddb_issues import Operations as IssueOperations from library.aws.utility import Sns @@ -20,7 +20,8 @@ def lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # region = payload['region'] + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -65,10 +66,14 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(cluster.name, None) - logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") - # all other unresolved issues in DDB are for removed/remediated clusters - for issue in open_issues.values(): - IssueOperations.set_status_resolved(ddb_table, issue) + logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated clusters + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + # track the progress of API request to scan specific 
account/region/feature + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) except Exception: logging.exception(f"Failed to check Redshift clusters for '{account_id} ({account_name})'") return diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py index 29358f2d..9efefa04 100644 --- a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/initiate_to_desc_redshift_logging_issues.py @@ -12,7 +12,7 @@ def lambda_handler(event, context): logging.debug("Initiating Redshift Cluster logging checking") try: - sns_arn = os.environ["SNS_REDSHIFT_LOGGING_ARN"] + sns_arn = os.environ["SNS_ARN"] config = Config() if not config.redshift_logging.enabled: diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 6d068096..8d8142d7 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -249,7 +249,6 @@ def get_cluster(self, name): return cluster return None - def check(self, clusters=None): """ Walk through clusters in the account/region and check them. diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index aaa53e64..78e8b537 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -51,7 +51,7 @@ def create_tickets_redshift_logging(self): # Adding label with "whitelisted" to jira ticket. 
jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + label=IssueStatus.Whitelisted.value ) jira.close_issue( ticket_id=issue.jira_details.ticket, From 7cead4dbf9d8a91f4724d29067321b3b8a62fe65 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 13:18:12 +0530 Subject: [PATCH 064/193] Updated with Redshift encryption issue documentation. Updated with Redshift encryption issue documentation. --- docs/_data/sidebars/mydoc_sidebar.yml | 3 + docs/pages/deployment_cloudformation.md | 5 +- docs/pages/editconfig.md | 14 +- docs/pages/features.md | 3 +- .../pages/playbook15_redshift_unencryption.md | 177 ++++++++++++++++++ docs/pages/remediation_backup_rollback.md | 1 + 6 files changed, 199 insertions(+), 4 deletions(-) create mode 100644 docs/pages/playbook15_redshift_unencryption.md diff --git a/docs/_data/sidebars/mydoc_sidebar.yml b/docs/_data/sidebars/mydoc_sidebar.yml index c9c4bf6c..93bb2936 100644 --- a/docs/_data/sidebars/mydoc_sidebar.yml +++ b/docs/_data/sidebars/mydoc_sidebar.yml @@ -119,3 +119,6 @@ entries: - title: RDS Unencrypted instances url: /playbook12_rds_unencryption.html output: web, pdf + - title: Redshift Unencrypted clusters + url: /playbook15_redshift_unencryption.html + output: web, pdf diff --git a/docs/pages/deployment_cloudformation.md b/docs/pages/deployment_cloudformation.md index 89e15e3d..7a638db1 100644 --- a/docs/pages/deployment_cloudformation.md +++ b/docs/pages/deployment_cloudformation.md @@ -5,14 +5,14 @@ sidebar: mydoc_sidebar permalink: deployment_cloudformation.html --- -You should perform the steps mentioned below to deploy Dow Jones Hammer using CloudFormation: +You should perform the following steps to deploy Dow Jones Hammer using CloudFormation: 1. Accomplish the preliminary steps 2. Put the Dow Jones Hammer packages into the Dow Jones Hammer deployment bucket 3. Deploy CloudFormation stacks to the master AWS account 4. 
Deploy CloudFormation stacks to the slave AWS accounts -## 1. Preliminary steps +## 1. Preliminary Steps Check [this section](configuredeploy_overview.html#2-preliminary-steps) to make sure you have performed all necessary steps before proceeding further. @@ -98,6 +98,7 @@ You will need to set the following parameters: * **SourceIdentificationSQSPublicPolicy**: the relative path to the Lambda package that identifies SQS public queue issues. The default value is **sqs-public-policy-identification.zip**. * **SourceIdentificationS3Encryption**: the relative path to the Lambda package that identifies S3 un-encrypted bucket issues. The default value is **s3-unencrypted-bucket-issues-identification.zip**. * **SourceIdentificationRDSEncryption**: the relative path to the Lambda package that identifies RDS unencrypted instances. The default value is **rds-unencrypted-instance-identification.zip**. +* **SourceIdentificationRedshiftClusterEncryption**: the relative path to the Lambda package that identifies unencrypted redshift cluster issues. The default value is **redshift-unencrypted-cluster-identification.zip**. **VPC config (optional)**: * **LambdaSubnets**: comma-separated list, without spaces, of subnet IDs in your VPC to run identification lambdas in. diff --git a/docs/pages/editconfig.md b/docs/pages/editconfig.md index 23ff0938..7d083078 100644 --- a/docs/pages/editconfig.md +++ b/docs/pages/editconfig.md @@ -386,4 +386,16 @@ Parameters: * **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-rds-unencrypted`. * **accounts**: *optional* comma-separated list of accounts to check and report for issue in square brackets. Use this key to override accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); * **ignore_accounts**: *optional* comma-separated list of accounts to ignore during check. 
Use this key to exclude accounts from **aws.accounts** in [config.json](#11-master-aws-account-settings); -* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; \ No newline at end of file +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +### 2.15. Redshift unencrypted cluster issues + +This section describes how to detect whether you have unencrypted redshift cluster issues or not. Refer to [issue-specific playbook](playbook15_redshift_unencryption.html) for further details. + +Edit the **redshift_encryption** section of the `config.json` file to configure the handling of this issue. + +Parameters: +* **enabled**: enables/disables issue identification. The default value is `true`; +* **ddb.table_name**: the name of the DynamoDB table where Dow Jones Hammer will put detection results. The default value is `hammer-redshift-unencrypted`. +* **reporting**: defines whether Dow Jones Hammer will report detected issues to JIRA/Slack. The default value is `false`; +* **remediation**: defines whether Dow Jones Hammer will automatically remediate the detected issue. The default value is `false`; +* **remediation_retention_period**: the amount of days that should pass between the detection of an issue and its automatic remediation by Dow Jones Hammer. The default value is `0`. 
diff --git a/docs/pages/features.md b/docs/pages/features.md index 3b830f91..beda1d6c 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -21,5 +21,6 @@ Dow Jones Hammer can identify and report the following issues: |[SQS Policy Public Access](playbook10_sqs_public_policy.html) |Detects publicly accessible SQS policy |Any of SQS queues is worldwide accessible by policy | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html) |Detects not encrypted at reset S3 buckets |Any of S3 bucket is not encrypted at rest | |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | +|[Redshift Unencrypted Clusters](playbook15_redshift_unencryption.html) |Detects Redshift unencrypted cluster issues |Any one of Redshift cluster is not encrypted at rest | -Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. \ No newline at end of file +Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**, **Redshift Unencrypted Clusters**. \ No newline at end of file diff --git a/docs/pages/playbook15_redshift_unencryption.md b/docs/pages/playbook15_redshift_unencryption.md new file mode 100644 index 00000000..b29368ad --- /dev/null +++ b/docs/pages/playbook15_redshift_unencryption.md @@ -0,0 +1,177 @@ +--- +title: Redshift unencrypted cluster issues +keywords: playbook15 +sidebar: mydoc_sidebar +permalink: playbook15_redshift_unencryption.html +--- + +# Playbook 15: Redshift unencrypted cluster issues + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect Redshift unencrypted cluster issues. + +## 1. 
Issue Identification + +Dow Jones Hammer checks the 'Encrypted' parameter of Redshift clusters and identifies those clusters that are not encrypted at rest. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. + +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/redshift-unencrypted-cluster-identification/initiate_to_desc_redshift_encryption.py`| +|Identification|`hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py`| + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. + +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect the person to report the issue to by examining the Redshift cluster encryption status. In case the cluster is unencrypted and a **valid JIRA/Slack user** is detected: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file).
+ +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +``` + + +## 3. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. +To identify and report issues of this type, you should add the following parameters in the **redshift_encryption** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-redshift-unencrypted` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| + +Sample **config.json** section: +``` +"redshift_encryption": { + "enabled": true, + "ddb.table_name": "hammer-redshift-unencrypted", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } +``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific Redshift clusters. To configure such exceptions, you should edit the **redshift_encryption** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|Redshift cluster ids(s)| + +Sample **whitelist.json** section: +``` +"redshift_encryption": { + "123456789012": ["redshift_id1", "redshift_id2"] +} +``` + +### 3.3. 
The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. Account-specific settings precede the global settings in the **ticket_owners.json** configuration file. + +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. 
+* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. + +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/initiate-redshift-encryption`| +|Identification |`/aws/lambda/describe-redshift-encryption`| + +### 4.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_redshift_unencrypted_cluster_issue_tickets`| + + +### 4.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 4.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 5. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`id` |string|redshift id |`redshift-id` | +|`tags` |map |Tags associated with Redshift id |`{"Name": "TestKey", "service": "archive"}`| \ No newline at end of file diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index d05fe010..9f404b93 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,6 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | +|[Redshift Unencryption issues](playbook15_redshift_unencryption.html#3-issue-remediation) | `No` | `No` | ## 2. How Remediation Backup Works From dc5bbbbf248be8ee6fd2185eb5de77199cdd90d4 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 13:23:00 +0530 Subject: [PATCH 065/193] Updated with nested template changes. Updated with nested template changes.
--- deployment/cf-templates/identification.json | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 9e8f5a3e..f887e66c 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -1122,7 +1122,7 @@ } } }, - "StackEvaluateECSExternalImageSource": { + "StackEvaluateRedshiftLogging": { "Type": "AWS::CloudFormation::Stack", "Properties": { "TemplateURL": {"Ref": "NestedStackTemplate"}, @@ -1137,26 +1137,26 @@ "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, - "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSExternalImageSource" }, - "InitiateLambdaDescription": "Lambda function for initiate to identify ECS image source is internal or external.", - "EvaluateLambdaDescription": "Lambda function to describe ECS image source is internal or external.", + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftLogging" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify Redshift logging issues.", + "EvaluateLambdaDescription": "Lambda function to describe Redshift logging issues.", "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateECSExternalImageSourceLambdaFunctionName", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftLoggingLambdaFunctionName", "value"] } ] ]}, "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyECSExternalImageSourceLambdaFunctionName", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftLoggingLambdaFunctionName", "value"] } ] ]}, - "InitiateLambdaHandler": 
"initiate_to_desc_ecs_external_image_source_issues.lambda_handler", - "EvaluateLambdaHandler": "describe_ecs_external_image_source_issues.lambda_handler", + "InitiateLambdaHandler": "initiate_to_desc_redshift_logging_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_redshift_logging_issues.lambda_handler", "EvaluateLambdaMemorySize": 256, "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, - "EventRuleDescription": "Hammer ScheduledRule to initiate ECS image source evaluations", - "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationECSExternalImageSource"] ] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Redshift logging issues evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftLogging"] ] }, "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameECSExternalImageSource", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftLogging", "value"] } ] ]}, "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftLogging", "value"] } ] ]}, "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } From 27c00f178607c1da260b5e004ce8b6ac81575279 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 14:08:10 +0530 Subject: [PATCH 066/193] Updated with Redshift public access issue deployment changes. Updated with Redshift public access issue deployment changes. 
--- deployment/cf-templates/ddb.json | 47 +- .../cf-templates/identification-nested.json | 267 ++ deployment/cf-templates/identification.json | 3403 +++-------------- .../modules/identification/identification.tf | 4 +- .../modules/identification/sources.tf | 6 + docs/pages/remediation_backup_rollback.md | 2 +- ...describe_redshift_cluster_public_access.py | 17 +- ..._to_desc_redshift_cluster_public_access.py | 2 +- hammer/library/ddb_issues.py | 9 +- ...te_redshift_public_access_issue_tickets.py | 2 +- 10 files changed, 837 insertions(+), 2922 deletions(-) create mode 100644 deployment/cf-templates/identification-nested.json diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index cf91f971..decaa797 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "10", + "ReadCapacityUnits": "25", "WriteCapacityUnits": "2" }, "SSESpecification": { @@ -426,7 +426,7 @@ "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "rds-unencrypted" ] ]} } }, - "DynamoDBRedshiftClusterPublicAccess": { + "DynamoDBAMIPublicAccess": { "Type": "AWS::DynamoDB::Table", "DeletionPolicy": "Retain", "DependsOn": ["DynamoDBCredentials"], @@ -455,63 +455,62 @@ "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-public-access" ] ]} - } + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} + } }, - - "DynamoDBAMIPublicAccess": { + "DynamoDBApiRequests": { "Type": "AWS::DynamoDB::Table", - "DeletionPolicy": "Retain", - "DependsOn": ["DynamoDBCredentials"], + "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], "Properties": { "AttributeDefinitions": [ { - "AttributeName": "account_id", - "AttributeType": "S" - }, - { - "AttributeName": "issue_id", + "AttributeName": "request_id", "AttributeType": "S" } ], "KeySchema": [ { - 
"AttributeName": "account_id", + "AttributeName": "request_id", "KeyType": "HASH" - }, - { - "AttributeName": "issue_id", - "KeyType": "RANGE" } ], "ProvisionedThroughput": { "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "ec2-public-ami" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} } }, - "DynamoDBApiRequests": { + "DynamoDBRedshiftClusterPublicAccess": { "Type": "AWS::DynamoDB::Table", - "DependsOn": ["DynamoDBCredentials", "DynamoDBSQSPublicPolicy"], + "DeletionPolicy": "Retain", + "DependsOn": ["DynamoDBCredentials"], "Properties": { "AttributeDefinitions": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", "KeyType": "HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" } ], "ProvisionedThroughput": { "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "api-requests" ] ]} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-public-access" ] ]} } } } diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json new file mode 100644 index 00000000..53d2fd81 --- /dev/null +++ b/deployment/cf-templates/identification-nested.json @@ -0,0 +1,267 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer identification child stack", + "Parameters": { + "SourceS3Bucket": { + "Type": "String", + "Default": "" + }, + "IdentificationIAMRole": { + "Type": "String", + "Default": "cloudsec-master-id" + }, + "IdentificationCheckRateExpression": { + "Type": "String" + }, + "LambdaSubnets": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. 
Leave empty to run lambdas in default system-managed VPC (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "LambdaSecurityGroups": { + "Type" : "String", + "Description" : "Comma-separated list, without spaces. Leave empty to run lambdas with default access rules (recommended). All specified security groups and subnets must be in the same VPC.", + "Default": "" + }, + "IdentificationLambdaSource": { + "Type": "String", + "Default": "sg-issues-identification.zip" + }, + "InitiateLambdaDescription": { + "Type": "String", + "Default": "Lambda that triggers the process of issues identification" + }, + "EvaluateLambdaDescription": { + "Type": "String", + "Default": "Lambda that performs issues identification" + }, + "InitiateLambdaName": { + "Type": "String" + }, + "EvaluateLambdaName": { + "Type": "String" + }, + "InitiateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaHandler": { + "Type": "String" + }, + "EvaluateLambdaMemorySize": { + "Type": "String", + "Default": "256" + }, + "LambdaLogsForwarderArn": { + "Type": "String" + }, + "EventRuleDescription": { + "Type": "String", + "Default": "Triggers initiate lambda" + }, + "EventRuleName": { + "Type": "String" + }, + "SNSDisplayName": { + "Type": "String" + }, + "SNSTopicName": { + "Type": "String" + }, + "SNSIdentificationErrors": { + "Type": "String" + } + }, + "Conditions": { + "LambdaSubnetsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSubnets"}, "" ] + }, + "LambdaSecurityGroupsEmpty": { + "Fn::Equals": [ {"Ref": "LambdaSecurityGroups"}, "" ] + } + }, + "Resources": { + "LambdaInitiateEvaluation": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LogGroupLambdaInitiateEvaluation"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "Environment": { + "Variables": { + "SNS_ARN": { "Ref": "SNSNotifyLambdaEvaluate" } + } + }, + "Description": 
{ "Ref": "InitiateLambdaDescription" }, + "FunctionName": { "Ref": "InitiateLambdaName" }, + "Handler": {"Ref": "InitiateLambdaHandler"}, + "MemorySize": 128, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "InitiateLambdaName" } ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaInitiateEvaluation": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": ["LogGroupLambdaInitiateEvaluation"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEvaluation" } + } + }, + "LambdaEvaluate": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaEvaluate"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "IdentificationLambdaSource" } + }, + "VpcConfig": { + "SecurityGroupIds": { + "Fn::If": [ + "LambdaSecurityGroupsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } + ] + }, + "SubnetIds": { + "Fn::If": [ + "LambdaSubnetsEmpty", + [], + { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } + ] + } + }, + "Description": {"Ref": "EvaluateLambdaDescription"}, + "FunctionName": { "Ref": "EvaluateLambdaName" }, + "Handler": {"Ref": "EvaluateLambdaHandler"}, + "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, + "Timeout": "300", + "Role": { "Ref": "IdentificationIAMRole" }, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaEvaluate": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", { "Ref": "EvaluateLambdaName"} ] ] }, + "RetentionInDays": "7" + } + }, + "SubscriptionFilterLambdaLambdaEvaluate": { + "Type" : "AWS::Logs::SubscriptionFilter", + "DependsOn": 
["LogGroupLambdaEvaluate"], + "Properties" : { + "DestinationArn" : { "Ref" : "LambdaLogsForwarderArn" }, + "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", + "LogGroupName" : { "Ref": "LogGroupLambdaEvaluate" } + } + }, + "EventInitiateEvaluation": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "Description": {"Ref": "EventRuleDescription"}, + "Name": {"Ref": "EventRuleName"}, + "ScheduleExpression": { "Ref": "IdentificationCheckRateExpression" }, + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaInitiateEvaluation", "Arn"] }, + "Id": {"Ref": "LambdaInitiateEvaluation"} + } + ] + } + }, + "PermissionToInvokeLambdaInitiateEvaluationCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaInitiateEvaluation", "EventInitiateEvaluation"], + "Properties": { + "FunctionName": { "Ref": "LambdaInitiateEvaluation" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluation", "Arn"] } + } + }, + "SNSNotifyLambdaEvaluate": { + "Type": "AWS::SNS::Topic", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "DisplayName": { "Ref": "SNSDisplayName" }, + "TopicName": { "Ref": "SNSTopicName" }, + "Subscription": [{ + "Endpoint": { + "Fn::GetAtt": ["LambdaEvaluate", "Arn"] + }, + "Protocol": "lambda" + }] + } + }, + "PermissionToInvokeLambdaEvaluateSNS": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["SNSNotifyLambdaEvaluate", "LambdaEvaluate"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSNotifyLambdaEvaluate" }, + "FunctionName": { "Fn::GetAtt": ["LambdaEvaluate", "Arn"] } + } + }, + "AlarmErrorsLambdaInitiateEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaInitiateEvaluation"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ 
{ "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEvaluation" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaInitiateEvaluation" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "AlarmErrorsLambdaEvaluation": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["LambdaEvaluate"], + "Properties": { + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluate" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaEvaluate" } + } + ], + "Period": 3600, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + } + } +} diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 04e2139e..e41b9df2 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -27,8 +27,8 @@ "SourceIdentificationEBSVolumes", "SourceIdentificationEBSSnapshots", "SourceIdentificationRDSSnapshots", - "SourceIdentificationRedshiftPublicAccess", - "SourceIdentificationAMIPublicAccess" + "SourceIdentificationAMIPublicAccess", + "SourceIdentificationRedshiftPublicAccess" ] }, { @@ -91,11 +91,11 @@ "SourceIdentificationRDSSnapshots": { "default": "Relative path to public RDS snapshots lambda sources" }, - "SourceIdentificationRedshiftPublicAccess":{ - "default": "Relative path to publicly accessible Redshift Cluster sources" - }, "SourceIdentificationAMIPublicAccess":{ "default": "Relative 
path to Public AMI sources" + }, + "SourceIdentificationRedshiftPublicAccess":{ + "default": "Relative path to publicly accessible Redshift Cluster sources" } } } @@ -110,6 +110,10 @@ "Type": "String", "Default": "" }, + "NestedStackTemplate": { + "Type": "String", + "Default": "" + }, "IdentificationIAMRole": { "Type": "String", "Default": "cloudsec-master-id" @@ -465,7 +469,6 @@ "RetentionInDays": "7" } }, - "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -513,3022 +516,654 @@ "LogGroupName" : { "Ref": "LogGroupLambdaBackupDDB" } } }, - - "LambdaInitiateSGEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LogGroupLambdaInitiateSGEvaluation"], + "EventBackupDDB": { + "Type": "AWS::Events::Rule", + "DependsOn": ["LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "Environment": { - "Variables": { - "SNS_SG_ARN": { "Ref": "SNSNotifyLambdaEvaluateSG" } - } - }, - "Description": "Lambda function for initiate to identify bad security groups", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_sec_grps.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "Description": "Hammer ScheduledRule for DDB tables backup", + "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, + "ScheduleExpression": "rate(1 day)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, + "Id": "LambdaBackupDDB" + } + ] } }, - "LogGroupLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::LogGroup", - 
"Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaLogsForwarder"], + "Properties": { + "FunctionName": { "Ref": "LambdaLogsForwarder" }, + "Action": "lambda:InvokeFunction", + "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, + "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} } }, - "SubscriptionFilterLambdaInitiateSGEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSGEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSGEvaluation" } + "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { + "Type": "AWS::Lambda::Permission", + "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], + "Properties": { + "FunctionName": { "Ref": "LambdaBackupDDB" }, + "Action": "lambda:InvokeFunction", + "Principal": "events.amazonaws.com", + "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } } }, - - "LambdaEvaluateSG": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSG"], + "SNSIdentificationErrors": { + "Type": "AWS::SNS::Topic", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSG" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - 
"SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe security groups unrestricted access.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sec_grps_unrestricted_access.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" + "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] + ]} } }, - "LogGroupLambdaEvaluateSG": { - "Type" : "AWS::Logs::LogGroup", + "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { + "Type" : "AWS::SNS::Subscription", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySecurityGroupLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" + "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, + "Protocol" : "lambda", + "TopicArn" : { "Ref": "SNSIdentificationErrors" } } }, - "SubscriptionFilterLambdaLambdaEvaluateSG": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSG"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSG" } + "PermissionToInvokeLambdaLogsForwarderSNS": { + "Type": 
"AWS::Lambda::Permission", + "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], + "Properties": { + "Action": "lambda:InvokeFunction", + "Principal": "sns.amazonaws.com", + "SourceArn": { "Ref": "SNSIdentificationErrors" }, + "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } } }, - - "LambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LogGroupLambdaInitiateCloudTrailsEvaluation"], + "AlarmErrorsLambdaBackupDDB": { + "Type": "AWS::CloudWatch::Alarm", + "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "Environment": { - "Variables": { - "SNS_CLOUDTRAILS_ARN": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" } + "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], + "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], + "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, + "EvaluationPeriods": 1, + "Namespace": "AWS/Lambda", + "MetricName": "Errors", + "Dimensions": [ + { + "Name": "FunctionName", + "Value": { "Ref": "LambdaBackupDDB" } } - }, - "Description": "Lambda function for initiate identification of CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_cloudtrails.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + ], + "Period": 86400, + "Statistic": "Maximum", + "ComparisonOperator" : "GreaterThanThreshold", + "Threshold": 0, + "TreatMissingData": "notBreaching" + } + }, + "StackEvaluateSG": { + "Type": "AWS::CloudFormation::Stack", + "Properties": { + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": 
"SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateCloudTrailsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateCloudTrailsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateCloudTrailsEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, + "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", + "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", + 
"EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Security Groups evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateCloudTrails": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateCloudTrails"], + "StackEvaluateCloudTrails": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationCloudTrails" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe CloudTrail issues", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_cloudtrails.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { 
"Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyCloudTrailsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateCloudTrails": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateCloudTrails"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateCloudTrails" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, + "InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", + "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", 
"Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate CloudTrails evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateS3ACLEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3ACL", "LogGroupLambdaInitiateS3ACLEvaluation"], + "StackEvaluateS3ACL": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "Environment": { - "Variables": { - "SNS_S3_ACL_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
- "InitiateS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3ACLEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3ACLEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3ACLEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3ACL" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 ACL evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3ACL"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] + ]}, + 
"SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3ACL": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3ACL"], + "StackEvaluateS3Policy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3ACL" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_acl.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3ACLLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3ACL": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - 
"PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3ACL"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3ACL" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", + "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 Policy evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Policy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - 
"LambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LogGroupLambdaInitiateS3PolicyEvaluation"], + "StackEvaluateIAMUserKeysRotation": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "Environment": { - "Variables": { - "SNS_S3_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" } - } - }, - "Description": "Lambda function for initiate to identify public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3PolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3PolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": 
"LogGroupLambdaInitiateS3PolicyEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserKeysRotation" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which to be rotate.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserKeysRotation evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserKeysRotation"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateS3Policy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Policy"], + "StackEvaluateIAMUserInactiveKeys": { + "Type": 
"AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Policy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public s3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_bucket_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3PolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Policy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Policy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Policy" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 
", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which last used.", + "EvaluateLambdaDescription": "Lambda function to describe IAM user keys last used.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_iam_access_keys.lambda_handler", + "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", + "EvaluateLambdaMemorySize": 128, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate IAMUserInactiveKeys evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationIAMUserInactiveKeys"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], + "StackEvaluateEBSVolumes": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - 
"S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_KEYS_ROTATION_ARN": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which to be rotate.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_users_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserKeysRotationEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, 
")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", + "EvaluateLambdaDescription": "Lambda function to describe unencrypted ebs volumes.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS volumes evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserKeysRotation"], + "StackEvaluateEBSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserKeysRotation" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - 
"LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys to be rotated.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_key_rotation.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserKeysRotationLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserKeysRotation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserKeysRotation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserKeysRotation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": 
"LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_ebs_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 512, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate EBS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], + "StackEvaluateRDSSnapshots": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "Environment": { - "Variables": { - "SNS_IAM_USER_INACTIVE_KEYS_ARN": { "Ref": 
"SNSNotifyLambdaEvaluateIAMUserInactiveKeys" } - } - }, - "Description": "Lambda function for initiate to identify IAM user keys which last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_iam_access_keys.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateIAMUserInactiveKeysEvaluation" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, + 
"InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", + "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", + "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate RDS snapshots evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateIAMUserInactiveKeys": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateIAMUserInactiveKeys"], + "StackEvaluateSQSPublicPolicy": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationIAMUserInactiveKeys" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { 
"Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe IAM user keys last used.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_iam_accesskey_details.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyIAMUserInactiveKeysLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateIAMUserInactiveKeys": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateIAMUserInactiveKeys"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateIAMUserInactiveKeys" } + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS 
queues.", + "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", + "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate SQS queue evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LogGroupLambdaInitiateEBSVolumesEvaluation"], + "StackEvaluateS3Encryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "Environment": { - "Variables": { - "SNS_EBS_VOLUMES_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted EBS volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + ]}, + "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", + "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate S3 encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3Encryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", 
"SNSDisplayNameS3Encryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - "LogGroupLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSVolumesEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSVolumesEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSVolumesEvaluation" } - } - }, - - "LambdaEvaluateEBSVolumes": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSVolumes"], + "StackEvaluateRDSEncryption": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSVolumes" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe unencrypted ebs volumes.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] - ]}, - "Handler": 
"describe_ebs_unencrypted_volumes.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSVolumesLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSVolumes": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSVolumes"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSVolumes" } - } - }, - - "LambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationEBSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_EBS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public EBS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": 
["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, + "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted RDS instances.", + "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateEBSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateEBSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateEBSSnapshotsEvaluation" } - } - }, - - "LambdaEvaluateEBSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateEBSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { 
"Ref": "SourceIdentificationEBSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public ebs snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_ebs_public_snapshots.lambda_handler", - "MemorySize": 512, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyEBSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateEBSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateEBSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateEBSSnapshots" } + "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", + "EvaluateLambdaHandler": 
"describe_rds_instance_encryption.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate rds instance encryption evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], + "StackEvaluateAmiPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "Environment": { - "Variables": { - "SNS_RDS_SNAPSHOTS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" } - } - }, - "Description": "Lambda function for initiate to identify public RDS snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_public_snapshots.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": 
"python3.6" - } - }, - "LogGroupLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSSnapshotsEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSSnapshotsEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSSnapshotsEvaluation" } - } - }, - - "LambdaEvaluateRDSSnapshots": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSSnapshots"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSSnapshots" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public rds snapshots.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, + "InitiateLambdaDescription": 
"Lambda function for initiate to identify public AMI access issues.", + "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_rds_public_snapshots.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSSnapshotsLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSSnapshots": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSSnapshots"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSSnapshots" } - } - }, - "LambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "Environment": { - "Variables": { - "SNS_SQS_POLICY_ARN": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" } - } - }, - "Description": "Lambda function for initiate to identify public SQS queues.", - "FunctionName": {"Fn::Join" : 
["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_sqs_public_policy.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateSQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateSQSPublicPolicyEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateSQSPublicPolicyEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateSQSPublicPolicyEvaluation" } + "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", + "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + } } }, - - "LambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateSQSPublicPolicy"], + "StackEvaluateRedshiftPublicAccess": { + "Type": "AWS::CloudFormation::Stack", "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationSQSPublicPolicy" } - }, - "VpcConfig": { - "SecurityGroupIds": { - "Fn::If": [ - "LambdaSecurityGroupsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSecurityGroups" }] } - ] - }, - "SubnetIds": { - "Fn::If": [ - "LambdaSubnetsEmpty", - [], - { "Fn::Split" : [",", { "Ref": "LambdaSubnets" }] } - ] - } - }, - "Description": "Lambda function to describe public SQS queues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_sqs_public_policy.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + "TemplateURL": {"Ref": "NestedStackTemplate"}, + "Parameters": { + "SourceS3Bucket": { "Ref": "SourceS3Bucket" }, + "IdentificationIAMRole": {"Fn::Join" : ["", [ "arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/", { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifySQSPublicPolicyLambdaFunctionName", - "value"] - } ] ] }, - 
"RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateSQSPublicPolicy": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateSQSPublicPolicy"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateSQSPublicPolicy" } - } - }, - - "LambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Environment": { - "Variables": { - "SNS_S3_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" } - } - }, - "Description": "Lambda function for initiate to identify S3 unencrypted buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_s3_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateS3EncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - 
"PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateS3EncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateS3EncryptionEvaluation" } - } - }, - "LambdaEvaluateS3Encryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateS3Encryption"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationS3Encryption" } - }, - "Description": "Lambda function to describe un-encrypted S3 buckets.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_s3_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyS3EncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateS3Encryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateS3Encryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateS3Encryption" } - } - }, - - 
"LambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Environment": { - "Variables": { - "SNS_RDS_ENCRYPT_ARN": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" } - } - }, - "Description": "Lambda function for initiate to identify unencrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_rds_instance_encryption.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRDSEncryptionEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRDSEncryptionEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRDSEncryptionEvaluation" } - } - }, - "LambdaEvaluateRDSEncryption": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRDSEncryption"], - 
"Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRDSEncryption" } - }, - "Description": "Lambda function to describe un-encrypted RDS instances.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_rds_instance_encryption.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRDSEncryptionLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRDSEncryption": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRDSEncryption"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRDSEncryption" } - } - }, - "LambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_PUBLIC_AMI_ARN": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" } - } - }, - "Description": "Lambda function for initiate to 
identify public AMI access issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "initiate_to_desc_public_ami_issues.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateAMIPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateAMIPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateAMIPublicAccessEvaluation" } - } - }, - "LambdaEvaluateAMIPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationAMIPublicAccess" } - }, - "Description": "Lambda function to describe public AMI issues.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] - ]}, - "Handler": "describe_public_ami_issues.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": 
{"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyAMIPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateAMIPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateAMIPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateAMIPublicAccess" } - } - }, - "LambdaInitiateRedshiftPublicAccessEvaluation": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftPublicAccess", "LogGroupLambdaInitiateRedshiftPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRedshiftPublicAccess" } - }, - "Environment": { - "Variables": { - "SNS_REDSHIFT_PUBLIC_ACCESS_ARN": { "Ref": "SNSNotifyLambdaEvaluateRedshiftPublicAccess" } - } - }, - "Description": "Lambda function for initiate to identify publicly accessible Redshift clusters.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "LambdaSubnets": {"Ref": "LambdaSubnets"}, + "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, + "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftPublicAccess" }, + 
"InitiateLambdaDescription": "Lambda function to initiate to identify Redshift public access issues.", + "EvaluateLambdaDescription": "Lambda function to describe Redshift public access issues.", + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "initiate_to_desc_redshift_cluster_public_access.lambda_handler", - "MemorySize": 128, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaInitiateRedshiftPublicAccessEvaluation": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "InitiateRedshiftPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaInitiateRedshiftPublicAccessEvaluation": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaInitiateRedshiftPublicAccessEvaluation"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaInitiateRedshiftPublicAccessEvaluation" } - } - }, - "LambdaEvaluateRedshiftPublicAccess": { - "Type": "AWS::Lambda::Function", - "DependsOn": ["LogGroupLambdaEvaluateRedshiftPublicAccess"], - "Properties": { - "Code": { - "S3Bucket": { "Ref": "SourceS3Bucket" }, - "S3Key": { "Ref": "SourceIdentificationRedshiftPublicAccess" } - }, - "Description": "Lambda function to describe publicly accessible Redshift clusters.", - "FunctionName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftPublicAccessLambdaFunctionName", "value"] } ] ]}, - "Handler": "describe_redshift_cluster_public_access.lambda_handler", - "MemorySize": 256, - "Timeout": "300", - "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", - { "Ref": "AWS::AccountId" }, - ":role/", - { "Ref": "ResourcesPrefix" }, - { "Ref": "IdentificationIAMRole" } - ] ]}, - "Runtime": "python3.6" - } - }, - "LogGroupLambdaEvaluateRedshiftPublicAccess": { - "Type" : "AWS::Logs::LogGroup", - "Properties" : { - "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", - { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", - "IdentifyRedshiftPublicAccessLambdaFunctionName", - "value"] - } ] ] }, - "RetentionInDays": "7" - } - }, - "SubscriptionFilterLambdaEvaluateRedshiftPublicAccess": { - "Type" : "AWS::Logs::SubscriptionFilter", - "DependsOn": ["LambdaLogsForwarder", - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs", - "LogGroupLambdaEvaluateRedshiftPublicAccess"], - "Properties" : { - "DestinationArn" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "FilterPattern" : "[level != START && level != END && level != DEBUG, ...]", - "LogGroupName" : { "Ref": "LogGroupLambdaEvaluateRedshiftPublicAccess" } - } - }, - "EventBackupDDB": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaBackupDDB"], - "Properties": { - "Description": "Hammer ScheduledRule for DDB tables backup", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "BackupDDB"] ] }, - "ScheduleExpression": "rate(1 day)", - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaBackupDDB", "Arn"] }, - "Id": "LambdaBackupDDB" - } - ] - } - }, - "EventInitiateEvaluationS3IAM": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", - "LambdaInitiateIAMUserInactiveKeysEvaluation", - 
"LambdaInitiateS3EncryptionEvaluation", - "LambdaInitiateS3ACLEvaluation", - "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate S3 and IAM evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationS3IAM"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserKeysRotationEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserKeysRotationEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "Arn"] }, - "Id": "LambdaInitiateIAMUserInactiveKeysEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3EncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3EncryptionEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3ACLEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3ACLEvaluation" - }, - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateS3PolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateS3PolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationCloudTrails": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate CloudTrails evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationCloudTrails"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateCloudTrailsEvaluation", "Arn"] }, - "Id": "LambdaInitiateCloudTrailsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSVolumes": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS volumes evaluations", - "Name": {"Fn::Join" : 
["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSVolumes"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSVolumesEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSVolumesEvaluation" - } - ] - } - }, - "EventInitiateEvaluationEBSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate EBS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationEBSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateEBSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateEBSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSSnapshots": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate RDS snapshots evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSSnapshots"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSSnapshotsEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSSnapshotsEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSG": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSGEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate Security Groups evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSG"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": 
"IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSGEvaluation", "Arn"] }, - "Id": "LambdaInitiateSGEvaluation" - } - ] - } - }, - "EventInitiateEvaluationSQSPublicPolicy": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate SQS queue evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationSQSPublicPolicy"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateSQSPublicPolicyEvaluation", "Arn"] }, - "Id": "LambdaInitiateSQSPublicPolicyEvaluation" - } - ] - } - }, - "EventInitiateEvaluationRDSEncryption": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate rds instance encryption evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRDSEncryption"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRDSEncryptionEvaluation", "Arn"] }, - "Id": "LambdaInitiateRDSEncryptionEvaluation" - } - ] - } - }, - - "EventInitiateEvaluationRedshiftPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateRedshiftPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate publicly accessible Redshift cluster evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": 
"ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateRedshiftPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateRedshiftPublicAccessEvaluation" - } - ] - } - }, - "EventInitiateEvaluationAMIPublicAccess": { - "Type": "AWS::Events::Rule", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": { - "Description": "Hammer ScheduledRule to initiate public AMI access evaluations", - "Name": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, - "ScheduleExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, - "State": "ENABLED", - "Targets": [ - { - "Arn": { "Fn::GetAtt": ["LambdaInitiateAMIPublicAccessEvaluation", "Arn"] }, - "Id": "LambdaInitiateAMIPublicAccessEvaluation" - } - ] - } - }, - "PermissionToInvokeLambdaLogsForwarderCloudWatchLogs": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaLogsForwarder"], - "Properties": { - "FunctionName": { "Ref": "LambdaLogsForwarder" }, - "Action": "lambda:InvokeFunction", - "Principal": {"Fn::Join": ["", [ "logs.", { "Ref": "AWS::Region" }, ".amazonaws.com" ] ]}, - "SourceArn": {"Fn::Join": ["", [ "arn:aws:logs:", { "Ref": "AWS::Region" }, ":", { "Ref": "AWS::AccountId" }, ":log-group:*" ] ]} - } - }, - "PermissionToInvokeLambdaBackupDDBCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaBackupDDB", "EventBackupDDB"], - "Properties": { - "FunctionName": { "Ref": "LambdaBackupDDB" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventBackupDDB", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSGEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSGEvaluation", "EventInitiateEvaluationSG"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSGEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", 
- "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateCloudTrailsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateCloudTrailsEvaluation", "EventInitiateEvaluationCloudTrails"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3ACLEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3ACLEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3ACLEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3PolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3PolicyEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3PolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateIAMUserKeysRotationEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserKeysRotationEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] + "InitiateLambdaHandler": "initiate_to_desc_redshift_cluster_public_access.lambda_handler", + "EvaluateLambdaHandler": 
"describe_redshift_cluster_public_access.lambda_handler", + "EvaluateLambdaMemorySize": 256, + "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, + "EventRuleDescription": "Hammer ScheduledRule to initiate Redshift public access issues evaluations", + "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationRedshiftPublicAccess"] ] }, + "SNSDisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftPublicAccess", "value"] } ] + ]}, + "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftPublicAccess", "value"] } ] + ]}, + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } - }, - "PermissionToInvokeLambdaInitiateIAMUserInactiveKeysEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateIAMUserInactiveKeysEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSVolumesEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSVolumesEvaluation", "EventInitiateEvaluationEBSVolumes"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateEBSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateEBSSnapshotsEvaluation", "EventInitiateEvaluationEBSSnapshots"], - "Properties": { - "FunctionName": { "Ref": 
"LambdaInitiateEBSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSSnapshotsEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSSnapshotsEvaluation", "EventInitiateEvaluationRDSSnapshots"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateSQSPublicPolicyEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateSQSPublicPolicyEvaluation", "EventInitiateEvaluationSQSPublicPolicy"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateS3EncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateS3EncryptionEvaluation", "EventInitiateEvaluationS3IAM"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationS3IAM", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRDSEncryptionEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRDSEncryptionEvaluation", "EventInitiateEvaluationRDSEncryption"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { 
"Fn::GetAtt": ["EventInitiateEvaluationRDSEncryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateRedshiftPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateRedshiftPublicAccessEvaluation", "EventInitiateEvaluationRedshiftPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationRedshiftPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaInitiateAMIPublicAccessEvaluationCloudWatchEvents": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["LambdaInitiateAMIPublicAccessEvaluation", "EventInitiateEvaluationAMIPublicAccess"], - "Properties": { - "FunctionName": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, - "Action": "lambda:InvokeFunction", - "Principal": "events.amazonaws.com", - "SourceArn": { "Fn::GetAtt": ["EventInitiateEvaluationAMIPublicAccess", "Arn"] } - } - }, - "SNSNotifyLambdaEvaluateSG": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateSG"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSecurityGroups", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateCloudTrails": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateCloudTrails"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameCloudTrails", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": 
["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3ACL": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3ACL"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3ACL", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Policy": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateS3Policy"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Policy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserKeysRotation": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserKeysRotation", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateIAMUserInactiveKeys": { - "Type": 
"AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameIAMUserInactiveKeys", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSVolumes": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSVolumes"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSVolumes", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateEBSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": ["LambdaEvaluateEBSSnapshots"], - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameEBSSnapshots", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSSnapshots": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSSnapshots", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSSnapshots", "value"] } ] - 
]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateSQSPublicPolicy": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateSQSPublicPolicy", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameSQSPublicPolicy", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateS3Encryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateS3Encryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameS3Encryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRDSEncryption": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRDSEncryption", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRDSEncryption", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": 
["LambdaEvaluateRDSEncryption", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateRedshiftPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateRedshiftPublicAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameRedshiftPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateRedshiftPublicAccess", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "SNSNotifyLambdaEvaluateAMIPublicAccess": { - "Type": "AWS::SNS::Topic", - "DependsOn": "LambdaEvaluateAMIPublicAccess", - "Properties": { - "DisplayName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSDisplayNameAMIPublicAccess", "value"] } ] - ]}, - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] - ]}, - "Subscription": [{ - "Endpoint": { - "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] - }, - "Protocol": "lambda" - }] - } - }, - "PermissionToInvokeLambdaEvaluateSgSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSG", "LambdaEvaluateSG"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSG" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSG", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateCloudTrailsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateCloudTrails", "LambdaEvaluateCloudTrails"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateCloudTrails" }, - 
"FunctionName": { "Fn::GetAtt": ["LambdaEvaluateCloudTrails", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3AclSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": "SNSNotifyLambdaEvaluateS3ACL", - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3ACL" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3ACL", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3PolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Policy", "LambdaEvaluateS3Policy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Policy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Policy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserKeysRotationSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserKeysRotation", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserKeysRotation" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserKeysRotation", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateIAMUserInactiveKeysSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateIAMUserInactiveKeys", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateIAMUserInactiveKeys" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateIAMUserInactiveKeys", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSVolumesSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSVolumes", "LambdaEvaluateEBSVolumes"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": 
"sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSVolumes" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSVolumes", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateEBSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateEBSSnapshots", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateEBSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateEBSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSSnapshotsSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSSnapshots", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSSnapshots" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSSnapshots", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateSQSPublicPolicySNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateSQSPublicPolicy", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateSQSPublicPolicy" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateSQSPublicPolicy", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateS3EncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateS3Encryption", "LambdaEvaluateS3Encryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateS3Encryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateS3Encryption", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateRDSEncryptionSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRDSEncryption", 
"LambdaEvaluateRDSEncryption"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRDSEncryption" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRDSEncryption", "Arn"] } - } - }, - - "PermissionToInvokeLambdaEvaluateRedshiftPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateRedshiftPublicAccess", "LambdaEvaluateRedshiftPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateRedshiftPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateRedshiftPublicAccess", "Arn"] } - } - }, - "PermissionToInvokeLambdaEvaluateAMIPublicAccessSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSNotifyLambdaEvaluateAMIPublicAccess", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - "SourceArn": { "Ref": "SNSNotifyLambdaEvaluateAMIPublicAccess" }, - "FunctionName": { "Fn::GetAtt": ["LambdaEvaluateAMIPublicAccess", "Arn"] } - } - }, - "SNSIdentificationErrors": { - "Type": "AWS::SNS::Topic", - "Properties": { - "TopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIdentificationErrors", "value"] } ] - ]} - } - }, - "SubscriptionSNSIdentificationErrorsLambdaLogsForwarder": { - "Type" : "AWS::SNS::Subscription", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties" : { - "Endpoint" : { "Fn::GetAtt" : [ "LambdaLogsForwarder", "Arn" ] }, - "Protocol" : "lambda", - "TopicArn" : { "Ref": "SNSIdentificationErrors" } - } - }, - "PermissionToInvokeLambdaLogsForwarderSNS": { - "Type": "AWS::Lambda::Permission", - "DependsOn": ["SNSIdentificationErrors", "LambdaLogsForwarder"], - "Properties": { - "Action": "lambda:InvokeFunction", - "Principal": "sns.amazonaws.com", - 
"SourceArn": { "Ref": "SNSIdentificationErrors" }, - "FunctionName": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] } - } - }, - "AlarmErrorsLambdaBackupDDB": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaBackupDDB"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaBackupDDB" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaBackupDDB" } - } - ], - "Period": 86400, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSGEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSGEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSGEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaSGEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSG"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSG" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - 
"Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSG" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateCloudTrailsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateCloudTrailsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateCloudTrailsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateCloudTrailsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEvaluateCloudTrails": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateCloudTrails"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateCloudTrails" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateCloudTrails" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3ACLEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - 
"OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3ACLEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3ACLEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3ACLEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3ACL"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3ACL" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3ACL" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3PolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3PolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3PolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - 
"AlarmErrorsLambdaS3PolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Policy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Policy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Policy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserKeysRotationEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserKeysRotationEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserKeysRotationEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserKeysRotation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserKeysRotation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": 
"Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserKeysRotation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateIAMUserInactiveKeysEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateIAMUserInactiveKeysEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaIAMUserInactiveKeysEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateIAMUserInactiveKeys"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateIAMUserInactiveKeys" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateIAMUserInactiveKeys" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", 
"LambdaInitiateEBSVolumesEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSVolumesEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSVolumesEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSVolumesEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSVolumes"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSVolumes" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSVolumes" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateEBSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateEBSSnapshotsEvaluation" } - } - ], - "Period": 3600, - 
"Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaEBSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateEBSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateEBSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateEBSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSSnapshotsEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSSnapshotsEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSSnapshotsEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSSnapshots"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": 
"LambdaEvaluateRDSSnapshots" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSSnapshots" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateSQSPublicPolicyEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateSQSPublicPolicyEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateS3EncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateS3EncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateS3EncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - 
"AlarmErrorsLambdaSQSPublicPolicyEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateSQSPublicPolicy"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateSQSPublicPolicy" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateSQSPublicPolicy" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaS3EncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateS3Encryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateS3Encryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateS3Encryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRDSEncryptionEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRDSEncryptionEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ 
- { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRDSEncryptionEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRDSEncryptionEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRDSEncryption"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRDSEncryption" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRDSEncryption" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateRedshiftPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateRedshiftPublicAccessEvaluation"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateRedshiftPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaInitiateAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaInitiateAMIPublicAccessEvaluation"], - "Properties": 
{ - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaInitiateAMIPublicAccessEvaluation" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaRedshiftPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateRedshiftPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateRedshiftPublicAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateRedshiftPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - "ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } - }, - "AlarmErrorsLambdaAMIPublicAccessEvaluation": { - "Type": "AWS::CloudWatch::Alarm", - "DependsOn": ["SNSIdentificationErrors", "LambdaEvaluateAMIPublicAccess"], - "Properties": { - "AlarmActions": [ { "Ref": "SNSIdentificationErrors" } ], - "OKActions": [ { "Ref": "SNSIdentificationErrors" } ], - "AlarmName": {"Fn::Join": ["/", [ { "Ref": "LambdaEvaluateAMIPublicAccess" }, "LambdaError" ] ]}, - "EvaluationPeriods": 1, - "Namespace": "AWS/Lambda", - "MetricName": "Errors", - "Dimensions": [ - { - "Name": "FunctionName", - "Value": { "Ref": "LambdaEvaluateAMIPublicAccess" } - } - ], - "Period": 3600, - "Statistic": "Maximum", - 
"ComparisonOperator" : "GreaterThanThreshold", - "Threshold": 0, - "TreatMissingData": "notBreaching" - } } }, - "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} +} \ No newline at end of file diff --git a/deployment/terraform/modules/identification/identification.tf b/deployment/terraform/modules/identification/identification.tf index 9adf40ad..6eb954cc 100755 --- a/deployment/terraform/modules/identification/identification.tf +++ b/deployment/terraform/modules/identification/identification.tf @@ -1,7 +1,8 @@ resource "aws_cloudformation_stack" "identification" { - name = "hammer-identification" + name = "hammer-identification-main" depends_on = [ "aws_s3_bucket_object.identification-cfn", + "aws_s3_bucket_object.identification-nested-cfn", "aws_s3_bucket_object.logs-forwarder", "aws_s3_bucket_object.ddb-tables-backup", "aws_s3_bucket_object.sg-issues-identification", @@ -23,6 +24,7 @@ resource "aws_cloudformation_stack" "identification" { parameters { SourceS3Bucket = "${var.s3bucket}" + NestedStackTemplate = "https://${var.s3bucket}.s3.amazonaws.com/${aws_s3_bucket_object.identification-nested-cfn.id}" ResourcesPrefix = "${var.resources-prefix}" IdentificationIAMRole = "${var.identificationIAMRole}" IdentificationCheckRateExpression = "${var.identificationCheckRateExpression}" diff --git a/deployment/terraform/modules/identification/sources.tf b/deployment/terraform/modules/identification/sources.tf index b1124915..d9ff601c 100755 --- a/deployment/terraform/modules/identification/sources.tf +++ b/deployment/terraform/modules/identification/sources.tf @@ -4,6 +4,12 @@ resource "aws_s3_bucket_object" "identification-cfn" { source = "${path.module}/../../../cf-templates/identification.json" } +resource "aws_s3_bucket_object" "identification-nested-cfn" { + bucket = "${var.s3bucket}" + key = "cfn/${format("identification-nested-%s.json", 
"${md5(file("${path.module}/../../../cf-templates/identification-nested.json"))}")}" + source = "${path.module}/../../../cf-templates/identification-nested.json" +} + resource "aws_s3_bucket_object" "logs-forwarder" { bucket = "${var.s3bucket}" key = "lambda/${format("logs-forwarder-%s.zip", "${md5(file("${path.module}/../../../packages/logs-forwarder.zip"))}")}" diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index 9c833126..d590cb20 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,7 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | -|[Redshift Public Access issues](playbook16_redshift_public_clusters.html#3-issue-remediation) | `No` | `No` | +|[Redshift Public Access issues](playbook16_redshift_public_clusters.html#3-issue-remediation) | `Yes` | `No` | ## 2. 
How Remediation Backup Works diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py index 2412a219..7410515c 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -4,7 +4,7 @@ from library.logger import set_logging from library.config import Config from library.aws.redshift import RedshiftClusterChecker -from library.aws.utility import Account +from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, RedshiftPublicAccessIssue from library.ddb_issues import Operations as IssueOperations from library.aws.utility import Sns @@ -20,7 +20,8 @@ def lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # region = payload['region'] + # if request_id is present in payload then this lambda was called from the API + request_id = payload.get('request_id', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -65,10 +66,14 @@ def lambda_handler(event, context): # as we already checked it open_issues.pop(cluster.name, None) - logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") - # all other unresolved issues in DDB are for removed/remediated clusters - for issue in open_issues.values(): - IssueOperations.set_status_resolved(ddb_table, issue) + logging.debug(f"Redshift Clusters in DDB:\n{open_issues.keys()}") + # all other unresolved issues in DDB are for removed/remediated clusters + for issue in open_issues.values(): + IssueOperations.set_status_resolved(ddb_table, issue) + # track the progress of API 
request to scan specific account/region/feature + if request_id: + api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name) + DDB.track_progress(api_table, request_id) except Exception: logging.exception(f"Failed to check Redshift clusters for '{account_id} ({account_name})'") return diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/initiate_to_desc_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/initiate_to_desc_redshift_cluster_public_access.py index f6265a98..2fc69218 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/initiate_to_desc_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/initiate_to_desc_redshift_cluster_public_access.py @@ -12,7 +12,7 @@ def lambda_handler(event, context): logging.debug("Initiating Redshift Cluster public access checking") try: - sns_arn = os.environ["SNS_REDSHIFT_PUBLIC_ACCESS_ARN"] + sns_arn = os.environ["SNS_ARN"] config = Config() if not config.redshift_public_access.enabled: diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 954309f5..e7b9a347 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -233,15 +233,16 @@ def __init__(self, *args): super().__init__(*args) -class RedshiftPublicAccessIssue(Issue): - def __init__(self, *args): - super().__init__(*args) - class PublicAMIIssue(Issue): def __init__(self, *args): super().__init__(*args) +class RedshiftPublicAccessIssue(Issue): + def __init__(self, *args): + super().__init__(*args) + + class Operations(object): @staticmethod def find(ddb_table, issue): diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index 016111ca..cb759809 100644 --- 
a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -51,7 +51,7 @@ def create_tickets_redshift_public_access(self): # Adding label with "whitelisted" to jira ticket. jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + label=IssueStatus.Whitelisted.value ) jira.close_issue( ticket_id=issue.jira_details.ticket, From 0b640b9d5e01771f97a00a9930027dbf34c00ba2 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 15:53:18 +0530 Subject: [PATCH 067/193] Updated with Redshift public access deployment issues Updated with Redshift public access deployment issues --- hammer/library/aws/redshift.py | 1 - hammer/library/ddb_issues.py | 4 ++-- .../remediation/clean_redshift_public_access.py | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 271592bd..2c68083c 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -126,7 +126,6 @@ def get_cluster(self, name): return cluster return None - def check(self, clusters=None): """ Walk through clusters in the account/region and check them. 
diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index e7b9a347..d642af0f 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -234,12 +234,12 @@ def __init__(self, *args): class PublicAMIIssue(Issue): - def __init__(self, *args): + def __init__(self, *args): super().__init__(*args) class RedshiftPublicAccessIssue(Issue): - def __init__(self, *args): + def __init__(self, *args): super().__init__(*args) diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index 91d310ea..167aad0b 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -12,7 +12,7 @@ from library.slack_utility import SlackNotification from library.ddb_issues import Operations as IssueOperations from library.ddb_issues import IssueStatus, RedshiftPublicAccessIssue -from library.aws.redshift import RedshiftClusterPublicAccessChecker +from library.aws.redshift import RedshiftClusterChecker from library.aws.utility import Account from library.utility import confirm from library.utility import SingletonInstance, SingletonInstanceException @@ -82,7 +82,7 @@ def clean_redshift_public_access(self, batch=False): if account.session is None: continue - checker = RedshiftClusterPublicAccessChecker(account=account) + checker = RedshiftClusterChecker(account=account) checker.check(clusters=[cluster_id]) cluster_details = checker.get_cluster(cluster_id) From fcb2575f98adf6a9890641aa955332c9f221c08c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 17:46:57 +0530 Subject: [PATCH 068/193] Updated with deployment issue changes. Updated with deployment issue changes. 
--- deployment/cf-templates/identification.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index ec769b39..07f1a35a 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -1146,7 +1146,7 @@ "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] ]}, - "InitiateLambdaHandler": ".initiate_to_desc_redshift_encryptionlambda_handler", + "InitiateLambdaHandler": "initiate_to_desc_redshift_encryption.lambda_handler", "EvaluateLambdaHandler": "describe_redshift_encryption.lambda_handler", "EvaluateLambdaMemorySize": 256, "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, From 8874b996f4239185e1c4db49faf283ab58aa85be Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 18:07:50 +0530 Subject: [PATCH 069/193] Updated with deployment issues. Updated with deployment issues. 
--- .../describe_redshift_encryption.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py index 7f99775a..71674c5b 100644 --- a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py @@ -3,7 +3,7 @@ from library.logger import set_logging from library.config import Config -from library.aws.redshift import RedshiftEncryptionChecker +from library.aws.redshift import RedshiftClusterChecker from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, RedshiftEncryptionIssue from library.ddb_issues import Operations as IssueOperations @@ -48,7 +48,7 @@ def lambda_handler(event, context): open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} logging.debug(f"Redshift clusters in DDB:\n{open_issues.keys()}") - checker = RedshiftEncryptionChecker(account=account) + checker = RedshiftClusterChecker(account=account) if checker.check(): for cluster in checker.clusters: logging.debug(f"Checking {cluster.name}") From df081429138c1a323930ec0bd1eefa48a51463f6 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 22:27:26 +0530 Subject: [PATCH 070/193] Removed unused imports. Removed unused imports. 
--- hammer/library/aws/redshift.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 8d8142d7..d6120a6a 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -1,15 +1,7 @@ -import json import logging -import mimetypes -import pathlib -from datetime import datetime, timezone -from io import BytesIO -from copy import deepcopy from botocore.exceptions import ClientError -from library.utility import jsonDumps from library.utility import timeit -from library.aws.security_groups import SecurityGroup from collections import namedtuple from library.aws.utility import convert_tags From 4947b8bae2cce30216f50a8c0473f7de2d1ce108 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 22:39:20 +0530 Subject: [PATCH 071/193] Removed unused imports. Removed unused imports. --- hammer/library/aws/redshift.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 271592bd..15eee4ff 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -1,15 +1,7 @@ -import json import logging -import mimetypes -import pathlib -from datetime import datetime, timezone -from io import BytesIO -from copy import deepcopy from botocore.exceptions import ClientError -from library.utility import jsonDumps from library.utility import timeit -from library.aws.security_groups import SecurityGroup from collections import namedtuple from library.aws.utility import convert_tags @@ -22,6 +14,7 @@ 'subnet_group_name' ]) + class RedshiftClusterOperations(object): @classmethod @@ -126,7 +119,6 @@ def get_cluster(self, name): return cluster return None - def check(self, clusters=None): """ Walk through clusters in the account/region and check them. 
From 18a8cca88263926978d4696d0e1484527019ce20 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 26 Jun 2019 22:42:44 +0530 Subject: [PATCH 072/193] Removed unused imports. Removed unused imports. --- hammer/library/aws/redshift.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 2c68083c..8fba21bb 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -1,15 +1,7 @@ -import json import logging -import mimetypes -import pathlib -from datetime import datetime, timezone -from io import BytesIO -from copy import deepcopy from botocore.exceptions import ClientError -from library.utility import jsonDumps from library.utility import timeit -from library.aws.security_groups import SecurityGroup from collections import namedtuple from library.aws.utility import convert_tags @@ -22,6 +14,7 @@ 'subnet_group_name' ]) + class RedshiftClusterOperations(object): @classmethod @@ -88,7 +81,6 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo self.is_public = is_public self.is_logging = is_logging - def modify_cluster(self, public_access): """ Modify cluster as private. From 1c41905d2a20e585f37647c4c001f9f0bbf4e5c3 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 14:11:06 +0530 Subject: [PATCH 073/193] Updated with ECS external image test case changes. Updated with ECS external image test case changes. 
--- hammer/library/aws/ecs.py | 4 ++ tests/mock_ecs.py | 46 ++++++++++++++ tests/test_ecs_external_image_source.py | 83 +++++++++++++++++++++++++ tox.ini | 1 + 4 files changed, 134 insertions(+) create mode 100644 tests/mock_ecs.py create mode 100644 tests/test_ecs_external_image_source.py diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index ac7f05d7..54f5bced 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -85,6 +85,10 @@ def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_c self.external_image = external_image self.container_image_details = container_image_details + def __str__(self): + return f"{self.__class__.__name__}(Name={self.name}, is_logging={self.is_logging}, " \ + f"is_privileged={self.is_privileged}, external_image={self.external_image})" + class ECSChecker(object): """ diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py new file mode 100644 index 00000000..21764b72 --- /dev/null +++ b/tests/mock_ecs.py @@ -0,0 +1,46 @@ +import boto3 +import logging + +from moto import mock_ecs +from library.utility import jsonDumps + + +def start(): + """ + Entrypoint for mocking ecs. 
+ :return: nothing + """ + # start ECS mocking with moto + mock = mock_ecs() + mock.start() + + +def create_env_task_definitions(task_definitions, region): + logging.debug(f"======> creating new ECS task definitions from {jsonDumps(task_definitions)}") + ecs_client = boto3.client("ecs", region_name=region) + + test_task_definitions = [] + + for task_definition, rule in task_definitions.items(): + task_definition_arn = ecs_client.register_task_definition( + family=task_definition, + containerDefinitions= rule["containerDefinitions"] + )["taskDefinition"]["taskDefinitionArn"] + task_definition_name = task_definition + test_task_definitions.append(task_definition_name) + + # remove moto precreated task definitions + task_definitions_list_to_check = ecs_client.client.list_task_definition_families() + for task_definition in task_definitions_list_to_check: + + if task_definition not in test_task_definitions: + ecs_client.deregister_task_definition( + taskDefinition=task_definition + ) + + task_definitions = ecs_client.client.list_task_definition_families() + logging.debug(f"{jsonDumps(task_definitions)}") + + # need to return task definitions + return test_task_definitions + diff --git a/tests/test_ecs_external_image_source.py b/tests/test_ecs_external_image_source.py new file mode 100644 index 00000000..337b34d6 --- /dev/null +++ b/tests/test_ecs_external_image_source.py @@ -0,0 +1,83 @@ +import boto3 + +from . 
import mock_ecs +from library.aws.ecs import ECSChecker +from library.aws.utility import Account + +region = "us-east-1" + +task_definitions = { + "tas_definition": { + "family":'test_ecs_task', + "Description": "Congainer image taken from external source", + "containerDefinitions": [ + { + 'name': 'hello_world1', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'privileged': True + }, + { + 'name': 'hello_world2', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'privileged': True + } + ] + } +} + + +def find_task_definition_name(task_definition_details): + for taskDefinition, props in task_definitions.items(): + if props["Id"] == task_definition_details.name: + return taskDefinition + return None + + +def ident_task_definition_test(task_definition_details): + """ + Used to build identification string for each autogenerated test (for easy recognition of failed tests). + + :param task_definition_details: dict with information about rules from + ECSChecker(...) + :return: identification string with task_definition_name. + """ + + name = find_task_definition_name(task_definition_details) + descr = task_definitions.get(name, {}).get("Description", "default description") + return f"params: {name} ({descr})" + + +def pytest_generate_tests(metafunc): + """ + Entrypoint for tests (built-in pytest function for dynamic generation of test cases). + """ + # Launch ECS mocking and env preparation + mock_ecs.start() + test_task_definitions = mock_ecs.create_env_task_definitions(task_definitions, region) + + account = Account(region=region) + + # validate ebs volumes in mocked env + checker = ECSChecker(account) + checker.check(ids=test_task_definitions) + # create test cases for each response + metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) + + +def test_task(task_definition_details): + """ + Actual testing function. 
+ + :param task_definition_details: dict with information about rules from + ECSChecker(...) + :return: nothing, raises AssertionError if actual test result is not matched with expected + """ + name = find_task_definition_name(task_definition_details) + expected = task_definitions.get(name, {})["CheckShouldPass"] + assert expected == task_definition_details.external_image \ No newline at end of file diff --git a/tox.ini b/tox.ini index 058670e4..0fa8cbc5 100755 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,7 @@ python_paths = hammer/identification/lambdas/ebs-unencrypted-volume-identification hammer/identification/lambdas/ebs-public-snapshots-identification hammer/identification/lambdas/sqs-public-policy-identification + hammer/identification/lambdas/ecs-external-image-source-issues-identification hammer [flake8] From 4bbe28b9bf96bba0294b8f70dd83382cbfbf248b Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 17:38:58 +0530 Subject: [PATCH 074/193] Updated with ECS external image source issue config changes. Updated with ECS external image source issue config changes. 
--- .../cf-templates/identification-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/identification-role.json | 9 +++++++++ .../reporting-remediation-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/reporting-remediation-role.json | 9 +++++++++ deployment/configs/config.json | 6 ++++++ deployment/configs/whitelist.json | 4 ++++ 6 files changed, 46 insertions(+) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..5c39e6bf 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -115,6 +115,15 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..d00adf56 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..6a65c13b 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,15 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json 
b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..953daf60 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..a1fde022 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,5 +161,11 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true + }, + "ecs_external_image_source": { + "enabled": true, + "topic_name": "hammer-describe-ecs-external-image-source-lambda", + "ddb.table_name": "hammer-ecs-external-image-source", + "reporting": true } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 3cd1ac81..8180493c 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,9 @@ "s3_encryption": { }, "rds_encryption": { + }, + "ecs_external_image_source":{ + "__comment__": "Detects ECS task definitions which are configured with external image source - task definitions ARNs.", + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/test-admin:2993"] } } From f6c402089e88c48d4bd09c2bc518e9df1d94f510 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 17:48:49 +0530 Subject: [PATCH 075/193] Updated with ECS permissions and configs. Updated with ECS permissions and configs. 
--- .../cf-templates/identification-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/identification-role.json | 9 +++++++++ .../reporting-remediation-crossaccount-role.json | 9 +++++++++ .../cf-templates/reporting-remediation-role.json | 9 +++++++++ deployment/configs/config.json | 10 +++++----- deployment/configs/whitelist.json | 4 ++-- 6 files changed, 43 insertions(+), 7 deletions(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e0badae3..517d8a37 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -115,6 +115,15 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..d00adf56 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..6a65c13b 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,15 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json 
b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..953daf60 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index dd08d027..073a2aaf 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,12 +161,12 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true - } + }, "ecs_logging": { "enabled": true, - "ddb.table_name": "djif-hammer-ecs-logging", - "reporting": true, - "remediation": false, - "remediation_retention_period": 21 + "ddb.table_name": "hammer-ecs-logging", + "topic_name": "hammer-describe-ecs-logging-lambda", + "reporting": true } + } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 7ef55627..f0b5554f 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -50,6 +50,6 @@ }, "ecs_logging":{ "__comment__": "Detects ECS task definitions which are not enabled logging - task definitions ARNs.", - "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/dev-admin:2993"] - } + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/test-admin:2993"] + } } From dd99926813d9c6e0456fec7f6dd8782d8af9545a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 17:58:46 +0530 Subject: [PATCH 076/193] Updated with ECS permissions and config changes. Updated with ECS permissions and config changes. 
--- .../cf-templates/identification-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/identification-role.json | 9 +++++++++ .../reporting-remediation-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/reporting-remediation-role.json | 9 +++++++++ deployment/configs/config.json | 6 ++++++ deployment/configs/whitelist.json | 4 ++++ 6 files changed, 46 insertions(+) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..5c39e6bf 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -115,6 +115,15 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..d00adf56 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..6a65c13b 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,15 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json 
b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..953daf60 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..687557d6 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,5 +161,11 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true + }, + "ecs_privileged_access": { + "enabled": true, + "ddb.table_name": "hammer-ecs-privileged-access", + "topic_name": "hammer-describe-ecs-privileged-access-lambda", + "reporting": true } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 3cd1ac81..ff177735 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,9 @@ "s3_encryption": { }, "rds_encryption": { + }, + "ecs_privileged_access":{ + "__comment__": "Detects ECS task definitions which are not enabled logging - task definitions ARNs.", + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/dev-admin:2993"] } } From 36e4a12e4169c08dc23bb8f73314b881b2e751c4 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 18:02:41 +0530 Subject: [PATCH 077/193] Updated with ECS permissions. Updated with ECS permissions. 
--- .../cf-templates/identification-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/identification-role.json | 9 +++++++++ .../reporting-remediation-crossaccount-role.json | 9 +++++++++ deployment/cf-templates/reporting-remediation-role.json | 9 +++++++++ 4 files changed, 36 insertions(+) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index d5dc0895..08e2da41 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -114,6 +114,15 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index cdd38698..55821810 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -174,6 +174,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index cd014b9c..caeed7a0 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -136,6 +136,15 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index ad1eaa2e..68e613f7 100755 --- 
a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -196,6 +196,15 @@ ], "Resource": "*" }, + { + "Sid": "ECSIssues", + "Effect": "Allow", + "Action": [ + "ecs:Describe*", + "ecs:List*" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", From 90a61d47714f42b954b0fcf4cfafd7c89b0bdfce Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 18:24:11 +0530 Subject: [PATCH 078/193] Updated with Elasticsearch permissions and configs. Updated with Elasticsearch permissions and config changes. --- .../identification-crossaccount-role.json | 12 ++++++++++++ .../cf-templates/identification-role.json | 12 ++++++++++++ ...eporting-remediation-crossaccount-role.json | 18 ++++++++++++++++++ .../reporting-remediation-role.json | 18 ++++++++++++++++++ deployment/configs/config.json | 6 ++++++ deployment/configs/whitelist.json | 6 +++++- 6 files changed, 71 insertions(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..a4e5eee3 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -115,6 +115,18 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..2a593e0e 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + 
"es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9d5e59f1 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,24 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags", + "es:UpdateElasticsearchDomainConfig", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:PutResourcePolicy" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..f01fd8de 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,24 @@ ], "Resource": "*" }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags", + "es:UpdateElasticsearchDomainConfig", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:PutResourcePolicy" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..cf1870b9 100755 --- a/deployment/configs/config.json 
+++ b/deployment/configs/config.json @@ -161,5 +161,11 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true + }, + "es_unencrypted_domain": { + "enabled": true, + "ddb.table_name": "hammer-es-unencrypted-domain", + "topic_name": "hammer-describe-es-encryption-lambda", + "reporting": true } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 3cd1ac81..81a49ddd 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,9 @@ "s3_encryption": { }, "rds_encryption": { - } + }, + "es_unencrypted_domain": { + "__comment__": "Detects Unencrypted Elasticsearch domains - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + } } From 46c8728f43e23a5d0eeea1e1769a4a5fe79f2c59 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 19:40:29 +0530 Subject: [PATCH 079/193] Updated with Elasticsearch review comment changes. Updated with Elasticsearch review comment changes. 
--- ...cribe_elasticsearch_unencrypted_domains.py | 4 ++- hammer/library/aws/elasticsearch.py | 34 ++++++++++++------- ...elasticsearch_unencrypted_issue_tickets.py | 31 ++++++++++++++--- 3 files changed, 50 insertions(+), 19 deletions(-) diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py index b73fabe3..8c5f1c56 100644 --- a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py @@ -52,12 +52,14 @@ def lambda_handler(event, context): checker = ESDomainChecker(account=account) if checker.check(): for domain in checker.domains: - if not domain.encrypted: + if not (domain.encrypted_at_rest and domain.encrypted_at_transit): issue = ESEncryptionIssue(account_id, domain.name) issue.issue_details.region = domain.account.region issue.issue_details.id = domain.id issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags + issue.issue_details.encrypted_at_rest = domain.encrypted_at_rest + issue.issue_details.encrypted_at_transit = domain.encrypted_at_transit if config.esEncrypt.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 52146849..ec831023 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -180,21 +180,27 @@ class ESDomainDetails(object): """ - def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None, policy=None): + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted_at_rest=None, encrypted_at_transit= None, policy=None): """ - 
:param account: `Account` instance where ECS task definition is present - - :param name: name of the task definition - :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. + + :param account: `Account` instance where Elasticsearch domain is present + :param name: name of the Elasticsearch domain + :param id: Elasticsearch domain id. + :param arn: arn of the Elasticsearch domain + :param tags: tags of Elasticsearch domain. + :param is_logging: flag for logging enabled or not. + :param encrypted_at_rest: flag for encryption enabled at rest or not + :param encrypted_at_transit: flag for encryption enabled at transit or not + :param policy: """ + self.account = account self.name = name self.id = id self.arn = arn self.is_logging = is_logging - self.encrypted = encrypted + self.encrypted_at_rest = encrypted_at_rest + self.encrypted_at_transit = encrypted_at_transit self._policy = json.loads(policy) if policy else {} self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @@ -318,16 +324,17 @@ def check(self, ids=None): for domain_detail in domain_details: is_logging = False - domain_encrypted = False + domain_encrypted_at_rest = False + domain_encrypted_at_transit = False domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = domain_detail["ARN"] encryption_at_rest = domain_detail.get("EncryptionAtRestOptions") node_to_node_encryption = domain_detail.get("NodeToNodeEncryptionOptions") if encryption_at_rest and encryption_at_rest["Enabled"]: - domain_encrypted = True - elif node_to_node_encryption and node_to_node_encryption["Enabled"]: - domain_encrypted = True + domain_encrypted_at_rest = True + if node_to_node_encryption and node_to_node_encryption["Enabled"]: + domain_encrypted_at_transit = True logging_details = domain_detail.get("LogPublishingOptions") @@ -350,7 +357,8 @@ def check(self, ids=None): arn=domain_arn, tags=tags, 
is_logging=is_logging, - encrypted=domain_encrypted, + encrypted_at_rest=domain_encrypted_at_rest, + encrypted_at_transit=domain_encrypted_at_transit, policy=access_policy) self.domains.append(domain) return True \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index df6358d7..72debd8c 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -36,6 +36,8 @@ def create_tickets_elasticsearch_unencryption(self): domain_name = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + encrypted_at_rest = issue.issue_details.encrypted_at_rest + encrypted_at_transit = issue.issue_details.encrypted_at_transit # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner @@ -87,8 +89,28 @@ def create_tickets_elasticsearch_unencryption(self): bu = tags.get("bu", None) product = tags.get("product", None) - issue_description = ( - f"Elasticsearch domain needs to be encrypted.\n\n" + issue_description = "" + if not encrypted_at_rest: + issue_description +=( + f"Elasticsearch domain needs to encrypted at rest. \n\n" + ) + issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' unencrypted at rest" + f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + elif not encrypted_at_transit: + issue_description += ( + f"Elasticsearch domain needs to be encrypt at transit. \n\n" + ) + issue_summary = (f"Elasticsearch domain '{domain_name}' unencrypted at transit" + f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + else: + issue_description += ( + f"Elasticsearch domain needs to be encrypt at rest and transit. 
\n\n" + ) + issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' " + f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + + issue_description += ( f"*Risk*: High\n\n" f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" @@ -101,15 +123,14 @@ def create_tickets_elasticsearch_unencryption(self): issue_description += ( f"*Recommendation*: Encrypt Elasticsearch domain. To enable encryption follow below steps: \n" f"1. Choose to create new domain. \n" - f"2. Enable node-node encryption or encryption at rest options.\n" + f"2. Enable both node-node encryption and encryption at rest options.\n" f"3. Fill other domain configuration details and navigate to review page. \n" f"4. On the Review page, review your domain configuration, and then choose 'Confirm' to " f"create new domain. \n " f"5. After creation of new domain, migrate your data to new domain. \n " ) - issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' " - f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") + try: response = jira.add_issue( From baa492246a809776ac4581d68a25ce160c434a7b Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 21:41:32 +0530 Subject: [PATCH 080/193] Updated with Elasticsearch logging issue review comments changes. Updated with Elasticsearch logging issue review comments changes. 
--- .../identification-crossaccount-role.json | 12 +++++ .../cf-templates/identification-role.json | 12 +++++ ...porting-remediation-crossaccount-role.json | 18 +++++++ .../reporting-remediation-role.json | 18 +++++++ deployment/configs/config.json | 8 +++ deployment/configs/whitelist.json | 6 ++- hammer/library/aws/elasticsearch.py | 52 +++++++++++-------- ...sticsearch_domain_logging_issue_tickets.py | 5 +- 8 files changed, 107 insertions(+), 24 deletions(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..a4e5eee3 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -115,6 +115,18 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..2a593e0e 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9d5e59f1 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ 
b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,24 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags", + "es:UpdateElasticsearchDomainConfig", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:PutResourcePolicy" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..f01fd8de 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,24 @@ ], "Resource": "*" }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags", + "es:UpdateElasticsearchDomainConfig", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:PutResourcePolicy" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..8bb39458 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,5 +161,13 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true + }, + "es_domain_logging": { + "enabled": true, + "ddb.table_name": "hammer-es-domain-logging", + "topic_name": "hammer-describe-es-logging-lambda", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 
3cd1ac81..5835c210 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,9 @@ "s3_encryption": { }, "rds_encryption": { - } + }, + "es_domain_logging": { + "__comment__": "Detects Elasticsearch domains which are not enabled logging - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + } } diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 52146849..d4e4f5c4 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -72,7 +72,7 @@ def retrieve_loggroup_arn(cw_client, domain_log_group_name): """ This method used to retrieve cloud watch log group arn details if log group is available. If not, create a cloudwatch log group and returns arn of newly created log group - + :param cw_client: cloudwatch logs boto3 client :param domain_log_group_name: Elasticsearch domain's log group name :return: @@ -94,7 +94,7 @@ def retrieve_loggroup_arn(cw_client, domain_log_group_name): Adding resource policy that grants above access. 
""" - policy_name = "AES-"+domain_log_group_name+"-Application-logs" + policy_name = "AES-" + domain_log_group_name + "-Application-logs" policy_doc = {} statement = {} principal = {} @@ -118,7 +118,7 @@ def retrieve_loggroup_arn(cw_client, domain_log_group_name): @staticmethod def set_domain_logging(es_client, cw_client, domain_name): """ - + :param es_client: elastic search boto3 client :param cw_client: cloudwatch logs boto3 client :param domain_name: elastic search domain name @@ -134,10 +134,10 @@ def set_domain_logging(es_client, cw_client, domain_name): DomainName=domain_name, LogPublishingOptions={ 'ES_APPLICATION_LOGS': - { - 'CloudWatchLogsLogGroupArn': log_group_arn, - 'Enabled': True - } + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } } ) @@ -180,21 +180,28 @@ class ESDomainDetails(object): """ - def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None, policy=None): + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted_at_rest=None, + encrypted_at_transit=None, policy=None): """ - :param account: `Account` instance where ECS task definition is present - :param name: name of the task definition - :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. + :param account: `Account` instance where Elasticsearch domain is present + :param name: name of the Elasticsearch domain + :param id: Elasticsearch domain id. + :param arn: arn of the Elasticsearch domain + :param tags: tags of Elasticsearch domain. + :param is_logging: flag for logging enabled or not. 
+ :param encrypted_at_rest: flag for encryption enabled at rest or not + :param encrypted_at_transit: flag for encryption enabled at transit or not + :param policy: """ + self.account = account self.name = name self.id = id self.arn = arn self.is_logging = is_logging - self.encrypted = encrypted + self.encrypted_at_rest = encrypted_at_rest + self.encrypted_at_transit = encrypted_at_transit self._policy = json.loads(policy) if policy else {} self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @@ -253,11 +260,12 @@ def restrict_policy(self): def set_logging(self): """ - + :return: """ try: - ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), + self.name) except Exception: logging.exception(f"Failed to enable {self.name} logging") return False @@ -318,16 +326,17 @@ def check(self, ids=None): for domain_detail in domain_details: is_logging = False - domain_encrypted = False + domain_encrypted_at_rest = False + domain_encrypted_at_transit = False domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = domain_detail["ARN"] encryption_at_rest = domain_detail.get("EncryptionAtRestOptions") node_to_node_encryption = domain_detail.get("NodeToNodeEncryptionOptions") if encryption_at_rest and encryption_at_rest["Enabled"]: - domain_encrypted = True - elif node_to_node_encryption and node_to_node_encryption["Enabled"]: - domain_encrypted = True + domain_encrypted_at_rest = True + if node_to_node_encryption and node_to_node_encryption["Enabled"]: + domain_encrypted_at_transit = True logging_details = domain_detail.get("LogPublishingOptions") @@ -350,7 +359,8 @@ def check(self, ids=None): arn=domain_arn, tags=tags, is_logging=is_logging, - encrypted=domain_encrypted, + encrypted_at_rest=domain_encrypted_at_rest, + 
encrypted_at_transit=domain_encrypted_at_transit, policy=access_policy) self.domains.append(domain) return True \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index 32af1a8e..4057a6f2 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -100,8 +100,9 @@ def create_tickets_elasticsearch_domain_logging(self): issue_description += JiraOperations.build_tags_table(tags) - auto_remediation_date = (self.config.now + self.config.esLogging.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.esLogging.remediation: + auto_remediation_date = (self.config.now + self.config.esLogging.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += ( f"*Recommendation*: " From 0910d383de38931c7142c4c834a42386c2ad256f Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 21:56:30 +0530 Subject: [PATCH 081/193] Updated with ES public policy issue review changes. Updated with ES public policy issue review changes. 
--- .../identification-crossaccount-role.json | 12 +++++ .../cf-templates/identification-role.json | 12 +++++ ...porting-remediation-crossaccount-role.json | 18 +++++++ .../reporting-remediation-role.json | 18 +++++++ deployment/configs/config.json | 10 +++- deployment/configs/whitelist.json | 6 ++- hammer/library/aws/elasticsearch.py | 52 +++++++++++-------- ...asticsearch_public_access_issue_tickets.py | 5 +- 8 files changed, 108 insertions(+), 25 deletions(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..a4e5eee3 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -115,6 +115,18 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..2a593e0e 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9d5e59f1 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ 
b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,24 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags", + "es:UpdateElasticsearchDomainConfig", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:PutResourcePolicy" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..f01fd8de 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,24 @@ ], "Resource": "*" }, + { + "Sid": "ESIssues", + "Effect": "Allow", + "Action": [ + "es:ListDomainNames", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomainConfig", + "es:DescribeElasticsearchDomains", + "es:ListTags", + "es:UpdateElasticsearchDomainConfig", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:PutResourcePolicy" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..9c621902 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,5 +161,13 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true - } + }, + "es_public_access_domain": { + "enabled": true, + "ddb.table_name": "hammer-es-public-access-domain", + "topic_name": "hammer-describe-es-public-access-lambda", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } } diff --git a/deployment/configs/whitelist.json 
b/deployment/configs/whitelist.json index 3cd1ac81..a7733262 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,9 @@ "s3_encryption": { }, "rds_encryption": { - } + }, + "es_public_access_domain": { + "__comment__": "Detects Unencrypted Elasticsearch publicly accessible domains - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + } } diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 52146849..d4e4f5c4 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -72,7 +72,7 @@ def retrieve_loggroup_arn(cw_client, domain_log_group_name): """ This method used to retrieve cloud watch log group arn details if log group is available. If not, create a cloudwatch log group and returns arn of newly created log group - + :param cw_client: cloudwatch logs boto3 client :param domain_log_group_name: Elasticsearch domain's log group name :return: @@ -94,7 +94,7 @@ def retrieve_loggroup_arn(cw_client, domain_log_group_name): Adding resource policy that grants above access. 
""" - policy_name = "AES-"+domain_log_group_name+"-Application-logs" + policy_name = "AES-" + domain_log_group_name + "-Application-logs" policy_doc = {} statement = {} principal = {} @@ -118,7 +118,7 @@ def retrieve_loggroup_arn(cw_client, domain_log_group_name): @staticmethod def set_domain_logging(es_client, cw_client, domain_name): """ - + :param es_client: elastic search boto3 client :param cw_client: cloudwatch logs boto3 client :param domain_name: elastic search domain name @@ -134,10 +134,10 @@ def set_domain_logging(es_client, cw_client, domain_name): DomainName=domain_name, LogPublishingOptions={ 'ES_APPLICATION_LOGS': - { - 'CloudWatchLogsLogGroupArn': log_group_arn, - 'Enabled': True - } + { + 'CloudWatchLogsLogGroupArn': log_group_arn, + 'Enabled': True + } } ) @@ -180,21 +180,28 @@ class ESDomainDetails(object): """ - def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted=None, policy=None): + def __init__(self, account, name, id, arn, tags=None, is_logging=None, encrypted_at_rest=None, + encrypted_at_transit=None, policy=None): """ - :param account: `Account` instance where ECS task definition is present - :param name: name of the task definition - :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. + :param account: `Account` instance where Elasticsearch domain is present + :param name: name of the Elasticsearch domain + :param id: Elasticsearch domain id. + :param arn: arn of the Elasticsearch domain + :param tags: tags of Elasticsearch domain. + :param is_logging: flag for logging enabled or not. 
+ :param encrypted_at_rest: flag for encryption enabled at rest or not + :param encrypted_at_transit: flag for encryption enabled at transit or not + :param policy: """ + self.account = account self.name = name self.id = id self.arn = arn self.is_logging = is_logging - self.encrypted = encrypted + self.encrypted_at_rest = encrypted_at_rest + self.encrypted_at_transit = encrypted_at_transit self._policy = json.loads(policy) if policy else {} self.backup_filename = pathlib.Path(f"{self.name}.json") self.tags = convert_tags(tags) @@ -253,11 +260,12 @@ def restrict_policy(self): def set_logging(self): """ - + :return: """ try: - ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), self.name) + ElasticSearchOperations.set_domain_logging(self.account.client("es"), self.account.client("logs"), + self.name) except Exception: logging.exception(f"Failed to enable {self.name} logging") return False @@ -318,16 +326,17 @@ def check(self, ids=None): for domain_detail in domain_details: is_logging = False - domain_encrypted = False + domain_encrypted_at_rest = False + domain_encrypted_at_transit = False domain_name = domain_detail["DomainName"] domain_id = domain_detail["DomainId"] domain_arn = domain_detail["ARN"] encryption_at_rest = domain_detail.get("EncryptionAtRestOptions") node_to_node_encryption = domain_detail.get("NodeToNodeEncryptionOptions") if encryption_at_rest and encryption_at_rest["Enabled"]: - domain_encrypted = True - elif node_to_node_encryption and node_to_node_encryption["Enabled"]: - domain_encrypted = True + domain_encrypted_at_rest = True + if node_to_node_encryption and node_to_node_encryption["Enabled"]: + domain_encrypted_at_transit = True logging_details = domain_detail.get("LogPublishingOptions") @@ -350,7 +359,8 @@ def check(self, ids=None): arn=domain_arn, tags=tags, is_logging=is_logging, - encrypted=domain_encrypted, + encrypted_at_rest=domain_encrypted_at_rest, + 
encrypted_at_transit=domain_encrypted_at_transit, policy=access_policy) self.domains.append(domain) return True \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 4b74cfb4..6e5cec57 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -102,8 +102,9 @@ def create_tickets_elasticsearch_public_access(self): issue_description += JiraOperations.build_tags_table(tags) - auto_remediation_date = (self.config.now + self.config.esPublicAccess.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.esPublicAccess.remediation: + auto_remediation_date = (self.config.now + self.config.esPublicAccess.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += ( f"*Recommendation*: " From d545f9d437fcdd241300bbf920da0769052072ce Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 23:09:09 +0530 Subject: [PATCH 082/193] Updated with Redshift encryption issue config changes. Updated with Redshift encryption issue config changes. 
--- .../identification-crossaccount-role.json | 16 ++++++++++++++-- .../cf-templates/identification-role.json | 12 ++++++++++++ ...reporting-remediation-crossaccount-role.json | 17 +++++++++++++++++ .../reporting-remediation-role.json | 17 +++++++++++++++++ deployment/configs/config.json | 6 ++++++ deployment/configs/whitelist.json | 4 ++++ ...edshift_unencrypted_cluster_issue_tickets.py | 17 +++++++++++++---- 7 files changed, 83 insertions(+), 6 deletions(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..81bcb787 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -47,7 +47,7 @@ "ec2:DescribeInstances", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", - "ec2:DescribeImages", + "ec2:DescribeImages" ], "Resource": "*" }, @@ -115,7 +115,19 @@ "sqs:ListQueueTags" ], "Resource": "*" - } + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + } ] } }] diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..f07a279c 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git 
a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9bfa486e 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,23 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..198e88b1 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,23 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..3c03367a 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,5 +161,11 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": 
"hammer-describe-rds-encryption-lambda", "reporting": true + }, + "redshift_encryption": { + "enabled": true, + "ddb.table_name": "hammer-redshift-unencrypted", + "topic_name": "hammer-describe-redshift-cluster-encryption-lambda", + "reporting": true } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 3cd1ac81..43a3a1f8 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,9 @@ "s3_encryption": { }, "rds_encryption": { + }, + "redshift_encryption":{ + "__comment__": "Detects unencrypted clusters.", + "123456789012": ["test-cluster"] } } diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 0c57db30..1ee4d54e 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -98,15 +98,24 @@ def create_tickets_redshift_unencrypted_cluster(self): f"*Region*: {region}\n" f"*Redshift Cluster ID*: {cluster_id}\n") - auto_remediation_date = (self.config.now + self.config.redshiftEncrypt.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.redshiftEncrypt.remediation: + auto_remediation_date = (self.config.now + self.config.redshiftEncrypt.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) issue_description += "\n" issue_description += ( - f"*Recommendation*: " - f"Encrypt Redshift cluster.") + f"*Recommendation*: \n" + f"Modify an unencrypted cluster using AWS Key Management Service (AWS KMS) encryption. " + f"Follow below steps to encrypt redshift cluster:\n " + f"1. 
Sign in to the AWS Management Console and open the Amazon Redshift console.\n" + f"2. In navigation pane, choose Clusters, and then choose cluster that you want to modify.\n" + f"3. Choose Cluster, and then choose Modify.\n" + f"4. Choose KMS to enable encryption for Encrypt database field\n" + f"5. For Master Key, choose Enter a key ARN and enter the ARN in the ARN field.\n" + f"6. Choose Modify.\n\n" + ) try: response = jira.add_issue( From 6e4b198cf9ff49ef16179b870c2d0cedf42b9261 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 23:26:12 +0530 Subject: [PATCH 083/193] Updated with RedshiftLogging issue permissions. Updated with RedshiftLogging issue permissions. --- .../identification-crossaccount-role.json | 16 ++++++++++++++-- .../cf-templates/identification-role.json | 12 ++++++++++++ ...reporting-remediation-crossaccount-role.json | 17 +++++++++++++++++ .../reporting-remediation-role.json | 17 +++++++++++++++++ deployment/configs/config.json | 6 ++++++ deployment/configs/whitelist.json | 7 ++++++- .../create_redshift_logging_issue_tickets.py | 17 +++++++++++++---- 7 files changed, 85 insertions(+), 7 deletions(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..14b2efd9 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -47,7 +47,7 @@ "ec2:DescribeInstances", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", - "ec2:DescribeImages", + "ec2:DescribeImages" ], "Resource": "*" }, @@ -115,7 +115,19 @@ "sqs:ListQueueTags" ], "Resource": "*" - } + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + } ] } }] diff 
--git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..f07a279c 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9bfa486e 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,23 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..198e88b1 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,23 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + 
"redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..5703aac8 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -161,5 +161,11 @@ "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true + }, + "redshift_logging": { + "enabled": true, + "ddb.table_name": "hammer-redshift-logging", + "topic_name": "hammer-describe-redshift-logging-lambda", + "reporting": true } } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 3cd1ac81..189b8262 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -45,5 +45,10 @@ "s3_encryption": { }, "rds_encryption": { - } + }, + "redshift_logging": { + "__comment__": "Detects Redshift clusters which are audit logging is not enabled.", + "123456789012": ["test-cluster"] + } + } diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index 78e8b537..aa285e5c 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -98,15 +98,24 @@ def create_tickets_redshift_logging(self): f"*Region*: {region}\n" f"*Redshift Cluster ID*: {cluster_id}\n") - auto_remediation_date = (self.config.now + self.config.redshift_logging.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" 
- issue_description += JiraOperations.build_tags_table(tags) issue_description += "\n" issue_description += ( f"*Recommendation*: " - f"Enable logging for Redshift cluster.") + f"Enable logging for Redshift cluster. To enable logging, follow below steps:\n\n" + f"1. Sign in to the AWS Management Console and open the Amazon Redshift console.\n" + f"2. In the navigation pane, click Clusters.\n" + f"3. In the list, click the cluster for which you want to enable logging.\n" + f"4. In the cluster details page, click Database, and then click Configure Audit Logging.\n" + f"5. In the Configure Audit Logging dialog box, in the Enable Audit Logging box, click Yes.\n" + f"6. For S3 Bucket, do one of the following:\n" + f" (a)If you already have an S3 bucket that you want to use, " + f"select Use Existing and then select the bucket from the Bucket list.\n" + f" (b)If you need a new S3 bucket,select Create New, and in New Bucket Name box, type a name.\n" + f"7. Optionally, in the S3 Key Prefix box, type a prefix to add to the S3 bucket.\n" + f"8. Click Save \n\n" + ) try: response = jira.add_issue( From 2569a0edb2bdf579d35b4dd406ba0eac3c5725ed Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 23:29:39 +0530 Subject: [PATCH 084/193] Removed auto-remediation related configurations. Removed auto-remediation related configurations. 
--- .../create_redshift_unencrypted_cluster_issue_tickets.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 1ee4d54e..ea588015 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -98,10 +98,6 @@ def create_tickets_redshift_unencrypted_cluster(self): f"*Region*: {region}\n" f"*Redshift Cluster ID*: {cluster_id}\n") - if self.config.redshiftEncrypt.remediation: - auto_remediation_date = (self.config.now + self.config.redshiftEncrypt.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" - issue_description += JiraOperations.build_tags_table(tags) issue_description += "\n" From f5b31b26f2058e3c0ccdd34e029bffc90f099d95 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 23:38:16 +0530 Subject: [PATCH 085/193] Updated with Redshift public access issue permissions. Updated with Redshift public access issue permissions. 
--- .../identification-crossaccount-role.json | 12 ++++++++++++ .../cf-templates/identification-role.json | 12 ++++++++++++ ...reporting-remediation-crossaccount-role.json | 17 +++++++++++++++++ .../reporting-remediation-role.json | 17 +++++++++++++++++ deployment/configs/config.json | 5 +++-- deployment/configs/whitelist.json | 3 ++- ...eate_redshift_public_access_issue_tickets.py | 5 +++-- 7 files changed, 66 insertions(+), 5 deletions(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..945a0765 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -63,6 +63,18 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + }, { "Sid": "IamIssues", "Effect": "Allow", diff --git a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..f07a279c 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9bfa486e 100755 --- 
a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,23 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..198e88b1 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,23 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/configs/config.json b/deployment/configs/config.json index ec093524..81a903e2 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -164,9 +164,10 @@ }, "redshift_public_access": { "enabled": true, - "ddb.table_name": "djif-hammer-redshift-public-access", + "ddb.table_name": "hammer-redshift-public-access", + "topic_name": "hammer-describe-redshift-public-access-lambda", "reporting": true, "remediation": 
false, "remediation_retention_period": 21 - } + }, } diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 7890a4c5..151009ee 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -47,6 +47,7 @@ "rds_encryption": { }, "redshift_public_access":{ - + "__comment__": "Detects publicly accessible Redshift Clusters.", + "123456789012": ["test-cluster"] } } diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index cb759809..cab00dcf 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -98,8 +98,9 @@ def create_tickets_redshift_public_access(self): f"*Region*: {region}\n" f"*Redshift Cluster ID*: {cluster_id}\n") - auto_remediation_date = (self.config.now + self.config.redshift_public_access.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.redshift_public_access.remediation: + auto_remediation_date = (self.config.now + self.config.redshift_public_access.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) From 963121aaa39431dcc37b69c102faa044c18becaf Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 23:39:14 +0530 Subject: [PATCH 086/193] Fixed template issues. Fixed template issues. 
--- deployment/cf-templates/identification-crossaccount-role.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index 945a0765..24ddbf26 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -47,7 +47,7 @@ "ec2:DescribeInstances", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", - "ec2:DescribeImages", + "ec2:DescribeImages" ], "Resource": "*" }, From 32529a31665659c77707c20552bcffbf95cfb9e2 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 27 Jun 2019 23:52:08 +0530 Subject: [PATCH 087/193] Updated with Redshift permissions. Updated with Redshift permissions. --- .../identification-crossaccount-role.json | 14 +++++++++++++- .../cf-templates/identification-role.json | 12 ++++++++++++ ...reporting-remediation-crossaccount-role.json | 17 +++++++++++++++++ .../reporting-remediation-role.json | 17 +++++++++++++++++ hammer/library/aws/redshift.py | 1 + 5 files changed, 60 insertions(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..70776629 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -47,7 +47,7 @@ "ec2:DescribeInstances", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", - "ec2:DescribeImages", + "ec2:DescribeImages" ], "Resource": "*" }, @@ -115,6 +115,18 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" } ] } diff --git 
a/deployment/cf-templates/identification-role.json b/deployment/cf-templates/identification-role.json index 288897f2..f07a279c 100755 --- a/deployment/cf-templates/identification-role.json +++ b/deployment/cf-templates/identification-role.json @@ -175,6 +175,18 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 4a1bda43..9bfa486e 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -138,6 +138,23 @@ "sqs:SetQueueAttributes" ], "Resource": "*" + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" } ] } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index e4840a55..198e88b1 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -206,6 +206,23 @@ ], "Resource": "*" }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + 
"redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters", + "redshift:EnableLogging", + "redshift:DisableLogging", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress" + ], + "Resource": "*" + }, { "Sid": "IAMassumeCrossAccountRole", "Effect": "Allow", diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 271592bd..4b2853e5 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -22,6 +22,7 @@ 'subnet_group_name' ]) + class RedshiftClusterOperations(object): @classmethod From 78b41bbf6071c11e4a1b16c6d3963bec9d7e8773 Mon Sep 17 00:00:00 2001 From: Michael Kaufman Date: Sat, 29 Jun 2019 16:38:35 -0400 Subject: [PATCH 088/193] Fix invalid JSON --- deployment/cf-templates/identification-crossaccount-role.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index e5c16c32..e0badae3 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -47,7 +47,7 @@ "ec2:DescribeInstances", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", - "ec2:DescribeImages", + "ec2:DescribeImages" ], "Resource": "*" }, From 2cd289c18c5f2779c9d438f67e4f89e1a1fa4b03 Mon Sep 17 00:00:00 2001 From: Michael Kaufman Date: Sat, 29 Jun 2019 16:59:06 -0400 Subject: [PATCH 089/193] Fix missing topic --- deployment/configs/config.json | 1 + 1 file changed, 1 insertion(+) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 68bb3bef..5f697516 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -136,6 +136,7 @@ "ec2_public_ami": { "enabled": true, "ddb.table_name": "hammer-ec2-public-ami", + "topic_name": "hammer-describe-ami-public-access-lambda", 
"reporting": false, "remediation": false, "remediation_retention_period": 21 From dbcca862acb23691af523a28fcde10cc6c4894a6 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 1 Jul 2019 15:06:08 +0530 Subject: [PATCH 090/193] Updated with ECS external image test case changes. Updated with ECS external image test case changes. --- tests/test_ecs_external_image_source.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/test_ecs_external_image_source.py b/tests/test_ecs_external_image_source.py index 337b34d6..f7a90b9e 100644 --- a/tests/test_ecs_external_image_source.py +++ b/tests/test_ecs_external_image_source.py @@ -1,5 +1,3 @@ -import boto3 - from . import mock_ecs from library.aws.ecs import ECSChecker from library.aws.utility import Account @@ -8,24 +6,23 @@ task_definitions = { "tas_definition": { - "family":'test_ecs_task', + "family": 'test_ecs_image_source', "Description": "Congainer image taken from external source", + "CheckShouldPass": True, "containerDefinitions": [ { 'name': 'hello_world1', 'image': 'docker/hello-world:latest', 'cpu': 1024, 'memory': 400, - 'essential': True, - 'privileged': True + 'essential': True }, { 'name': 'hello_world2', 'image': 'docker/hello-world:latest', 'cpu': 1024, 'memory': 400, - 'essential': True, - 'privileged': True + 'essential': True } ] } @@ -66,6 +63,7 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = ECSChecker(account) checker.check(ids=test_task_definitions) + # create test cases for each response metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) From fd5d92f09124e7bd1855c506c4fa9310844623ca Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 1 Jul 2019 15:19:58 +0530 Subject: [PATCH 091/193] Added ECS privileged access issue test case changes. Updated with ECS privileged access issue test case changes. 
--- tests/mock_ecs.py | 46 +++++++++++++++ tests/test_ecs_privileged_access.py | 90 +++++++++++++++++++++++++++++ tox.ini | 1 + 3 files changed, 137 insertions(+) create mode 100644 tests/mock_ecs.py create mode 100644 tests/test_ecs_privileged_access.py diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py new file mode 100644 index 00000000..21764b72 --- /dev/null +++ b/tests/mock_ecs.py @@ -0,0 +1,46 @@ +import boto3 +import logging + +from moto import mock_ecs +from library.utility import jsonDumps + + +def start(): + """ + Entrypoint for mocking ecs. + :return: nothing + """ + # start ECS mocking with moto + mock = mock_ecs() + mock.start() + + +def create_env_task_definitions(task_definitions, region): + logging.debug(f"======> creating new ECS task definitions from {jsonDumps(task_definitions)}") + ecs_client = boto3.client("ecs", region_name=region) + + test_task_definitions = [] + + for task_definition, rule in task_definitions.items(): + task_definition_arn = ecs_client.register_task_definition( + family=task_definition, + containerDefinitions= rule["containerDefinitions"] + )["taskDefinition"]["taskDefinitionArn"] + task_definition_name = task_definition + test_task_definitions.append(task_definition_name) + + # remove moto precreated task definitions + task_definitions_list_to_check = ecs_client.client.list_task_definition_families() + for task_definition in task_definitions_list_to_check: + + if task_definition not in test_task_definitions: + ecs_client.deregister_task_definition( + taskDefinition=task_definition + ) + + task_definitions = ecs_client.client.list_task_definition_families() + logging.debug(f"{jsonDumps(task_definitions)}") + + # need to return task definitions + return test_task_definitions + diff --git a/tests/test_ecs_privileged_access.py b/tests/test_ecs_privileged_access.py new file mode 100644 index 00000000..f8237807 --- /dev/null +++ b/tests/test_ecs_privileged_access.py @@ -0,0 +1,90 @@ +from . 
import mock_ecs +from library.aws.ecs import ECSChecker +from library.aws.utility import Account + +region = "us-east-1" + +task_definitions = { + "tas_definition1": { + "family": 'test_ecs_privileged_access1', + "Description": "ECS task enabled privileged access", + "CheckShouldPass": False, + "containerDefinitions": [ + { + 'name': 'hello_world1', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'privileged': True + } + ] + }, + "tas_definition2": { + "family": 'test_ecs_privileged_access2', + "Description": "ECS task disabled privileged access", + "CheckShouldPass": True, + "containerDefinitions": [ + { + 'name': 'hello_world2', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'privileged': False + } + ] + } +} + + +def find_task_definition_name(task_definition_details): + for taskDefinition, props in task_definitions.items(): + if props["Id"] == task_definition_details.name: + return taskDefinition + return None + + +def ident_task_definition_test(task_definition_details): + """ + Used to build identification string for each autogenerated test (for easy recognition of failed tests). + + :param task_definition_details: dict with information about rules from + ECSChecker(...) + :return: identification string with task_definition_name. + """ + + name = find_task_definition_name(task_definition_details) + descr = task_definitions.get(name, {}).get("Description", "default description") + return f"params: {name} ({descr})" + + +def pytest_generate_tests(metafunc): + """ + Entrypoint for tests (built-in pytest function for dynamic generation of test cases). 
+ """ + # Launch ECS mocking and env preparation + mock_ecs.start() + test_task_definitions = mock_ecs.create_env_task_definitions(task_definitions, region) + + account = Account(region=region) + + # validate ebs volumes in mocked env + checker = ECSChecker(account) + checker.check(ids=test_task_definitions) + + # create test cases for each response + metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) + + +def test_task(task_definition_details): + """ + Actual testing function. + + :param task_definition_details: dict with information about rules from + ECSChecker(...) + :return: nothing, raises AssertionError if actual test result is not matched with expected + """ + name = find_task_definition_name(task_definition_details) + expected = task_definitions.get(name, {})["CheckShouldPass"] + assert expected == task_definition_details.is_privileged \ No newline at end of file diff --git a/tox.ini b/tox.ini index 058670e4..673cfa4d 100755 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,7 @@ python_paths = hammer/identification/lambdas/ebs-unencrypted-volume-identification hammer/identification/lambdas/ebs-public-snapshots-identification hammer/identification/lambdas/sqs-public-policy-identification + hammer/identification/lambdas/ecs-privileged-access-issues-identification hammer [flake8] From 56b935959fc63e9699ae0cf15fa765ce89703dc0 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 1 Jul 2019 15:24:28 +0530 Subject: [PATCH 092/193] Added ECS logging issue test cases. Added ECS logging issue test cases. 
--- tests/mock_ecs.py | 46 +++++++++++++++++ tests/test_ecs_logging.py | 105 ++++++++++++++++++++++++++++++++++++++ tox.ini | 1 + 3 files changed, 152 insertions(+) create mode 100644 tests/mock_ecs.py create mode 100644 tests/test_ecs_logging.py diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py new file mode 100644 index 00000000..21764b72 --- /dev/null +++ b/tests/mock_ecs.py @@ -0,0 +1,46 @@ +import boto3 +import logging + +from moto import mock_ecs +from library.utility import jsonDumps + + +def start(): + """ + Entrypoint for mocking ecs. + :return: nothing + """ + # start ECS mocking with moto + mock = mock_ecs() + mock.start() + + +def create_env_task_definitions(task_definitions, region): + logging.debug(f"======> creating new ECS task definitions from {jsonDumps(task_definitions)}") + ecs_client = boto3.client("ecs", region_name=region) + + test_task_definitions = [] + + for task_definition, rule in task_definitions.items(): + task_definition_arn = ecs_client.register_task_definition( + family=task_definition, + containerDefinitions= rule["containerDefinitions"] + )["taskDefinition"]["taskDefinitionArn"] + task_definition_name = task_definition + test_task_definitions.append(task_definition_name) + + # remove moto precreated task definitions + task_definitions_list_to_check = ecs_client.client.list_task_definition_families() + for task_definition in task_definitions_list_to_check: + + if task_definition not in test_task_definitions: + ecs_client.deregister_task_definition( + taskDefinition=task_definition + ) + + task_definitions = ecs_client.client.list_task_definition_families() + logging.debug(f"{jsonDumps(task_definitions)}") + + # need to return task definitions + return test_task_definitions + diff --git a/tests/test_ecs_logging.py b/tests/test_ecs_logging.py new file mode 100644 index 00000000..cdb13f0c --- /dev/null +++ b/tests/test_ecs_logging.py @@ -0,0 +1,105 @@ +from . 
import mock_ecs +from library.aws.ecs import ECSChecker +from library.aws.utility import Account + +region = "us-east-1" + +task_definitions = { + "tas_definition1": { + "family": 'test_ecs_logging1', + "Description": "ECS task definition's logging is enabled.", + "CheckShouldPass": False, + "containerDefinitions": [ + { + 'name': 'hello_world1', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'logConfiguration': {'logDriver': 'json-file'} + }, + { + 'name': 'hello_world2', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + }, + "tas_definition2": { + "family": 'test_ecs_logging2', + "Description": "ECS task definition's logging is not enabled.", + "CheckShouldPass": True, + "containerDefinitions": [ + { + 'name': 'hello_world3', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True + }, + { + 'name': 'hello_world4', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True + } + ] + } + +} + + +def find_task_definition_name(task_definition_details): + for taskDefinition, props in task_definitions.items(): + if props["Id"] == task_definition_details.name: + return taskDefinition + return None + + +def ident_task_definition_test(task_definition_details): + """ + Used to build identification string for each autogenerated test (for easy recognition of failed tests). + + :param task_definition_details: dict with information about rules from + ECSChecker(...) + :return: identification string with task_definition_name. + """ + + name = find_task_definition_name(task_definition_details) + descr = task_definitions.get(name, {}).get("Description", "default description") + return f"params: {name} ({descr})" + + +def pytest_generate_tests(metafunc): + """ + Entrypoint for tests (built-in pytest function for dynamic generation of test cases). 
+ """ + # Launch ECS mocking and env preparation + mock_ecs.start() + test_task_definitions = mock_ecs.create_env_task_definitions(task_definitions, region) + + account = Account(region=region) + + # validate ebs volumes in mocked env + checker = ECSChecker(account) + checker.check(ids=test_task_definitions) + + # create test cases for each response + metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) + + +def test_task(task_definition_details): + """ + Actual testing function. + + :param task_definition_details: dict with information about rules from + ECSChecker(...) + :return: nothing, raises AssertionError if actual test result is not matched with expected + """ + name = find_task_definition_name(task_definition_details) + expected = task_definitions.get(name, {})["CheckShouldPass"] + assert expected == task_definition_details.is_logging diff --git a/tox.ini b/tox.ini index 058670e4..dc5dcdff 100755 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,7 @@ python_paths = hammer/identification/lambdas/ebs-unencrypted-volume-identification hammer/identification/lambdas/ebs-public-snapshots-identification hammer/identification/lambdas/sqs-public-policy-identification + hammer/identification/lambdas/ecs-logging-issues-identification hammer [flake8] From 28b954ba34e7e3f6732be4d0e9d351dda056a524 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 3 Jul 2019 15:48:42 +0530 Subject: [PATCH 093/193] Updated with testcase changes. Updated with testcase changes. 
--- tests/mock_ecs.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 21764b72..43da8dd2 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -26,6 +26,8 @@ def create_env_task_definitions(task_definitions, region): family=task_definition, containerDefinitions= rule["containerDefinitions"] )["taskDefinition"]["taskDefinitionArn"] + + logging.debug(f"======> newly created task definition {task_definition_arn}") task_definition_name = task_definition test_task_definitions.append(task_definition_name) From e7935668790e9eb8aa1a5829ceb5508bb88a343a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 4 Jul 2019 17:19:34 +0530 Subject: [PATCH 094/193] Updated with Redshift changes. Updated with Redshift changes. --- hammer/library/aws/redshift.py | 144 +++------------------------------ 1 file changed, 11 insertions(+), 133 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index d6120a6a..6a88e389 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -5,17 +5,16 @@ from collections import namedtuple from library.aws.utility import convert_tags - # structure which describes EC2 instance RedshiftCluster_Details = namedtuple('RedshiftCluster_Details', [ # cluster_id 'id', # subnet_group_id 'subnet_group_name' - ]) +]) -class RedshiftClusterOperations(object): +class RedshiftClusterOperations(object): @classmethod @timeit def get_redshift_vpc_security_groups(cls, redshift_client, group_id): @@ -42,23 +41,6 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): return redshift_clusters - @staticmethod - def set_cluster_encryption(redshift_client, cluster_id, kms_master_key_id): - """ - Sets the cluster encryption using Server side encryption. - - :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to encrypt - :param kms_master_key_id: Redshift cluster encryption key. 
default value is none. - - :return: nothing - """ - - redshift_client.modify_cluster( - ClusterIdentifier=cluster_id, - Encrypted=True - ) - @staticmethod def set_cluster_access(redshift_client, cluster_id, public_access): """ @@ -76,29 +58,13 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) - @staticmethod - def enable_logging(redshift_client, cluster_id, s3_bucket): - """ - Enable cluster audit logging. - - :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to make as private - :param s3_bucket: S3 bucket to store audit logs. - - :return: nothing - """ - - redshift_client.enable_logging( - ClusterIdentifier=cluster_id, - BucketName=s3_bucket - ) - class RedshiftCluster(object): """ Basic class for Redshift Cluster. Encapsulates `Owner`/`Tags`. """ + def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_logging=None): """ :param account: `Account` instance where redshift cluster is present @@ -108,25 +74,12 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo :param is_encrypted: encrypted or not. """ self.account = account - self.name =name + self.name = name self.tags = convert_tags(tags) self.is_encrypt = is_encrypted self.is_public = is_public self.is_logging = is_logging - def encrypt_cluster(self, kms_key_id=None): - """ - Encrypt bucket with SSL encryption. - :return: nothing - """ - try: - RedshiftClusterOperations.set_cluster_encryption(self.account.client("redshift"), self.name, kms_key_id) - except Exception: - logging.exception(f"Failed to encrypt {self.name} cluster ") - return False - - return True - def modify_cluster(self, public_access): """ Modify cluster as private. @@ -140,94 +93,17 @@ def modify_cluster(self, public_access): return True - def enable_cluster_logging(self, s3_bucket): - """ - Enable audit logging for cluster. - - @:param s3_bucket: s3 bucket to store audit logs. 
- :return: nothing - """ - try: - RedshiftClusterOperations.enable_logging(self.account.client("redshift"), self.name, s3_bucket) - except Exception: - logging.exception(f"Failed to enable logging for {self.name} cluster ") - return False - - return True - -class RedshiftEncryptionChecker(object): - """ - Basic class for checking Redshift cluster in account. - Encapsulates discovered Redshift cluster. +class RedshiftClusterChecker(object): """ - def __init__(self, account): - """ - :param account: `Account` instance with Redshift cluster to check - """ - self.account = account - self.clusters = [] - - def get_cluster(self, name): - """ - :return: `Redshift cluster` by name - """ - for cluster in self.clusters: - if cluster.name == name: - return cluster - return None - - def check(self, clusters=None): - """ - Walk through Redshift clusters in the account and check them (encrypted or not). - Put all gathered clusters to `self.clusters`. - - :param clusters: list with Redshift cluster names to check, if it is not supplied - all clusters must be checked - - :return: boolean. 
True - if check was successful, - False - otherwise - """ - try: - # AWS does not support filtering dirung list, so get all clusters for account - response = self.account.client("redshift").describe_clusters() - except ClientError as err: - if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: - logging.error(f"Access denied in {self.account} " - f"(redshift:{err.operation_name})") - else: - logging.exception(f"Failed to list cluster in {self.account}") - return False - - if "Clusters" in response: - for cluster_details in response["Clusters"]: - tags = {} - cluster_id = cluster_details["ClusterIdentifier"] - - if clusters is not None and cluster_id not in clusters: - continue - - is_encrypted = cluster_details["Encrypted"] - if "Tags" in cluster_details: - tags = cluster_details["Tags"] - - cluster = RedshiftCluster(account=self.account, - name=cluster_id, - tags=tags, - is_encrypted=is_encrypted) - self.clusters.append(cluster) - return True - - -class RedshiftClusterPublicAccessChecker(object): - - """ - Basic class for checking redshift clusters public access in account/region. + Basic class for checking redshift clusters public access and encryption in account/region. Encapsulates check settings and discovered clusters. 
""" + def __init__(self, account): """ :param account: `Account` clusters to check - + """ self.account = account self.clusters = [] @@ -271,12 +147,14 @@ def check(self, clusters=None): continue is_public = cluster_details["PubliclyAccessible"] + is_encrypted = cluster_details["Encrypted"] if "Tags" in cluster_details: tags = cluster_details["Tags"] cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, + is_encrypted=is_encrypted, is_public=is_public) self.clusters.append(cluster) @@ -329,7 +207,7 @@ def check(self, clusters=None): if "Clusters" in response: for cluster_details in response["Clusters"]: - logging_enabled = True + logging_enabled = False tags = {} cluster_id = cluster_details["ClusterIdentifier"] From 5d7a1f042f8b33af1923bc7e400b118362421b19 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 4 Jul 2019 18:27:37 +0530 Subject: [PATCH 095/193] Updated with ddb details. Updated with ddb details. --- deployment/cf-templates/ddb.json | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 06681176..12d48056 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -464,22 +464,14 @@ "Properties": { "AttributeDefinitions": [ { - "AttributeName": "account_id", - "AttributeType": "S" - }, - { - "AttributeName": "issue_id", + "AttributeName": "request_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "account_id", + "AttributeName": "request_id", "KeyType": "HASH" - }, - { - "AttributeName": "issue_id", - "KeyType": "RANGE" } ], "ProvisionedThroughput": { @@ -495,14 +487,22 @@ "Properties": { "AttributeDefinitions": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", + "AttributeType": "S" + }, + { + "AttributeName": "issue_id", "AttributeType": "S" } ], "KeySchema": [ { - "AttributeName": "request_id", + "AttributeName": "account_id", "KeyType": 
"HASH" + }, + { + "AttributeName": "issue_id", + "KeyType": "RANGE" } ], "ProvisionedThroughput": { From 4dd258a5bd92fa919f3633ef6b73e8b28d47468a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 4 Jul 2019 20:16:54 +0530 Subject: [PATCH 096/193] Updated with remediation changes. Updated with remediation changes. --- .../remediation/clean_elasticsearch_domain_logging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py index aa8cbe78..f1facfc8 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py @@ -81,7 +81,7 @@ def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): domain_details = checker.get_domain(domain_name) if domain_details is None: logging.debug(f"Elasticsearch domain {domain_name} was removed by user") - elif not domain_details.is_logging: + elif domain_details.is_logging: logging.debug(f"Elasticsearch domain {domain_name} logging issue was remediated by user") else: if not batch and \ From 7fa3d401261394770934ed567e49f16d67f1604c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 4 Jul 2019 21:03:24 +0530 Subject: [PATCH 097/193] Updated with remediation flag changes. Updated with remediation flag changes. 
--- .../create_elasticsearch_public_access_issue_tickets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 6e5cec57..9931f2d8 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -102,7 +102,7 @@ def create_tickets_elasticsearch_public_access(self): issue_description += JiraOperations.build_tags_table(tags) - if self.config.esPublicAccess.remediated: + if self.config.esPublicAccess.remediation: auto_remediation_date = (self.config.now + self.config.esPublicAccess.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" From fab299c75e0d1695dbea3a2a06ccb0a0f146ecc7 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 4 Jul 2019 22:09:07 +0530 Subject: [PATCH 098/193] Updated with ESpublic access ticket chagnes. Updated with ESpublic access ticket chagnes. --- hammer/library/aws/elasticsearch.py | 14 +++++++++++++- ...te_elasticsearch_public_access_issue_tickets.py | 6 +++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index d4e4f5c4..6c1a986c 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -249,7 +249,19 @@ def restrict_policy(self): .. note:: This keeps self._policy unchanged. You need to recheck Elasticsearch domain policy to ensure that it was really restricted. 
""" - restricted_policy = S3Operations.restrict_policy(self._policy) + restricted_policy = {} + policy_statement = {} + principal = {} + statement = [] + + principal["AWS"] = "*" + policy_statement["Effect"] = "Deny" + policy_statement["Principal"] = principal + policy_statement["Action"] = "es*" + policy_statement["Resource"] = self.arn + "/*" + statement.append(policy_statement) + restricted_policy["Statement"] = statement + try: ElasticSearchOperations.put_domain_policy(self.account.client("es"), self.name, restricted_policy) except Exception: diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 9931f2d8..60dd5cf1 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -108,9 +108,9 @@ def create_tickets_elasticsearch_public_access(self): issue_description += ( f"*Recommendation*: " - f"Grant CloudFront OAI applicable permissions on domain " - f"or update domain permissions with VPC CIDRs ranges or ip addresses/ranges from " - f"[RFC1918|https://tools.ietf.org/html/rfc1918]. " + f"Deny public access to domain. Or" + f"Use AWS console which provides preconfigured access policies that can customize for specific" + f" needs of your domain. You also can import access policies from other Amazon ES domains." ) issue_summary = (f"Elasticsearch publicly accessible domain '{domain_name}' " From c6f5a88cbc92cb4cfd98021770d2b5998917a74a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 4 Jul 2019 22:12:09 +0530 Subject: [PATCH 099/193] Updated with ESpublic access ticket chagnes. Updated with ESpublic access ticket chagnes. 
--- .../create_elasticsearch_public_access_issue_tickets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 60dd5cf1..ae29eba5 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -109,8 +109,8 @@ def create_tickets_elasticsearch_public_access(self): issue_description += ( f"*Recommendation*: " f"Deny public access to domain. Or" - f"Use AWS console which provides preconfigured access policies that can customize for specific" - f" needs of your domain. You also can import access policies from other Amazon ES domains." + f"use AWS console which provides preconfigured access policies that can customize for specific" + f" needs of your domain. You also can import access policies from other Amazon ES domains. \n" ) issue_summary = (f"Elasticsearch publicly accessible domain '{domain_name}' " From c8fceeaac1cfa6a4c8b55b48492ec8084baae8ba Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 14:52:40 +0530 Subject: [PATCH 100/193] Updated with Redshift encryption issue changes. Updated with Redshift encryption issue changes and document updates. 
--- deployment/configs/config.json | 4 +- docs/pages/features.md | 2 +- .../pages/playbook15_redshift_unencryption.md | 44 +++-- docs/pages/remediation_backup_rollback.md | 2 +- hammer/library/aws/redshift.py | 35 +++- .../clean_redshift_cluster_unencrypted.py | 151 ++++++++++++++++++ ...shift_unencrypted_cluster_issue_tickets.py | 13 +- 7 files changed, 225 insertions(+), 26 deletions(-) create mode 100644 hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 3c03367a..f7e213ca 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -166,6 +166,8 @@ "enabled": true, "ddb.table_name": "hammer-redshift-unencrypted", "topic_name": "hammer-describe-redshift-cluster-encryption-lambda", - "reporting": true + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 } } diff --git a/docs/pages/features.md b/docs/pages/features.md index beda1d6c..14350b01 100644 --- a/docs/pages/features.md +++ b/docs/pages/features.md @@ -23,4 +23,4 @@ Dow Jones Hammer can identify and report the following issues: |[RDS Unencrypted instances](playbook12_rds_unencryption.html) |Detects not encrypted at rest RDS instances |Any one of RDS instances is not encrypted at reset | |[Redshift Unencrypted Clusters](playbook15_redshift_unencryption.html) |Detects Redshift unencrypted cluster issues |Any one of Redshift cluster is not encrypted at rest | -Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**, **Redshift Unencrypted Clusters**. \ No newline at end of file +Dow Jones Hammer can perform remediation for all issues [except](remediation_backup_rollback.html#1-overview) **EBS Unencrypted volumes**, **CloudTrail Logging Issues** and **RDS Unencrypted instances**. 
\ No newline at end of file diff --git a/docs/pages/playbook15_redshift_unencryption.md b/docs/pages/playbook15_redshift_unencryption.md index b29368ad..cacd1e6b 100644 --- a/docs/pages/playbook15_redshift_unencryption.md +++ b/docs/pages/playbook15_redshift_unencryption.md @@ -43,12 +43,30 @@ This Python module implements the issue reporting functionality: hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py ``` +## 3. Issue Remediation -## 3. Setup Instructions For This Issue +### 3.1 Automatic + +To reduce the workload of your DevOps engineers and mitigate the threats stemming from this issue, you can configure automatic remediation of issues. It means that in case Dow Jones Hammer has detected and reported an issue, but the assignee of the report has not remediated the issue within a timeframe specified in the configuration, the Dow Jones Hammer remediation job will encrypt Redshift cluster eliminate this vulnerability. + +This Python module implements the issue remediation functionality: +``` +hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py + +### 3.2 Manual + +To retain full control on the remediation functionality you can disable automatic remediation in [config.json] and launch it manually: +1. Login to Dow Jones Hammer reporting and remediation EC2 via SSH with **centos** user and ssh key you created during [deployment](configuredeploy_overview.html#25-create-ec2-key-pair-for-hammer): `ssh -l centos -i ` +2. Become **root** user: `sudo su -` +3. Change directory to Dow Jones Hammer sources: `cd /hammer-correlation-engine` +4. Launch Dow Jones Hammer remediation script: `python3.6 -m remediation.clean_redshift_cluster_unencrypted` +5. Confirm or refuse remediation of each issue separately + +## 4. Setup Instructions For This Issue To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: -### 3.1. 
The config.json File +### 4.1. The config.json File The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. To identify and report issues of this type, you should add the following parameters in the **redshift_encryption** section of the **config.json** file: @@ -58,6 +76,9 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-redshift-unencrypted` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| +|`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| + Sample **config.json** section: ``` @@ -70,7 +91,7 @@ Sample **config.json** section: } ``` -### 3.2. The whitelist.json File +### 4.2. The whitelist.json File You can define exceptions to the general automatic remediation settings for specific Redshift clusters. To configure such exceptions, you should edit the **redshift_encryption** section of the **whitelist.json** configuration file as follows: @@ -85,7 +106,7 @@ Sample **whitelist.json** section: } ``` -### 3.3. The ticket_owners.json File +### 4.3. The ticket_owners.json File You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. @@ -120,13 +141,13 @@ Account-specific settings: } ``` -## 4. Logging +## 5. Logging Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. 
Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. -### 4.1. Issue Identification Logging +### 5.1. Issue Identification Logging Dow Jones Hammer issue identification functionality uses two Lambda functions: @@ -140,22 +161,23 @@ You can see the logs for each of these Lambda functions in the following Log Gro |Initialization |`/aws/lambda/initiate-redshift-encryption`| |Identification |`/aws/lambda/describe-redshift-encryption`| -### 4.2. Issue Reporting Logging + +### 5.2. Issue Reporting/Remediation Logging Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: |Designation|CloudWatch Log Stream Name | |-----------|---------------------------------------------------------| |Reporting |`reporting.create_redshift_unencrypted_cluster_issue_tickets`| +|Remediation|`remediation.clean_redshift_cluster_unencrypted` | - -### 4.3. Slack Reports +### 5.3. Slack Reports In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. -### 4.4. Using CloudWatch Logs for Dow Jones Hammer +### 5.4. Using CloudWatch Logs for Dow Jones Hammer To access Dow Jones Hammer logs, proceed as follows: @@ -167,7 +189,7 @@ To access Dow Jones Hammer logs, proceed as follows: Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. -## 5. Issue specific details in DynamoDB +## 6. Issue specific details in DynamoDB Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
diff --git a/docs/pages/remediation_backup_rollback.md b/docs/pages/remediation_backup_rollback.md index 9f404b93..7a42cbc2 100644 --- a/docs/pages/remediation_backup_rollback.md +++ b/docs/pages/remediation_backup_rollback.md @@ -27,7 +27,7 @@ The following table gives an overview of Dow Jones Hammer remediation functional |[SQS Queue Public Access](playbook10_sqs_public_policy.html#3-issue-remediation) | Yes | Yes | |[S3 Unencrypted Buckets](playbook11_s3_unencryption.html#3-issue-remediation) | Yes | Yes | |[RDS Unencrypted instances](playbook12_rds_unencryption.html#3-issue-remediation) | `No` | `No` | -|[Redshift Unencryption issues](playbook15__unencryption.html#3-issue-remediation) | `No` | `No` | +|[Redshift Unencryption issues](playbook15__unencryption.html#3-issue-remediation) | `Yes` | `No` | ## 2. How Remediation Backup Works diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 15eee4ff..1bb98c6a 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -60,6 +60,21 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) + @staticmethod + def cluster_encryption(redshift_client, cluster_id): + """ + + :param redshift_client: redshift client + :param cluster_id: cluster id which need to be encrypted. + + :return: + """ + # Modify cluster as encrypted. + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + Encrypted=True + ) + class RedshiftCluster(object): """ @@ -75,13 +90,12 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo :param is_encrypted: encrypted or not. """ self.account = account - self.name =name + self.name = name self.tags = convert_tags(tags) self.is_encrypt = is_encrypted self.is_public = is_public self.is_logging = is_logging - def modify_cluster(self, public_access): """ Modify cluster as private. 
@@ -95,6 +109,19 @@ def modify_cluster(self, public_access): return True + def encrypt_cluster(self): + """ + Modify cluster as encrypted. + :return: nothing + """ + try: + RedshiftClusterOperations.cluster_encryption(self.account.client("redshift"), self.name) + except Exception: + logging.exception(f"Failed to modify {self.name} cluster encryption ") + return False + + return True + class RedshiftClusterChecker(object): @@ -156,7 +183,7 @@ def check(self, clusters=None): cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, - is_encrypted = is_encrypted, + is_encrypted=is_encrypted, is_public=is_public) self.clusters.append(cluster) @@ -229,4 +256,4 @@ def check(self, clusters=None): is_logging=logging_enabled) self.clusters.append(cluster) - return True \ No newline at end of file + return True diff --git a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py new file mode 100644 index 00000000..7fcc9c79 --- /dev/null +++ b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py @@ -0,0 +1,151 @@ +""" +Class to remediate Redshift cluster un-encryption issues. 
+""" +import sys +import logging +import argparse + + +from library.logger import set_logging, add_cw_logging +from library.config import Config +from library.jiraoperations import JiraReporting +from library.slack_utility import SlackNotification +from library.ddb_issues import Operations as IssueOperations +from library.ddb_issues import IssueStatus, RedshiftEncryptionIssue +from library.aws.redshift import RedshiftEncryptionChecker +from library.aws.utility import Account +from library.utility import confirm +from library.utility import SingletonInstance, SingletonInstanceException + + +class CleanRedshiftClusterUnencryption: + """ Class to remediate Redshift cluster un-encryption issues """ + def __init__(self, config): + self.config = config + + def cleanredshiftclusterunencryption(self, batch=False): + """ Class method to clean Redshift cluster which are violating aws best practices """ + main_account = Account(region=config.aws.region) + ddb_table = main_account.resource("dynamodb").Table(self.config.redshiftEncrypt.ddb_table_name) + + retention_period = self.config.redshiftEncrypt.remediation_retention_period + + jira = JiraReporting(self.config) + slack = SlackNotification(self.config) + + for account_id, account_name in self.config.aws.accounts.items(): + logging.debug(f"Checking '{account_name} / {account_id}'") + issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RedshiftEncryptionIssue) + for issue in issues: + cluster_id = issue.issue_id + + in_whitelist = self.config.redshiftEncrypt.in_whitelist(account_id, cluster_id) + in_fixlist = True + + if in_whitelist: + logging.debug(f"Skipping {cluster_id} (in whitelist)") + # Adding label with "whitelisted" to jira ticket. 
+ jira.add_label( + ticket_id=issue.jira_details.ticket, + labels=IssueStatus.Whitelisted + ) + continue + if not in_fixlist: + logging.debug(f"Skipping {cluster_id} (not in fixlist)") + continue + + if issue.timestamps.reported is None: + logging.debug(f"Skipping '{cluster_id}' (was not reported)") + continue + + if issue.timestamps.remediated is not None: + logging.debug(f"Skipping {cluster_id} (has been already remediated)") + continue + + updated_date = issue.timestamp_as_datetime + no_of_days_issue_created = (self.config.now - updated_date).days + + if no_of_days_issue_created >= retention_period: + owner = issue.jira_details.owner + bu = issue.jira_details.business_unit + product = issue.jira_details.product + + try: + if not batch and \ + not confirm(f"Do you want to remediate '{cluster_id}' Redshift cluster Un-encryption", False): + continue + + account = Account(id=account_id, + name=account_name, + region=issue.issue_details.region, + role_name=self.config.aws.role_name_reporting) + if account.session is None: + continue + + checker = RedshiftEncryptionChecker(account=account) + checker.check(clusters=[cluster_id]) + cluster_details = checker.get_cluster(cluster_id) + + if cluster_id is None: + logging.debug(f"Redshift Cluster {cluster_details.name} was removed by user") + elif cluster_details.is_encrypt: + logging.debug(f"Cluster {cluster_details.name} Un-encryption issue was remediated by user") + else: + logging.debug(f"Remediating '{cluster_details.name}' Un-encryption") + + remediation_succeed = True + if cluster_details.encrypt_cluster(): + comment = (f"Cluster '{cluster_details.name}' un-encryption issue " + f"in '{account_name} / {account_id}' account , '{issue.issue_details.region}' region" + f"was remediated by hammer") + else: + remediation_succeed = False + comment = (f"Failed to remediate cluster '{cluster_details.name}' un-encryption issue " + f"in '{account_name} / {account_id}' account , '{issue.issue_details.region}' region" + f"due to 
some limitations. Please, check manually") + + jira.remediate_issue( + ticket_id=issue.jira_details.ticket, + comment=comment, + reassign=remediation_succeed, + ) + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_remediated(ddb_table, issue) + except Exception: + logging.exception(f"Error occurred while updating cluster '{cluster_id}' un-encryption " + f"in '{account_name} / {account_id}'") + else: + logging.debug(f"Skipping '{cluster_id}' " + f"({retention_period - no_of_days_issue_created} days before remediation)") + + +if __name__ == "__main__": + module_name = sys.modules[__name__].__loader__.name + set_logging(level=logging.DEBUG, logfile=f"/var/log/hammer/{module_name}.log") + config = Config() + add_cw_logging(config.local.log_group, + log_stream=module_name, + level=logging.DEBUG, + region=config.aws.region) + + try: + si = SingletonInstance(module_name) + except SingletonInstanceException: + logging.error(f"Another instance of '{module_name}' is already running, quitting") + sys.exit(1) + + parser = argparse.ArgumentParser() + parser.add_argument('--batch', action='store_true', help='Do not ask confirmation for remediation') + args = parser.parse_args() + + try: + class_object = CleanRedshiftClusterUnencryption(config) + class_object.cleanredshiftclusterunencryption(batch=args.batch) + except Exception: + logging.exception("Failed to clean Redshift cluster unencryption") diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index ea588015..25e212e0 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ 
b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -100,17 +100,14 @@ def create_tickets_redshift_unencrypted_cluster(self): issue_description += JiraOperations.build_tags_table(tags) + if self.config.redshiftEncrypt.remediation: + auto_remediation_date = (self.config.now + self.config.redshiftEncrypt.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + issue_description += "\n" issue_description += ( f"*Recommendation*: \n" - f"Modify an unencrypted cluster using AWS Key Management Service (AWS KMS) encryption. " - f"Follow below steps to encrypt redshift cluster:\n " - f"1. Sign in to the AWS Management Console and open the Amazon Redshift console.\n" - f"2. In navigation pane, choose Clusters, and then choose cluster that you want to modify.\n" - f"3. Choose Cluster, and then choose Modify.\n" - f"4. Choose KMS to enable encryption for Encrypt database field\n" - f"5. For Master Key, choose Enter a key ARN and enter the ARN in the ARN field.\n" - f"6. Choose Modify.\n\n" + f"Modify an unencrypted cluster.\n\n" ) try: From 47b1a2c35961dabb8bf1423beb5e95c0edcb881f Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:00:58 +0530 Subject: [PATCH 101/193] Updated with new redshift changes. Updated with new redshift changes. --- hammer/library/aws/redshift.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 6a88e389..98307ab3 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -58,6 +58,21 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) + @staticmethod + def cluster_encryption(redshift_client, cluster_id): + """ + + :param redshift_client: redshift client + :param cluster_id: cluster id which need to be encrypted. 
+ + :return: + """ + # Modify cluster as encrypted. + redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + Encrypted=True + ) + class RedshiftCluster(object): """ @@ -93,6 +108,19 @@ def modify_cluster(self, public_access): return True + def encrypt_cluster(self): + """ + Modify cluster as encrypted. + :return: nothing + """ + try: + RedshiftClusterOperations.cluster_encryption(self.account.client("redshift"), self.name) + except Exception: + logging.exception(f"Failed to modify {self.name} cluster encryption ") + return False + + return True + class RedshiftClusterChecker(object): """ @@ -227,4 +255,4 @@ def check(self, clusters=None): is_logging=logging_enabled) self.clusters.append(cluster) - return True \ No newline at end of file + return True From f9f5e87ae5eb63d90df24dcb463be981c1c2e069 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:13:01 +0530 Subject: [PATCH 102/193] Added redshift logging unit test case changes. Added redshift logging unit test case changes. --- tests/mock_redshift.py | 55 ++++++++++++++++++++++++ tests/test_redshift_logging.py | 76 ++++++++++++++++++++++++++++++++++ tox.ini | 1 + 3 files changed, 132 insertions(+) create mode 100644 tests/mock_redshift.py create mode 100644 tests/test_redshift_logging.py diff --git a/tests/mock_redshift.py b/tests/mock_redshift.py new file mode 100644 index 00000000..29f85c10 --- /dev/null +++ b/tests/mock_redshift.py @@ -0,0 +1,55 @@ +import boto3 +import logging + +from moto import mock_redshift +from library.utility import jsonDumps + + +def start(): + """ + Entrypoint for mocking ecs. 
+ :return: nothing + """ + # start ECS mocking with moto + mock = mock_redshift() + mock.start() + + +def create_env_clusters(clusters, region): + logging.debug(f"======> creating new Redshift clusters from {jsonDumps(clusters)}") + redshift_client = boto3.client("redshift", region_name=region) + + test_clusters = [] + clusters_list = [] + + for cluster, rule in clusters.items(): + cluster_id = redshift_client.create_cluster( + DBName=rule["DBName"], + ClusterIdentifier=rule["ClusterIdentifier"], + ClusterType=rule["ClusterType"], + NodeType=rule["NodeType"], + MasterUsername=rule["MasterUsername"], + MasterUserPassword=rule["MasterUserPassword"], + PubliclyAccessible=rule["PubliclyAccessible"], + Encrypted=rule["Encrypted"] + )["Cluster"]["ClusterIdentifier"] + + test_clusters.append(cluster_id) + + # remove moto precreated clusters + redshift_clusters_list_to_check = redshift_client.describe_clusters() + for cluster in redshift_clusters_list_to_check["Clusters"]: + + if cluster["ClusterIdentifier"] not in test_clusters: + redshift_client.delete_cluster( + ClusterIdentifier=cluster["ClusterIdentifier"], + SkipFinalClusterSnapshot=True + ) + else: + clusters_list.append(cluster["ClusterIdentifier"]) + + logging.debug(f"{jsonDumps(clusters_list)}") + + # need to return task definitions + return test_clusters + diff --git a/tests/test_redshift_logging.py b/tests/test_redshift_logging.py new file mode 100644 index 00000000..77800a08 --- /dev/null +++ b/tests/test_redshift_logging.py @@ -0,0 +1,76 @@ +import pytest + +from . 
import mock_redshift +from library.aws.redshift import RedshiftLoggingChecker +from library.aws.utility import Account + +region = "us-east-1" + +clusters = { + "cluster1": { + "DBName": "test1", + "ClusterIdentifier": "test1", + "ClusterType": "single-node", + "NodeType": "ds2.xlarge", + "MasterUsername": "user1", + "MasterUserPassword": "testUser1password123", + "Encrypted": True, + "PubliclyAccessible": False, + "CheckShouldPass": True + } +} + + +def find_cluster_name(cluster_details): + for cluster, props in clusters.items(): + if props["ClusterIdentifier"] == cluster_details.name: + return cluster + return None + + +def ident_cluster_test(cluster_details): + """ + Used to build identification string for each autogenerated test (for easy recognition of failed tests). + + :param cluster_details: dict with information about rules from + RedshiftClusterChecker(...) + :return: identification string with cluster name. + """ + + name = find_cluster_name(cluster_details) + descr = clusters.get(name, {}).get("Description", "default description") + return f"params: {name} ({descr})" + + +def pytest_generate_tests(metafunc): + """ + Entrypoint for tests (built-in pytest function for dynamic generation of test cases). + """ + # Launch Redshift mocking and env preparation + mock_redshift.start() + test_clusters = mock_redshift.create_env_clusters(clusters, region) + + account = Account(region=region) + + # validate ebs volumes in mocked env + checker = RedshiftLoggingChecker(account) + checker.check(ids=test_clusters) + + redshift_clusters = [(cluster, False) for cluster in checker.clusters] + + # create test cases for each response + metafunc.parametrize("cluster_details", redshift_clusters, ids=ident_cluster_test) + + +@pytest.mark.redshift_public_access +def test_cluster(cluster_details): + """ + Actual testing function. + + :param cluster_details: dict with information about rules from + RedshiftClusterChecker(...) 
+ :return: nothing, raises AssertionError if actual test result is not matched with expected + """ + name = find_cluster_name(cluster_details) + expected = clusters.get(name, {})["CheckShouldPass"] + assert expected == cluster_details.is_logging diff --git a/tox.ini b/tox.ini index 058670e4..36a06348 100755 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,7 @@ python_paths = hammer/identification/lambdas/ebs-unencrypted-volume-identification hammer/identification/lambdas/ebs-public-snapshots-identification hammer/identification/lambdas/sqs-public-policy-identification + hammer/identification/lambdas/redshift-audit-logging-issues-identification hammer [flake8] From 34dbb7155361443011450a60754d547658732ebf Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:18:21 +0530 Subject: [PATCH 103/193] Added redshift encryption test case changes. Added redshift encryption test case changes. --- hammer/library/aws/redshift.py | 2 +- tests/mock_redshift.py | 55 ++++++++++++++++++ tests/test_redshift_encryption.py | 94 +++++++++++++++++++++++++++++++ tox.ini | 1 + 4 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 tests/mock_redshift.py create mode 100644 tests/test_redshift_encryption.py diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 1bb98c6a..21a735f8 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -236,7 +236,7 @@ def check(self, clusters=None): if "Clusters" in response: for cluster_details in response["Clusters"]: - logging_enabled = True + logging_enabled = False tags = {} cluster_id = cluster_details["ClusterIdentifier"] diff --git a/tests/mock_redshift.py b/tests/mock_redshift.py new file mode 100644 index 00000000..29f85c10 --- /dev/null +++ b/tests/mock_redshift.py @@ -0,0 +1,55 @@ +import boto3 +import logging + +from moto import mock_redshift +from library.utility import jsonDumps + + +def start(): + """ + Entrypoint for mocking ecs. 
+ :return: nothing + """ + # start ECS mocking with moto + mock = mock_redshift() + mock.start() + + +def create_env_clusters(clusters, region): + logging.debug(f"======> creating new Redshift clusters from {jsonDumps(clusters)}") + redshift_client = boto3.client("redshift", region_name=region) + + test_clusters = [] + clusters_list = [] + + for cluster, rule in clusters.items(): + cluster_id = redshift_client.create_cluster( + DBName=rule["DBName"], + ClusterIdentifier=rule["ClusterIdentifier"], + ClusterType=rule["ClusterType"], + NodeType=rule["NodeType"], + MasterUsername=rule["MasterUsername"], + MasterUserPassword=rule["MasterUserPassword"], + PubliclyAccessible=rule["PubliclyAccessible"], + Encrypted=rule["Encrypted"] + )["Cluster"]["ClusterIdentifier"] + + test_clusters.append(cluster_id) + + # remove moto precreated clusters + redshift_clusters_list_to_check = redshift_client.describe_clusters() + for cluster in redshift_clusters_list_to_check["Clusters"]: + + if cluster["ClusterIdentifier"] not in test_clusters: + redshift_client.delete_cluster( + ClusterIdentifier=cluster["ClusterIdentifier"], + SkipFinalClusterSnapshot=True + ) + else: + clusters_list.append(cluster["ClusterIdentifier"]) + + logging.debug(f"{jsonDumps(clusters_list)}") + + # need to return task definitions + return test_clusters + diff --git a/tests/test_redshift_encryption.py b/tests/test_redshift_encryption.py new file mode 100644 index 00000000..6efe67cd --- /dev/null +++ b/tests/test_redshift_encryption.py @@ -0,0 +1,94 @@ +import pytest + +from . 
import mock_redshift +from library.aws.redshift import RedshiftClusterChecker +from library.aws.utility import Account + +region = "us-east-1" + +clusters = { + "cluster1": { + "DBName": "test1", + "ClusterIdentifier": "test1", + "ClusterType": "single-node", + "NodeType": "ds2.xlarge", + "MasterUsername": "user1", + "MasterUserPassword": "testUser1password123", + "Encrypted": True, + "PubliclyAccessible": False, + "CheckShouldPass": True + }, + "cluster2": { + "DBName": "test2", + "ClusterIdentifier": "test2", + "ClusterType": "single-node", + "NodeType": "ds2.xlarge", + "MasterUsername": "user2", + "MasterUserPassword": "testUser2password123", + "Encrypted": False, + "PubliclyAccessible": False, + "CheckShouldPass": False + } +} + + +def find_cluster_name(cluster_details): + for cluster, props in clusters.items(): + if props["ClusterIdentifier"] == cluster_details.name: + return cluster + return None + + +def ident_cluster_test(cluster_details): + """ + Used to build identification string for each autogenerated test (for easy recognition of failed tests). + + :param cluster_details: dict with information about rules from + RedshiftClusterChecker(...) + :return: identification string with cluster name. + """ + + name = find_cluster_name(cluster_details) + descr = clusters.get(name, {}).get("Description", "default description") + return f"params: {name} ({descr})" + + +def pytest_generate_tests(metafunc): + """ + Entrypoint for tests (built-in pytest function for dynamic generation of test cases). 
+ """ + # Launch Redshift mocking and env preparation + mock_redshift.start() + test_clusters = mock_redshift.create_env_clusters(clusters, region) + + account = Account(region=region) + + # validate ebs volumes in mocked env + checker = RedshiftClusterChecker(account) + checker.check(ids=test_clusters) + + for cluster in checker.clusters: + cluster.encrypt_cluster() + + checker_remediated = RedshiftClusterChecker(account) + checker_remediated.check() + + redshift_clusters = [(cluster, False) for cluster in checker.clusters] + redshift_clusters += [(cluster, True) for cluster in checker_remediated.clusters] + + # create test cases for each response + metafunc.parametrize("cluster_details", redshift_clusters, ids=ident_cluster_test) + + +@pytest.mark.redshift_public_access +def test_cluster(cluster_details): + """ + Actual testing function. + + :param cluster_details: dict with information about rules from + RedshiftClusterChecker(...) + :return: nothing, raises AssertionError if actual test result is not matched with expected + """ + name = find_cluster_name(cluster_details) + expected = clusters.get(name, {})["CheckShouldPass"] + assert expected == cluster_details.is_encrypt diff --git a/tox.ini b/tox.ini index 058670e4..e9f4fc7f 100755 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,7 @@ python_paths = hammer/identification/lambdas/ebs-unencrypted-volume-identification hammer/identification/lambdas/ebs-public-snapshots-identification hammer/identification/lambdas/sqs-public-policy-identification + hammer/identification/lambdas/redshift-unencrypted-cluster-identification hammer [flake8] From 691933ee3b2d0a3a253b345a4d658511a810c18f Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:23:20 +0530 Subject: [PATCH 104/193] Added csv and cronjob changes. Added csv and cronjob changes. 
--- .../analytics/security_issues_csv_report.py | 5 ++++- .../reporting-remediation/cronjobs/automation_scheduler.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index a6c86fae..66086274 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,9 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, \ + RedshiftEncryptionIssue from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -69,6 +71,7 @@ def generate(self): (self.config.ebsSnapshot.ddb_table_name, "EBS Public Snapshots", EBSPublicSnapshotIssue), (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), + (self.config.redshiftEncrypt.ddb_table_name, "Redshift Unencrypted Clusters", RedshiftEncryptionIssue) ] open_security_issues_workbook = xlwt.Workbook() diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index 8afd30f8..c822f71d 100755 --- a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ 
b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,8 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("Redshift Unencrypted Clusters", config.redshiftEncrypt, "create_redshift_unencrypted_cluster_issue_tickets", + "clean_redshift_cluster_unencrypted"), ] for title, module_config, reporting_script, remediation_script in modules: From 4f057adf30769f211b833bee0ece1148eeb31624 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:30:23 +0530 Subject: [PATCH 105/193] Added redshift unit test cases. Added redshift unit test cases. --- hammer/library/aws/redshift.py | 36 ++++++- .../analytics/security_issues_csv_report.py | 5 +- .../cronjobs/automation_scheduler.py | 2 + tests/mock_redshift.py | 55 +++++++++++ tests/test_redshift_public_access.py | 94 +++++++++++++++++++ tox.ini | 1 + 6 files changed, 188 insertions(+), 5 deletions(-) create mode 100644 tests/mock_redshift.py create mode 100644 tests/test_redshift_public_access.py diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 8fba21bb..21a735f8 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -60,6 +60,21 @@ def set_cluster_access(redshift_client, cluster_id, public_access): PubliclyAccessible=public_access ) + @staticmethod + def cluster_encryption(redshift_client, cluster_id): + """ + + :param redshift_client: redshift client + :param cluster_id: cluster id which need to be encrypted. + + :return: + """ + # Modify cluster as encrypted. 
+ redshift_client.modify_cluster( + ClusterIdentifier=cluster_id, + Encrypted=True + ) + class RedshiftCluster(object): """ @@ -75,7 +90,7 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo :param is_encrypted: encrypted or not. """ self.account = account - self.name =name + self.name = name self.tags = convert_tags(tags) self.is_encrypt = is_encrypted self.is_public = is_public @@ -94,6 +109,19 @@ def modify_cluster(self, public_access): return True + def encrypt_cluster(self): + """ + Modify cluster as encrypted. + :return: nothing + """ + try: + RedshiftClusterOperations.cluster_encryption(self.account.client("redshift"), self.name) + except Exception: + logging.exception(f"Failed to modify {self.name} cluster encryption ") + return False + + return True + class RedshiftClusterChecker(object): @@ -155,7 +183,7 @@ def check(self, clusters=None): cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, - is_encrypted = is_encrypted, + is_encrypted=is_encrypted, is_public=is_public) self.clusters.append(cluster) @@ -208,7 +236,7 @@ def check(self, clusters=None): if "Clusters" in response: for cluster_details in response["Clusters"]: - logging_enabled = True + logging_enabled = False tags = {} cluster_id = cluster_details["ClusterIdentifier"] @@ -228,4 +256,4 @@ def check(self, clusters=None): is_logging=logging_enabled) self.clusters.append(cluster) - return True \ No newline at end of file + return True diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index a6c86fae..a100a033 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,9 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues 
import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, \ + RedshiftPublicAccessIssue from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -69,6 +71,7 @@ def generate(self): (self.config.ebsSnapshot.ddb_table_name, "EBS Public Snapshots", EBSPublicSnapshotIssue), (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), + (self.config.redshift_public_access.ddb_table_name, "Redshift Public Clusters", RedshiftPublicAccessIssue) ] open_security_issues_workbook = xlwt.Workbook() diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index 8afd30f8..e6573048 100755 --- a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,8 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("Redshift Public Clusters", config.redshift_public_access, "create_redshift_public_access_issue_tickets", + "clean_redshift_public_access"), ] for title, module_config, reporting_script, remediation_script in modules: diff --git 
a/tests/mock_redshift.py b/tests/mock_redshift.py new file mode 100644 index 00000000..29f85c10 --- /dev/null +++ b/tests/mock_redshift.py @@ -0,0 +1,55 @@ +import boto3 +import logging + +from moto import mock_redshift +from library.utility import jsonDumps + + +def start(): + """ + Entrypoint for mocking ecs. + :return: nothing + """ + # start ECS mocking with moto + mock = mock_redshift() + mock.start() + + +def create_env_clusters(clusters, region): + logging.debug(f"======> creating new Redshift clusters from {jsonDumps(clusters)}") + redshift_client = boto3.client("redshift", region_name=region) + + test_clusters = [] + clusters_list = [] + + for cluster, rule in clusters.items(): + cluster_id = redshift_client.create_cluster( + DBName=rule["DBName"], + ClusterIdentifier=rule["ClusterIdentifier"], + ClusterType=rule["ClusterType"], + NodeType=rule["NodeType"], + MasterUsername=rule["MasterUsername"], + MasterUserPassword=rule["MasterUserPassword"], + PubliclyAccessible=rule["PubliclyAccessible"], + Encrypted=rule["Encrypted"] + )["Cluster"]["ClusterIdentifier"] + + test_clusters.append(cluster_id) + + # remove moto precreated clusters + redshift_clusters_list_to_check = redshift_client.describe_clusters() + for cluster in redshift_clusters_list_to_check["Clusters"]: + + if cluster["ClusterIdentifier"] not in test_clusters: + redshift_client.delete_cluster( + ClusterIdentifier=cluster["ClusterIdentifier"], + SkipFinalClusterSnapshot=True + ) + else: + clusters_list.append(cluster["ClusterIdentifier"]) + + logging.debug(f"{jsonDumps(clusters_list)}") + + # need to return task definitions + return test_clusters + diff --git a/tests/test_redshift_public_access.py b/tests/test_redshift_public_access.py new file mode 100644 index 00000000..5e5213bb --- /dev/null +++ b/tests/test_redshift_public_access.py @@ -0,0 +1,94 @@ +import pytest + +from . 
import mock_redshift +from library.aws.redshift import RedshiftClusterChecker +from library.aws.utility import Account + +region = "us-east-1" + +clusters = { + "cluster1": { + "DBName": "test1", + "ClusterIdentifier": "test1", + "ClusterType": "single-node", + "NodeType": "ds2.xlarge", + "MasterUsername": "user1", + "MasterUserPassword": "testUser1password123", + "PubliclyAccessible": True, + "Encrypted": True, + "CheckShouldPass": True + }, + "cluster2": { + "DBName": "test2", + "ClusterIdentifier": "test2", + "ClusterType": "single-node", + "NodeType": "ds2.xlarge", + "MasterUsername": "user2", + "MasterUserPassword": "testUser2password123", + "PubliclyAccessible": False, + "Encrypted": True, + "CheckShouldPass": False + } +} + + +def find_cluster_name(cluster_details): + for cluster, props in clusters.items(): + if props["ClusterIdentifier"] == cluster_details.name: + return cluster + return None + + +def ident_cluster_test(cluster_details): + """ + Used to build identification string for each autogenerated test (for easy recognition of failed tests). + + :param cluster_details: dict with information about rules from + RedshiftClusterChecker(...) + :return: identification string with cluster name. + """ + + name = find_cluster_name(cluster_details) + descr = clusters.get(name, {}).get("Description", "default description") + return f"params: {name} ({descr})" + + +def pytest_generate_tests(metafunc): + """ + Entrypoint for tests (built-in pytest function for dynamic generation of test cases). 
+ """ + # Launch Redshift mocking and env preparation + mock_redshift.start() + test_clusters = mock_redshift.create_env_clusters(clusters, region) + + account = Account(region=region) + + # validate ebs volumes in mocked env + checker = RedshiftClusterChecker(account) + checker.check(ids=test_clusters) + + for cluster in checker.clusters: + cluster.modify_cluster(True) + + checker_remediated = RedshiftClusterChecker(account) + checker_remediated.check() + + redshift_clusters = [(cluster, False) for cluster in checker.clusters] + redshift_clusters += [(cluster, True) for cluster in checker_remediated.clusters] + + # create test cases for each response + metafunc.parametrize("cluster_details", redshift_clusters, ids=ident_cluster_test) + + +@pytest.mark.redshift_public_access +def test_cluster(cluster_details): + """ + Actual testing function. + + :param cluster_details: dict with information about rules from + RedshiftClusterChecker(...) + :return: nothing, raises AssertionError if actual test result is not matched with expected + """ + name = find_cluster_name(cluster_details) + expected = clusters.get(name, {})["CheckShouldPass"] + assert expected == cluster_details.is_public diff --git a/tox.ini b/tox.ini index 058670e4..518ff639 100755 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,7 @@ python_paths = hammer/identification/lambdas/ebs-unencrypted-volume-identification hammer/identification/lambdas/ebs-public-snapshots-identification hammer/identification/lambdas/sqs-public-policy-identification + hammer/identification/lambdas/redshift-cluster-public-access-identification hammer [flake8] From 4e331b6e4ba5e8fad5070cc4e9b171fad795874d Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:36:37 +0530 Subject: [PATCH 106/193] Added ECS image source unit test cases. Added ECS image source unit test cases. 
--- .../analytics/security_issues_csv_report.py | 7 ++++++- .../cronjobs/automation_scheduler.py | 1 + tests/mock_ecs.py | 19 +++++++++---------- tests/test_ecs_external_image_source.py | 5 ++++- 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index a6c86fae..e03f3a19 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,10 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, \ + ECSExternalImageSourceIssue + from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -69,6 +72,8 @@ def generate(self): (self.config.ebsSnapshot.ddb_table_name, "EBS Public Snapshots", EBSPublicSnapshotIssue), (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), + (self.config.ecs_external_image_source.ddb_table_name, "ECS External Image Sources", + ECSExternalImageSourceIssue) ] open_security_issues_workbook = xlwt.Workbook() diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index 8afd30f8..7b5d0468 
100755 --- a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,7 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("ECS External Image Sources", config.ecs_external_image_source, "create_ecs_external_image_source_issue_tickets", None) ] for title, module_config, reporting_script, remediation_script in modules: diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 21764b72..7dc1b434 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -22,23 +22,22 @@ def create_env_task_definitions(task_definitions, region): test_task_definitions = [] for task_definition, rule in task_definitions.items(): - task_definition_arn = ecs_client.register_task_definition( - family=task_definition, - containerDefinitions= rule["containerDefinitions"] - )["taskDefinition"]["taskDefinitionArn"] - task_definition_name = task_definition - test_task_definitions.append(task_definition_name) + ecs_client.register_task_definition( + family=rule["family"], + containerDefinitions=rule["containerDefinitions"] + ) - # remove moto precreated task definitions - task_definitions_list_to_check = ecs_client.client.list_task_definition_families() - for task_definition in task_definitions_list_to_check: + test_task_definitions.append(rule["family"]) + # remove moto precreated task definitions + task_definitions_list_to_check = ecs_client.list_task_definition_families() + for task_definition in task_definitions_list_to_check["families"]: if task_definition not in test_task_definitions: ecs_client.deregister_task_definition( taskDefinition=task_definition ) - task_definitions = 
ecs_client.client.list_task_definition_families() + task_definitions = ecs_client.list_task_definition_families()["families"] logging.debug(f"{jsonDumps(task_definitions)}") # need to return task definitions diff --git a/tests/test_ecs_external_image_source.py b/tests/test_ecs_external_image_source.py index f7a90b9e..061f3fc5 100644 --- a/tests/test_ecs_external_image_source.py +++ b/tests/test_ecs_external_image_source.py @@ -1,3 +1,5 @@ +import pytest + from . import mock_ecs from library.aws.ecs import ECSChecker from library.aws.utility import Account @@ -31,7 +33,7 @@ def find_task_definition_name(task_definition_details): for taskDefinition, props in task_definitions.items(): - if props["Id"] == task_definition_details.name: + if props["family"] == task_definition_details.name: return taskDefinition return None @@ -68,6 +70,7 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) +@pytest.mark.ecs_external_image_source def test_task(task_definition_details): """ Actual testing function. From 35fb28011f3a051dca1a700678b1206764398838 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:43:50 +0530 Subject: [PATCH 107/193] Added ECSPrivileged Access issue unit test cases. Added ECSPrivileged Access issue unit test cases. 
--- .../analytics/security_issues_csv_report.py | 6 +++++- .../cronjobs/automation_scheduler.py | 1 + tests/mock_ecs.py | 19 +++++++++---------- tests/test_ecs_privileged_access.py | 5 ++++- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index a6c86fae..c0db6a1d 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,10 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, \ + ECSPrivilegedAccessIssue + from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -69,6 +72,7 @@ def generate(self): (self.config.ebsSnapshot.ddb_table_name, "EBS Public Snapshots", EBSPublicSnapshotIssue), (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), + (self.config.ecs_privileged_access.ddb_table_name, "ECS Privileged Access Issues", ECSPrivilegedAccessIssue), ] open_security_issues_workbook = xlwt.Workbook() diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index 8afd30f8..46618cde 100755 --- 
a/hammer/reporting-remediation/cronjobs/automation_scheduler.py +++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,7 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("ECS Privileged Access Issues", config.ecs_privileged_access, "create_ecs_privileged_access_issue_tickets", None) ] for title, module_config, reporting_script, remediation_script in modules: diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 21764b72..7dc1b434 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -22,23 +22,22 @@ def create_env_task_definitions(task_definitions, region): test_task_definitions = [] for task_definition, rule in task_definitions.items(): - task_definition_arn = ecs_client.register_task_definition( - family=task_definition, - containerDefinitions= rule["containerDefinitions"] - )["taskDefinition"]["taskDefinitionArn"] - task_definition_name = task_definition - test_task_definitions.append(task_definition_name) + ecs_client.register_task_definition( + family=rule["family"], + containerDefinitions=rule["containerDefinitions"] + ) - # remove moto precreated task definitions - task_definitions_list_to_check = ecs_client.client.list_task_definition_families() - for task_definition in task_definitions_list_to_check: + test_task_definitions.append(rule["family"]) + # remove moto precreated task definitions + task_definitions_list_to_check = ecs_client.list_task_definition_families() + for task_definition in task_definitions_list_to_check["families"]: if task_definition not in test_task_definitions: ecs_client.deregister_task_definition( taskDefinition=task_definition ) - task_definitions = 
ecs_client.client.list_task_definition_families() + task_definitions = ecs_client.list_task_definition_families()["families"] logging.debug(f"{jsonDumps(task_definitions)}") # need to return task definitions diff --git a/tests/test_ecs_privileged_access.py b/tests/test_ecs_privileged_access.py index f8237807..0affbe66 100644 --- a/tests/test_ecs_privileged_access.py +++ b/tests/test_ecs_privileged_access.py @@ -1,3 +1,5 @@ +import pytest + from . import mock_ecs from library.aws.ecs import ECSChecker from library.aws.utility import Account @@ -40,7 +42,7 @@ def find_task_definition_name(task_definition_details): for taskDefinition, props in task_definitions.items(): - if props["Id"] == task_definition_details.name: + if props["family"] == task_definition_details.name: return taskDefinition return None @@ -77,6 +79,7 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) +@pytest.mark.ecs_privileged_access def test_task(task_definition_details): """ Actual testing function. From f3d46e9cc4111be16284623ada5a18b0114edcfa Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 15:47:17 +0530 Subject: [PATCH 108/193] Added ECSLogging issue unit test cases. Added ECSLogging issue unit test cases. 
--- .../analytics/security_issues_csv_report.py | 4 +++- .../cronjobs/automation_scheduler.py | 1 + tests/mock_ecs.py | 19 ++++++++----------- tests/test_ecs_logging.py | 5 ++++- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/hammer/reporting-remediation/analytics/security_issues_csv_report.py b/hammer/reporting-remediation/analytics/security_issues_csv_report.py index a6c86fae..8ccb2603 100755 --- a/hammer/reporting-remediation/analytics/security_issues_csv_report.py +++ b/hammer/reporting-remediation/analytics/security_issues_csv_report.py @@ -8,7 +8,8 @@ from library.aws.utility import AssumeRole from library.config import Config from library.ddb_issues import Operations as IssueOperations -from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue +from library.ddb_issues import SecurityGroupIssue, S3AclIssue, S3PolicyIssue, CloudTrailIssue, IAMKeyRotationIssue, \ + IAMKeyInactiveIssue, RdsPublicSnapshotIssue, EBSUnencryptedVolumeIssue, EBSPublicSnapshotIssue, ECSLoggingIssue from analytics.add_excel_sheet_records import AddRecordsToSheet from library.slack_utility import SlackNotification from library.aws.s3 import S3Operations @@ -69,6 +70,7 @@ def generate(self): (self.config.ebsSnapshot.ddb_table_name, "EBS Public Snapshots", EBSPublicSnapshotIssue), (self.config.cloudtrails.ddb_table_name, "CloudTrail Logging Issues", CloudTrailIssue), (self.config.rdsSnapshot.ddb_table_name, "RDS Public Snapshots", RdsPublicSnapshotIssue), + (self.config.ecs_logging.ddb_table_name, "ECS Logging Issues", ECSLoggingIssue) ] open_security_issues_workbook = xlwt.Workbook() diff --git a/hammer/reporting-remediation/cronjobs/automation_scheduler.py b/hammer/reporting-remediation/cronjobs/automation_scheduler.py index 8afd30f8..94979756 100755 --- a/hammer/reporting-remediation/cronjobs/automation_scheduler.py 
+++ b/hammer/reporting-remediation/cronjobs/automation_scheduler.py @@ -57,6 +57,7 @@ def automation_cronjob(config): ("SQS Public Access", config.sqspolicy, "create_sqs_policy_issue_tickets", "clean_sqs_policy_permissions"), ("S3 Unencrypted Buckets", config.s3Encrypt, "create_s3_unencrypted_bucket_issue_tickets", "clean_s3bucket_unencrypted"), ("RDS Unencrypted Instances", config.rdsEncrypt, "create_rds_unencrypted_instance_issue_tickets", None), + ("ECS Logging Issues", config.ecs_logging, "create_ecs_logging_issue_tickets", None) ] for title, module_config, reporting_script, remediation_script in modules: diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 43da8dd2..7dc1b434 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -22,25 +22,22 @@ def create_env_task_definitions(task_definitions, region): test_task_definitions = [] for task_definition, rule in task_definitions.items(): - task_definition_arn = ecs_client.register_task_definition( - family=task_definition, - containerDefinitions= rule["containerDefinitions"] - )["taskDefinition"]["taskDefinitionArn"] + ecs_client.register_task_definition( + family=rule["family"], + containerDefinitions=rule["containerDefinitions"] + ) - logging.debug(f"======> newly created task definition {task_definition_arn}") - task_definition_name = task_definition - test_task_definitions.append(task_definition_name) + test_task_definitions.append(rule["family"]) # remove moto precreated task definitions - task_definitions_list_to_check = ecs_client.client.list_task_definition_families() - for task_definition in task_definitions_list_to_check: - + task_definitions_list_to_check = ecs_client.list_task_definition_families() + for task_definition in task_definitions_list_to_check["families"]: if task_definition not in test_task_definitions: ecs_client.deregister_task_definition( taskDefinition=task_definition ) - task_definitions = ecs_client.client.list_task_definition_families() + task_definitions = 
ecs_client.list_task_definition_families()["families"] logging.debug(f"{jsonDumps(task_definitions)}") # need to return task definitions diff --git a/tests/test_ecs_logging.py b/tests/test_ecs_logging.py index cdb13f0c..c1a3d7a1 100644 --- a/tests/test_ecs_logging.py +++ b/tests/test_ecs_logging.py @@ -1,3 +1,5 @@ +import pytest + from . import mock_ecs from library.aws.ecs import ECSChecker from library.aws.utility import Account @@ -55,7 +57,7 @@ def find_task_definition_name(task_definition_details): for taskDefinition, props in task_definitions.items(): - if props["Id"] == task_definition_details.name: + if props["family"] == task_definition_details.name: return taskDefinition return None @@ -92,6 +94,7 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) +@pytest.mark.ecs_logging def test_task(task_definition_details): """ Actual testing function. From fcfd61bd5f007d7eeee7455ca98f13b1b425d669 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 16:44:26 +0530 Subject: [PATCH 109/193] Updated with redshift encryption remediation changes. Updated with redshift encryption remediation changes. 
--- .../remediation/clean_redshift_cluster_unencrypted.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py index 7fcc9c79..2001c164 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py @@ -12,7 +12,7 @@ from library.slack_utility import SlackNotification from library.ddb_issues import Operations as IssueOperations from library.ddb_issues import IssueStatus, RedshiftEncryptionIssue -from library.aws.redshift import RedshiftEncryptionChecker +from library.aws.redshift import RedshiftClusterChecker from library.aws.utility import Account from library.utility import confirm from library.utility import SingletonInstance, SingletonInstanceException @@ -82,7 +82,7 @@ def cleanredshiftclusterunencryption(self, batch=False): if account.session is None: continue - checker = RedshiftEncryptionChecker(account=account) + checker = RedshiftClusterChecker(account=account) checker.check(clusters=[cluster_id]) cluster_details = checker.get_cluster(cluster_id) From 30e02849132281d3eba8126248b582eba9344755 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 19:20:58 +0530 Subject: [PATCH 110/193] Added redshift encryption test cases Added redshift encryption test cases --- tests/test_redshift_encryption.py | 37 +++++++++++++++++-------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/tests/test_redshift_encryption.py b/tests/test_redshift_encryption.py index 6efe67cd..df22f4ac 100644 --- a/tests/test_redshift_encryption.py +++ b/tests/test_redshift_encryption.py @@ -32,14 +32,15 @@ } -def find_cluster_name(cluster_details): - for cluster, props in clusters.items(): - if props["ClusterIdentifier"] == cluster_details.name: - return cluster - 
return None +def find_rule_prop(cluster_details, prop, default): + try: + return clusters[cluster_details.name][prop] + except KeyError: + return default -def ident_cluster_test(cluster_details): + +def ident_cluster_test(arg): """ Used to build identification string for each autogenerated test (for easy recognition of failed tests). @@ -47,10 +48,11 @@ def ident_cluster_test(cluster_details): RedshiftClusterChecker(...) :return: identification string with cluster name. """ - - name = find_cluster_name(cluster_details) - descr = clusters.get(name, {}).get("Description", "default description") - return f"params: {name} ({descr})" + if isinstance(arg, bool): + return "remediated" if arg else "original" + else: + descr = find_rule_prop(arg, "Description", "default description") + return f"params: {arg.name} ({descr})" def pytest_generate_tests(metafunc): @@ -65,7 +67,7 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = RedshiftClusterChecker(account) - checker.check(ids=test_clusters) + checker.check(clusters=test_clusters) for cluster in checker.clusters: cluster.encrypt_cluster() @@ -77,18 +79,19 @@ def pytest_generate_tests(metafunc): redshift_clusters += [(cluster, True) for cluster in checker_remediated.clusters] # create test cases for each response - metafunc.parametrize("cluster_details", redshift_clusters, ids=ident_cluster_test) + metafunc.parametrize("cluster,remediated", redshift_clusters, ids=ident_cluster_test) @pytest.mark.redshift_public_access -def test_cluster(cluster_details): +def test_cluster(cluster, remediated): """ Actual testing function. - :param cluster_details: dict with information about rules from + :param cluster: dict with information about rules from RedshiftClusterChecker(...) 
+ :param remediated: remediation details :return: nothing, raises AssertionError if actual test result is not matched with expected """ - name = find_cluster_name(cluster_details) - expected = clusters.get(name, {})["CheckShouldPass"] - assert expected == cluster_details.is_encrypt + + expected = True if remediated else find_rule_prop(cluster, "CheckShouldPass", True) + assert expected == cluster.is_encrypt From 6ef89fea696c7fe5c15aa82c73e379eac0f46054 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 19:47:02 +0530 Subject: [PATCH 111/193] Updated with redshift public access test case changes. Updated with redshift public access test case changes. --- tests/mock_redshift.py | 2 +- tests/test_redshift_public_access.py | 47 +++++++++++++++------------- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/tests/mock_redshift.py b/tests/mock_redshift.py index 29f85c10..39ff3620 100644 --- a/tests/mock_redshift.py +++ b/tests/mock_redshift.py @@ -24,7 +24,7 @@ def create_env_clusters(clusters, region): for cluster, rule in clusters.items(): cluster_id = redshift_client.create_cluster( - DBName=rule["DBName"], + DBName=cluster, ClusterIdentifier=rule["ClusterIdentifier"], ClusterType=rule["ClusterType"], NodeType=rule["NodeType"], diff --git a/tests/test_redshift_public_access.py b/tests/test_redshift_public_access.py index 5e5213bb..5f86a4ab 100644 --- a/tests/test_redshift_public_access.py +++ b/tests/test_redshift_public_access.py @@ -8,7 +8,7 @@ clusters = { "cluster1": { - "DBName": "test1", + "Description": "Cluster has public access", "ClusterIdentifier": "test1", "ClusterType": "single-node", "NodeType": "ds2.xlarge", @@ -16,10 +16,10 @@ "MasterUserPassword": "testUser1password123", "PubliclyAccessible": True, "Encrypted": True, - "CheckShouldPass": True + "CheckShouldPass": False }, "cluster2": { - "DBName": "test2", + "Description": "Cluster has private access", "ClusterIdentifier": "test2", "ClusterType": "single-node", 
"NodeType": "ds2.xlarge", @@ -27,30 +27,32 @@ "MasterUserPassword": "testUser2password123", "PubliclyAccessible": False, "Encrypted": True, - "CheckShouldPass": False + "CheckShouldPass": True } } -def find_cluster_name(cluster_details): - for cluster, props in clusters.items(): - if props["ClusterIdentifier"] == cluster_details.name: - return cluster - return None +def find_rule_prop(cluster_details, prop, default): + try: + return clusters[cluster_details.name][prop] + except KeyError: + return default -def ident_cluster_test(cluster_details): + +def ident_cluster_test(arg): """ Used to build identification string for each autogenerated test (for easy recognition of failed tests). - :param cluster_details: dict with information about rules from + :param arg: dict with information about rules from RedshiftClusterChecker(...) :return: identification string with cluster name. """ - - name = find_cluster_name(cluster_details) - descr = clusters.get(name, {}).get("Description", "default description") - return f"params: {name} ({descr})" + if isinstance(arg, bool): + return "remediated" if arg else "original" + else: + descr = find_rule_prop(arg, "Description", "default description") + return f"params: {arg.name} ({descr})" def pytest_generate_tests(metafunc): @@ -65,10 +67,11 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = RedshiftClusterChecker(account) - checker.check(ids=test_clusters) + checker.check(clusters=test_clusters) for cluster in checker.clusters: - cluster.modify_cluster(True) + if cluster.is_public: + cluster.modify_cluster(False) checker_remediated = RedshiftClusterChecker(account) checker_remediated.check() @@ -77,18 +80,18 @@ def pytest_generate_tests(metafunc): redshift_clusters += [(cluster, True) for cluster in checker_remediated.clusters] # create test cases for each response - metafunc.parametrize("cluster_details", redshift_clusters, ids=ident_cluster_test) + metafunc.parametrize("cluster, remediated", 
redshift_clusters, ids=ident_cluster_test) @pytest.mark.redshift_public_access -def test_cluster(cluster_details): +def test_cluster(cluster, remediated): """ Actual testing function. :param cluster_details: dict with information about rules from RedshiftClusterChecker(...) + :param remediated: remediation details. :return: nothing, raises AssertionError if actual test result is not matched with expected """ - name = find_cluster_name(cluster_details) - expected = clusters.get(name, {})["CheckShouldPass"] - assert expected == cluster_details.is_public + expected = True if remediated else find_rule_prop(cluster, "CheckShouldPass", True) + assert expected == (not cluster.is_public) From 470a9893b42e133610e278d57c83fea6bd8cb0d1 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 19:54:42 +0530 Subject: [PATCH 112/193] Updated with redshift public access testcases Updated with redshift public access testcases --- tests/mock_redshift.py | 4 ++-- tests/test_redshift_public_access.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/mock_redshift.py b/tests/mock_redshift.py index 39ff3620..11aec911 100644 --- a/tests/mock_redshift.py +++ b/tests/mock_redshift.py @@ -24,8 +24,8 @@ def create_env_clusters(clusters, region): for cluster, rule in clusters.items(): cluster_id = redshift_client.create_cluster( - DBName=cluster, - ClusterIdentifier=rule["ClusterIdentifier"], + DBName=rule["DBName"], + ClusterIdentifier=cluster, ClusterType=rule["ClusterType"], NodeType=rule["NodeType"], MasterUsername=rule["MasterUsername"], diff --git a/tests/test_redshift_public_access.py b/tests/test_redshift_public_access.py index 5f86a4ab..48ae7bee 100644 --- a/tests/test_redshift_public_access.py +++ b/tests/test_redshift_public_access.py @@ -9,7 +9,7 @@ clusters = { "cluster1": { "Description": "Cluster has public access", - "ClusterIdentifier": "test1", + "DBName": "test1", "ClusterType": "single-node", "NodeType": "ds2.xlarge", 
"MasterUsername": "user1", @@ -20,7 +20,7 @@ }, "cluster2": { "Description": "Cluster has private access", - "ClusterIdentifier": "test2", + "DBName": "test2", "ClusterType": "single-node", "NodeType": "ds2.xlarge", "MasterUsername": "user2", @@ -44,7 +44,7 @@ def ident_cluster_test(arg): """ Used to build identification string for each autogenerated test (for easy recognition of failed tests). - :param arg: dict with information about rules from + :param cluster_details: dict with information about rules from RedshiftClusterChecker(...) :return: identification string with cluster name. """ From 1fae6ae5931795a334374270fbe57dd554acdbec Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 20:03:35 +0530 Subject: [PATCH 113/193] Updated with Redshift Encryption test cases. Updated with Redshift Encryption test cases. --- tests/mock_redshift.py | 2 +- tests/test_redshift_encryption.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/mock_redshift.py b/tests/mock_redshift.py index 29f85c10..11aec911 100644 --- a/tests/mock_redshift.py +++ b/tests/mock_redshift.py @@ -25,7 +25,7 @@ def create_env_clusters(clusters, region): for cluster, rule in clusters.items(): cluster_id = redshift_client.create_cluster( DBName=rule["DBName"], - ClusterIdentifier=rule["ClusterIdentifier"], + ClusterIdentifier=cluster, ClusterType=rule["ClusterType"], NodeType=rule["NodeType"], MasterUsername=rule["MasterUsername"], diff --git a/tests/test_redshift_encryption.py b/tests/test_redshift_encryption.py index df22f4ac..adb3908a 100644 --- a/tests/test_redshift_encryption.py +++ b/tests/test_redshift_encryption.py @@ -8,8 +8,8 @@ clusters = { "cluster1": { + "Description": "Encrypted Cluster", "DBName": "test1", - "ClusterIdentifier": "test1", "ClusterType": "single-node", "NodeType": "ds2.xlarge", "MasterUsername": "user1", @@ -19,8 +19,8 @@ "CheckShouldPass": True }, "cluster2": { + "Description": "Unencrypted Cluster", "DBName": "test2", 
- "ClusterIdentifier": "test2", "ClusterType": "single-node", "NodeType": "ds2.xlarge", "MasterUsername": "user2", @@ -94,4 +94,4 @@ def test_cluster(cluster, remediated): """ expected = True if remediated else find_rule_prop(cluster, "CheckShouldPass", True) - assert expected == cluster.is_encrypt + assert expected == (cluster.is_encrypt) From 9bb27f6e6b415b481388c1db3b69143f98e24908 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 20:30:35 +0530 Subject: [PATCH 114/193] Updated with redshift logging test case changes. Updated with redshift logging test case changes. --- tests/mock_redshift.py | 2 +- tests/test_redshift_logging.py | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/mock_redshift.py b/tests/mock_redshift.py index 29f85c10..11aec911 100644 --- a/tests/mock_redshift.py +++ b/tests/mock_redshift.py @@ -25,7 +25,7 @@ def create_env_clusters(clusters, region): for cluster, rule in clusters.items(): cluster_id = redshift_client.create_cluster( DBName=rule["DBName"], - ClusterIdentifier=rule["ClusterIdentifier"], + ClusterIdentifier=cluster, ClusterType=rule["ClusterType"], NodeType=rule["NodeType"], MasterUsername=rule["MasterUsername"], diff --git a/tests/test_redshift_logging.py b/tests/test_redshift_logging.py index 77800a08..a2660476 100644 --- a/tests/test_redshift_logging.py +++ b/tests/test_redshift_logging.py @@ -8,8 +8,8 @@ clusters = { "cluster1": { + "Description": "Cluster logging disabled", "DBName": "test1", - "ClusterIdentifier": "test1", "ClusterType": "single-node", "NodeType": "ds2.xlarge", "MasterUsername": "user1", @@ -54,12 +54,10 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = RedshiftLoggingChecker(account) - checker.check(ids=test_clusters) - - redshift_clusters = [(cluster, False) for cluster in checker.clusters] + checker.check(clusters=test_clusters) # create test cases for each response - metafunc.parametrize("cluster_details", 
redshift_clusters, ids=ident_cluster_test) + metafunc.parametrize("cluster_details", checker.clusters, ids=ident_cluster_test) @pytest.mark.redshift_public_access @@ -73,4 +71,4 @@ def test_cluster(cluster_details): """ name = find_cluster_name(cluster_details) expected = clusters.get(name, {})["CheckShouldPass"] - assert expected == cluster_details.is_logging + assert expected == (not cluster_details.is_logging) From 03977f987bb3f475ed515111a5107d8f128f2669 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 20:56:55 +0530 Subject: [PATCH 115/193] Updated with ECS Logging test case changes. Updated with ECS Logging test case changes. --- hammer/library/aws/ecs.py | 5 ++++- tests/mock_ecs.py | 4 ++-- tests/test_ecs_logging.py | 8 +++----- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 6ff6c7ca..b68438f3 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -106,7 +106,7 @@ def __init__(self, account): self.account = account self.task_definitions = [] - def check(self): + def check(self, task_definitions=None): """ Walk through clusters in the account/region and check them. Put all ECS task definition's container details. 
@@ -127,6 +127,9 @@ def check(self): if "families" in response: for task_definition_name in response["families"]: + if task_definitions is not None and task_definition_name not in task_definitions: + continue + tags = {} container_image_details = [] disabled_logging_container_names = [] diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 7dc1b434..4e60ebf1 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -23,11 +23,11 @@ def create_env_task_definitions(task_definitions, region): for task_definition, rule in task_definitions.items(): ecs_client.register_task_definition( - family=rule["family"], + family=task_definition, containerDefinitions=rule["containerDefinitions"] ) - test_task_definitions.append(rule["family"]) + test_task_definitions.append(task_definition) # remove moto precreated task definitions task_definitions_list_to_check = ecs_client.list_task_definition_families() diff --git a/tests/test_ecs_logging.py b/tests/test_ecs_logging.py index c1a3d7a1..51503603 100644 --- a/tests/test_ecs_logging.py +++ b/tests/test_ecs_logging.py @@ -8,7 +8,6 @@ task_definitions = { "tas_definition1": { - "family": 'test_ecs_logging1', "Description": "ECS task definition's logging is enabled.", "CheckShouldPass": False, "containerDefinitions": [ @@ -31,7 +30,6 @@ ] }, "tas_definition2": { - "family": 'test_ecs_logging2', "Description": "ECS task definition's logging is not enabled.", "CheckShouldPass": True, "containerDefinitions": [ @@ -57,7 +55,7 @@ def find_task_definition_name(task_definition_details): for taskDefinition, props in task_definitions.items(): - if props["family"] == task_definition_details.name: + if taskDefinition == task_definition_details.name: return taskDefinition return None @@ -88,7 +86,7 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = ECSChecker(account) - checker.check(ids=test_task_definitions) + checker.check(task_definitions=test_task_definitions) # create test cases for each response 
metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) @@ -105,4 +103,4 @@ def test_task(task_definition_details): """ name = find_task_definition_name(task_definition_details) expected = task_definitions.get(name, {})["CheckShouldPass"] - assert expected == task_definition_details.is_logging + assert expected == (not task_definition_details.is_logging) From 79dfea9eb94f20820631d2f50fbf48c6a58b4a19 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 21:16:14 +0530 Subject: [PATCH 116/193] Updated with ECS privilegeg access unit test changes. Updated with ECS privilegeg access unit test changes. --- hammer/library/aws/ecs.py | 14 +++++++++----- tests/mock_ecs.py | 4 ++-- tests/test_ecs_privileged_access.py | 12 +++++------- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 1361e3ce..b68438f3 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -46,7 +46,7 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ec2_instance_id = container_instance[0]["ec2InstanceId"] ec2_instance = \ - ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( @@ -64,9 +64,10 @@ class ECSTaskDefinitions(object): """ def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_container_names=None, - is_privileged=None, privileged_container_names=None, external_image=None, container_image_details=None): + is_privileged=None, privileged_container_names=None, external_image=None, + container_image_details=None): """ - + :param account: `Account` instance where ECS task definition is present :param name: name of the task definition :param arn: arn of the 
task definition @@ -105,7 +106,7 @@ def __init__(self, account): self.account = account self.task_definitions = [] - def check(self): + def check(self, task_definitions=None): """ Walk through clusters in the account/region and check them. Put all ECS task definition's container details. @@ -126,6 +127,9 @@ def check(self): if "families" in response: for task_definition_name in response["families"]: + if task_definitions is not None and task_definition_name not in task_definitions: + continue + tags = {} container_image_details = [] disabled_logging_container_names = [] @@ -143,7 +147,7 @@ def check(self): if container_definition.get('privileged') is not None \ and container_definition['privileged']: - privileged_container_names.append(container_name) + privileged_container_names.append(container_name) image = container_definition.get('image') image_details = {} diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 7dc1b434..4e60ebf1 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -23,11 +23,11 @@ def create_env_task_definitions(task_definitions, region): for task_definition, rule in task_definitions.items(): ecs_client.register_task_definition( - family=rule["family"], + family=task_definition, containerDefinitions=rule["containerDefinitions"] ) - test_task_definitions.append(rule["family"]) + test_task_definitions.append(task_definition) # remove moto precreated task definitions task_definitions_list_to_check = ecs_client.list_task_definition_families() diff --git a/tests/test_ecs_privileged_access.py b/tests/test_ecs_privileged_access.py index 0affbe66..b8cb330d 100644 --- a/tests/test_ecs_privileged_access.py +++ b/tests/test_ecs_privileged_access.py @@ -8,9 +8,8 @@ task_definitions = { "tas_definition1": { - "family": 'test_ecs_privileged_access1', "Description": "ECS task enabled privileged access", - "CheckShouldPass": False, + "CheckShouldPass": True, "containerDefinitions": [ { 'name': 'hello_world1', @@ -23,9 +22,8 @@ ] }, "tas_definition2": 
{ - "family": 'test_ecs_privileged_access2', "Description": "ECS task disabled privileged access", - "CheckShouldPass": True, + "CheckShouldPass": False, "containerDefinitions": [ { 'name': 'hello_world2', @@ -42,7 +40,7 @@ def find_task_definition_name(task_definition_details): for taskDefinition, props in task_definitions.items(): - if props["family"] == task_definition_details.name: + if taskDefinition == task_definition_details.name: return taskDefinition return None @@ -73,7 +71,7 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = ECSChecker(account) - checker.check(ids=test_task_definitions) + checker.check(task_definitions=test_task_definitions) # create test cases for each response metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) @@ -90,4 +88,4 @@ def test_task(task_definition_details): """ name = find_task_definition_name(task_definition_details) expected = task_definitions.get(name, {})["CheckShouldPass"] - assert expected == task_definition_details.is_privileged \ No newline at end of file + assert expected == task_definition_details.is_privileged From 7016630d8fc0c6fe95dd9f8d5dd28135a5756cbe Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 21:21:48 +0530 Subject: [PATCH 117/193] Updated test case conditions. Updated test case conditions. 
--- tests/test_ecs_privileged_access.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_ecs_privileged_access.py b/tests/test_ecs_privileged_access.py index b8cb330d..0169eff8 100644 --- a/tests/test_ecs_privileged_access.py +++ b/tests/test_ecs_privileged_access.py @@ -9,7 +9,7 @@ task_definitions = { "tas_definition1": { "Description": "ECS task enabled privileged access", - "CheckShouldPass": True, + "CheckShouldPass": False, "containerDefinitions": [ { 'name': 'hello_world1', @@ -23,7 +23,7 @@ }, "tas_definition2": { "Description": "ECS task disabled privileged access", - "CheckShouldPass": False, + "CheckShouldPass": True, "containerDefinitions": [ { 'name': 'hello_world2', @@ -88,4 +88,4 @@ def test_task(task_definition_details): """ name = find_task_definition_name(task_definition_details) expected = task_definitions.get(name, {})["CheckShouldPass"] - assert expected == task_definition_details.is_privileged + assert expected == (not task_definition_details.is_privileged) From a0e5a10ccc6f0c6655cafc2c095bd9e22d777213 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 21:25:08 +0530 Subject: [PATCH 118/193] Updated test case conditions. Updated test case conditions. 
--- tests/test_ecs_logging.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_ecs_logging.py b/tests/test_ecs_logging.py index 51503603..0f8bfcaa 100644 --- a/tests/test_ecs_logging.py +++ b/tests/test_ecs_logging.py @@ -9,7 +9,7 @@ task_definitions = { "tas_definition1": { "Description": "ECS task definition's logging is enabled.", - "CheckShouldPass": False, + "CheckShouldPass": True, "containerDefinitions": [ { 'name': 'hello_world1', @@ -31,7 +31,7 @@ }, "tas_definition2": { "Description": "ECS task definition's logging is not enabled.", - "CheckShouldPass": True, + "CheckShouldPass": False, "containerDefinitions": [ { 'name': 'hello_world3', @@ -103,4 +103,4 @@ def test_task(task_definition_details): """ name = find_task_definition_name(task_definition_details) expected = task_definitions.get(name, {})["CheckShouldPass"] - assert expected == (not task_definition_details.is_logging) + assert expected == task_definition_details.is_logging From 004635d82ad5d97ae6426494995f6554b2b16351 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 21:32:56 +0530 Subject: [PATCH 119/193] Updated ECS imagesource test case changes. Updated ECS imagesource test case changes. 
--- hammer/library/aws/ecs.py | 29 +++++++++++++++---------- tests/mock_ecs.py | 4 ++-- tests/test_ecs_external_image_source.py | 8 +++---- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 54f5bced..b68438f3 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -1,4 +1,3 @@ -import json import logging from botocore.exceptions import ClientError @@ -47,7 +46,7 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ec2_instance_id = container_instance[0]["ec2InstanceId"] ec2_instance = \ - ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] + ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( @@ -65,15 +64,22 @@ class ECSTaskDefinitions(object): """ def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_container_names=None, - is_privileged=None, privileged_container_names=None, external_image=None, container_image_details=None): + is_privileged=None, privileged_container_names=None, external_image=None, + container_image_details=None): """ - :param account: `Account` instance where ECS task definition is present + :param account: `Account` instance where ECS task definition is present :param name: name of the task definition :param arn: arn of the task definition - :param arn: tags of task definition. - :param is_logging: logging enabled or not. + :param tags: tags of task definition. + :param is_logging: boolean. Task definition's container logging is enabled or not + :param disabled_logging_container_names: List of containers which logging disabled. 
+ :param is_privileged: boolean + :param privileged_container_names: List of containers which privileged access enabled + :param external_image: boolean + :param container_image_details: List of containers which image source is taken from external """ + self.account = account self.name = name self.arn = arn @@ -85,10 +91,6 @@ def __init__(self, account, name, arn, tags, is_logging=None, disabled_logging_c self.external_image = external_image self.container_image_details = container_image_details - def __str__(self): - return f"{self.__class__.__name__}(Name={self.name}, is_logging={self.is_logging}, " \ - f"is_privileged={self.is_privileged}, external_image={self.external_image})" - class ECSChecker(object): """ @@ -104,7 +106,7 @@ def __init__(self, account): self.account = account self.task_definitions = [] - def check(self): + def check(self, task_definitions=None): """ Walk through clusters in the account/region and check them. Put all ECS task definition's container details. @@ -125,6 +127,9 @@ def check(self): if "families" in response: for task_definition_name in response["families"]: + if task_definitions is not None and task_definition_name not in task_definitions: + continue + tags = {} container_image_details = [] disabled_logging_container_names = [] @@ -142,7 +147,7 @@ def check(self): if container_definition.get('privileged') is not None \ and container_definition['privileged']: - privileged_container_names.append(container_name) + privileged_container_names.append(container_name) image = container_definition.get('image') image_details = {} diff --git a/tests/mock_ecs.py b/tests/mock_ecs.py index 7dc1b434..4e60ebf1 100644 --- a/tests/mock_ecs.py +++ b/tests/mock_ecs.py @@ -23,11 +23,11 @@ def create_env_task_definitions(task_definitions, region): for task_definition, rule in task_definitions.items(): ecs_client.register_task_definition( - family=rule["family"], + family=task_definition, containerDefinitions=rule["containerDefinitions"] ) - 
test_task_definitions.append(rule["family"]) + test_task_definitions.append(task_definition) # remove moto precreated task definitions task_definitions_list_to_check = ecs_client.list_task_definition_families() diff --git a/tests/test_ecs_external_image_source.py b/tests/test_ecs_external_image_source.py index 061f3fc5..c1af0864 100644 --- a/tests/test_ecs_external_image_source.py +++ b/tests/test_ecs_external_image_source.py @@ -10,7 +10,7 @@ "tas_definition": { "family": 'test_ecs_image_source', "Description": "Congainer image taken from external source", - "CheckShouldPass": True, + "CheckShouldPass": False, "containerDefinitions": [ { 'name': 'hello_world1', @@ -33,7 +33,7 @@ def find_task_definition_name(task_definition_details): for taskDefinition, props in task_definitions.items(): - if props["family"] == task_definition_details.name: + if taskDefinition == task_definition_details.name: return taskDefinition return None @@ -64,7 +64,7 @@ def pytest_generate_tests(metafunc): # validate ebs volumes in mocked env checker = ECSChecker(account) - checker.check(ids=test_task_definitions) + checker.check(task_definitions=test_task_definitions) # create test cases for each response metafunc.parametrize("task_definition_details", checker.task_definitions, ids=ident_task_definition_test) @@ -81,4 +81,4 @@ def test_task(task_definition_details): """ name = find_task_definition_name(task_definition_details) expected = task_definitions.get(name, {})["CheckShouldPass"] - assert expected == task_definition_details.external_image \ No newline at end of file + assert expected == (not task_definition_details.external_image) \ No newline at end of file From 595578d4ed96508cf2be9f59d8887a4f42fb5f4f Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 5 Jul 2019 21:39:34 +0530 Subject: [PATCH 120/193] Updated with Redshift modify cluster permissions. Updated with Redshift modify cluster permissions. 
--- .../cf-templates/reporting-remediation-crossaccount-role.json | 3 ++- deployment/cf-templates/reporting-remediation-role.json | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 9bfa486e..b209ae70 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -152,7 +152,8 @@ "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", - "redshift:RevokeClusterSecurityGroupIngress" + "redshift:RevokeClusterSecurityGroupIngress", + "ec2:Describe*" ], "Resource": "*" } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index 198e88b1..012d5e9b 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -219,7 +219,8 @@ "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", - "redshift:RevokeClusterSecurityGroupIngress" + "redshift:RevokeClusterSecurityGroupIngress", + "ec2:Describe*" ], "Resource": "*" }, From b3f837981718c583f7d9f6fcbc9314e6e3ef1055 Mon Sep 17 00:00:00 2001 From: Ashish Kurmi Date: Wed, 10 Jul 2019 14:26:41 -0700 Subject: [PATCH 121/193] 91:AWS libraries should use boto3 Paginators wherever possible --- hammer/library/aws/ebs.py | 5 ++++- hammer/library/aws/iam.py | 5 ++++- hammer/library/aws/rds.py | 7 +++++-- hammer/library/aws/security_groups.py | 5 ++++- 4 files changed, 17 insertions(+), 5 deletions(-) diff --git a/hammer/library/aws/ebs.py b/hammer/library/aws/ebs.py index 1a9c66d8..c33b9c7c 100755 --- a/hammer/library/aws/ebs.py +++ b/hammer/library/aws/ebs.py @@ -106,8 +106,11 @@ def check(self, ids=None, tags=None): {'Name': f"tag:{key}", 
'Values': value if isinstance(value, list) else [value]}, ) + volume_details = [] try: - volume_details = self.account.client("ec2").describe_volumes(**args)["Volumes"] + paginator = self.account.client("ec2").get_paginator('describe_volumes') + for page in paginator.paginate(**args): + volume_details.extend(page["Volumes"]) except ClientError as err: if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: logging.error(f"Access denied in {self.account} " diff --git a/hammer/library/aws/iam.py b/hammer/library/aws/iam.py index 9c018c4b..d7960ba9 100755 --- a/hammer/library/aws/iam.py +++ b/hammer/library/aws/iam.py @@ -308,9 +308,12 @@ def check(self, users_to_check=None, last_used_check_enabled=False): :return: boolean. True - if check was successful, False - otherwise """ + users = [] try: # get all users in account - users = self.account.client("iam").list_users()['Users'] + paginator = self.account.client("iam").get_paginator("list_users") + for page in paginator.paginate(): + users.extend(page['Users']) except ClientError as err: if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: logging.error(f"Access denied in {self.account} " diff --git a/hammer/library/aws/rds.py b/hammer/library/aws/rds.py index 7a40c365..0d81a6b3 100755 --- a/hammer/library/aws/rds.py +++ b/hammer/library/aws/rds.py @@ -36,8 +36,11 @@ def get_rds_instance_details_of_sg_associated(cls, rds_client, group_id): rds_instances = [] # this will include both DB and Cluster instances - rds_response = rds_client.describe_db_instances() - for db_instance in rds_response["DBInstances"]: + rds_response = [] + paginator = rds_client.get_paginator('describe_db_instances') + for page in paginator.paginate(**args): + rds_response.extend(page["DBInstances"]) + for db_instance in rds_response: active_security_groups = [ sg["VpcSecurityGroupId"] for sg in db_instance['VpcSecurityGroups'] if sg["Status"] == "active" ] if group_id in active_security_groups: 
rds_instances.append(RDSInstance( diff --git a/hammer/library/aws/security_groups.py b/hammer/library/aws/security_groups.py index 814d88aa..bd143e51 100755 --- a/hammer/library/aws/security_groups.py +++ b/hammer/library/aws/security_groups.py @@ -529,8 +529,11 @@ def check(self, ids=None, tags=None): args['Filters'].append( {'Name': f"tag:{key}", 'Values': value if isinstance(value, list) else [value]}, ) + secgroups = [] try: - secgroups = self.account.client("ec2").describe_security_groups(**args)["SecurityGroups"] + paginator = self.account.client("ec2").get_paginator('describe_security_groups') + for page in paginator.paginate(**args): + secgroups.extend(page["SecurityGroups"]) except ClientError as err: if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: logging.error(f"Access denied in {self.account} " From 2103b7a09aae8f80aecdd0bbeb839f35b0c6ecc3 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 22 Jul 2019 15:53:18 +0530 Subject: [PATCH 122/193] Updated with review comment changes. Updated with review comment changes. 
--- .../cf-templates/reporting-remediation-crossaccount-role.json | 1 - deployment/cf-templates/reporting-remediation-role.json | 1 - 2 files changed, 2 deletions(-) diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 9bfa486e..938f8900 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -149,7 +149,6 @@ "redshift:GetClusterCredentials", "redshift:DescribeClusters", "redshift:EnableLogging", - "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", "redshift:RevokeClusterSecurityGroupIngress" diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index 198e88b1..6953995b 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -216,7 +216,6 @@ "redshift:GetClusterCredentials", "redshift:DescribeClusters", "redshift:EnableLogging", - "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", "redshift:RevokeClusterSecurityGroupIngress" From 74f60b656632d3d864350afcab65df527dfb7679 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 22 Jul 2019 15:53:34 +0530 Subject: [PATCH 123/193] Updated with review comment changes. Updated with review comment changes. 
--- .../create_redshift_logging_issue_tickets.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index aa285e5c..2fed3a09 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -65,18 +65,6 @@ def create_tickets_redshift_logging(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {cluster_id}") - slack.report_issue( - msg=f"Redshift cluster logging '{cluster_id}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{cluster_id}'") # issue has not been reported yet From eda50ae7b79c142e77e0d9bdac72098581bf2796 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 23 Jul 2019 12:01:21 +0530 Subject: [PATCH 124/193] Updated with review comment changes. Updated with review comment changes. 
--- ...porting-remediation-crossaccount-role.json | 1 - .../reporting-remediation-role.json | 1 - hammer/library/aws/redshift.py | 81 +++---------------- .../clean_redshift_cluster_unencrypted.py | 16 ++-- ...shift_unencrypted_cluster_issue_tickets.py | 12 --- 5 files changed, 19 insertions(+), 92 deletions(-) diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index b209ae70..fd716877 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -149,7 +149,6 @@ "redshift:GetClusterCredentials", "redshift:DescribeClusters", "redshift:EnableLogging", - "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", "redshift:RevokeClusterSecurityGroupIngress", diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index 012d5e9b..b0fa584b 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -216,7 +216,6 @@ "redshift:GetClusterCredentials", "redshift:DescribeClusters", "redshift:EnableLogging", - "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", "redshift:RevokeClusterSecurityGroupIngress", diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 21a735f8..4cf14aec 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -169,6 +169,7 @@ def check(self, clusters=None): if "Clusters" in response: for cluster_details in response["Clusters"]: + logging_enabled = False tags = {} cluster_id = cluster_details["ClusterIdentifier"] @@ -179,80 +180,24 @@ def check(self, clusters=None): is_encrypted = cluster_details["Encrypted"] if "Tags" in cluster_details: tags = cluster_details["Tags"] 
+ try: + logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to describe logging status cluster in {self.account}") - cluster = RedshiftCluster(account=self.account, - name=cluster_id, - tags=tags, - is_encrypted=is_encrypted, - is_public=is_public) - self.clusters.append(cluster) - - return True - - -class RedshiftLoggingChecker(object): - """ - Basic class for checking redshift cluster's logging enabled or not in account/region. - Encapsulates check settings and discovered clusters. - """ - - def __init__(self, account): - """ - :param account: `Account` clusters to check - - """ - self.account = account - self.clusters = [] - - def get_cluster(self, name): - """ - :return: `Redshift cluster` by name - """ - for cluster in self.clusters: - if cluster.name == name: - return cluster - return None - - def check(self, clusters=None): - """ - Walk through clusters in the account/region and check them. - Put all gathered clusters to `self.clusters`. - - :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked - - :return: boolean. 
True - if check was successful, - False - otherwise - """ - try: - # AWS does not support filtering dirung list, so get all clusters for account - response = self.account.client("redshift").describe_clusters() - except ClientError as err: - if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: - logging.error(f"Access denied in {self.account} " - f"(redshift:{err.operation_name})") - else: - logging.exception(f"Failed to list cluster in {self.account}") - return False - - if "Clusters" in response: - for cluster_details in response["Clusters"]: - logging_enabled = False - tags = {} - cluster_id = cluster_details["ClusterIdentifier"] - - if clusters is not None and cluster_id not in clusters: continue - logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) - if "LoggingEnabled" in logging_details: - logging_enabled = logging_details["LoggingEnabled"] - - if "Tags" in cluster_details: - tags = cluster_details["Tags"] - cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, + is_encrypted=is_encrypted, + is_public=is_public, is_logging=logging_enabled) self.clusters.append(cluster) diff --git a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py index 2001c164..99df8cf6 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py @@ -40,19 +40,15 @@ def cleanredshiftclusterunencryption(self, batch=False): cluster_id = issue.issue_id in_whitelist = self.config.redshiftEncrypt.in_whitelist(account_id, cluster_id) - in_fixlist = True if in_whitelist: logging.debug(f"Skipping {cluster_id} (in whitelist)") # Adding label with "whitelisted" to jira ticket. 
jira.add_label( ticket_id=issue.jira_details.ticket, - labels=IssueStatus.Whitelisted + labels=IssueStatus.Whitelisted.value ) continue - if not in_fixlist: - logging.debug(f"Skipping {cluster_id} (not in fixlist)") - continue if issue.timestamps.reported is None: logging.debug(f"Skipping '{cluster_id}' (was not reported)") @@ -72,7 +68,7 @@ def cleanredshiftclusterunencryption(self, batch=False): try: if not batch and \ - not confirm(f"Do you want to remediate '{cluster_id}' Redshift cluster Un-encryption", False): + not confirm(f"Do you want to remediate '{cluster_id}' Redshift cluster un-encryption", False): continue account = Account(id=account_id, @@ -96,13 +92,13 @@ def cleanredshiftclusterunencryption(self, batch=False): remediation_succeed = True if cluster_details.encrypt_cluster(): comment = (f"Cluster '{cluster_details.name}' un-encryption issue " - f"in '{account_name} / {account_id}' account , '{issue.issue_details.region}' region" - f"was remediated by hammer") + f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}'" + f" region was remediated by hammer") else: remediation_succeed = False comment = (f"Failed to remediate cluster '{cluster_details.name}' un-encryption issue " - f"in '{account_name} / {account_id}' account , '{issue.issue_details.region}' region" - f"due to some limitations. Please, check manually") + f"in '{account_name}/{account_id}' account, '{issue.issue_details.region}'" + f" region due to some limitations. 
Please, check manually") jira.remediate_issue( ticket_id=issue.jira_details.ticket, diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 25e212e0..61ecd3be 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -65,18 +65,6 @@ def create_tickets_redshift_unencrypted_cluster(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {cluster_id}") - slack.report_issue( - msg=f"Redshift unencrypted cluster '{cluster_id}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{cluster_id}'") # issue has not been reported yet From 121b99ce265c75cafe4daec5179780fd181c7bb1 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 23 Jul 2019 12:10:18 +0530 Subject: [PATCH 125/193] Updated with redshift logging issue review comment changes. Updated with redshift logging issue review comment changes. 
--- .../describe_redshift_logging_issues.py | 4 +- hammer/library/aws/redshift.py | 82 ++++--------------- .../create_redshift_logging_issue_tickets.py | 4 +- 3 files changed, 18 insertions(+), 72 deletions(-) diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py index bb33f81a..265591c3 100644 --- a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py @@ -3,7 +3,7 @@ from library.logger import set_logging from library.config import Config -from library.aws.redshift import RedshiftLoggingChecker +from library.aws.redshift import RedshiftClusterChecker from library.aws.utility import Account, DDB from library.ddb_issues import IssueStatus, RedshiftLoggingIssue from library.ddb_issues import Operations as IssueOperations @@ -48,7 +48,7 @@ def lambda_handler(event, context): open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} logging.debug(f"Redshift clusters in DDB:\n{open_issues.keys()}") - checker = RedshiftLoggingChecker(account=account) + checker = RedshiftClusterChecker(account=account) if checker.check(): for cluster in checker.clusters: logging.debug(f"Checking {cluster.name}") diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 98307ab3..d6ec377a 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -168,6 +168,7 @@ def check(self, clusters=None): if "Clusters" in response: for cluster_details in response["Clusters"]: + logging_enabled = False tags = {} cluster_id = cluster_details["ClusterIdentifier"] @@ -178,80 +179,25 @@ def check(self, clusters=None): is_encrypted = cluster_details["Encrypted"] 
if "Tags" in cluster_details: tags = cluster_details["Tags"] + try: + logging_details = self.account.client("redshift").describe_logging_status( + ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to describe logging status cluster in {self.account}") - cluster = RedshiftCluster(account=self.account, - name=cluster_id, - tags=tags, - is_encrypted=is_encrypted, - is_public=is_public) - self.clusters.append(cluster) - - return True - - -class RedshiftLoggingChecker(object): - """ - Basic class for checking redshift cluster's logging enabled or not in account/region. - Encapsulates check settings and discovered clusters. - """ - - def __init__(self, account): - """ - :param account: `Account` clusters to check - - """ - self.account = account - self.clusters = [] - - def get_cluster(self, name): - """ - :return: `Redshift cluster` by name - """ - for cluster in self.clusters: - if cluster.name == name: - return cluster - return None - - def check(self, clusters=None): - """ - Walk through clusters in the account/region and check them. - Put all gathered clusters to `self.clusters`. - - :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked - - :return: boolean. 
True - if check was successful, - False - otherwise - """ - try: - # AWS does not support filtering dirung list, so get all clusters for account - response = self.account.client("redshift").describe_clusters() - except ClientError as err: - if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: - logging.error(f"Access denied in {self.account} " - f"(redshift:{err.operation_name})") - else: - logging.exception(f"Failed to list cluster in {self.account}") - return False - - if "Clusters" in response: - for cluster_details in response["Clusters"]: - logging_enabled = False - tags = {} - cluster_id = cluster_details["ClusterIdentifier"] - - if clusters is not None and cluster_id not in clusters: continue - logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) - if "LoggingEnabled" in logging_details: - logging_enabled = logging_details["LoggingEnabled"] - - if "Tags" in cluster_details: - tags = cluster_details["Tags"] - cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, + is_encrypted=is_encrypted, + is_public=is_public, is_logging=logging_enabled) self.clusters.append(cluster) diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index 2fed3a09..cb412c6d 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -43,9 +43,9 @@ def create_tickets_redshift_logging(self): product = issue.jira_details.product if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} Redshift logging enabled '{cluster_id}' issue") + logging.debug(f"Closing {issue.status.value} Redshift logging '{cluster_id}' issue") - comment = (f"Closing {issue.status.value} Redshift cluster logging enabled 
'{cluster_id}' issue " + comment = (f"Closing {issue.status.value} Redshift cluster logging '{cluster_id}' issue " f"in '{account_name} / {account_id}' account, '{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. From 7e02684e036812ce5e671a3cc98771341fee6e72 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 23 Jul 2019 14:24:40 +0530 Subject: [PATCH 126/193] Updated with redshift review comment changes. Updated with redshift review comment changes. --- hammer/library/aws/redshift.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 4cf14aec..9c5c7e73 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -191,8 +191,6 @@ def check(self, clusters=None): else: logging.exception(f"Failed to describe logging status cluster in {self.account}") - continue - cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, From a39b782b55a86db49e5d6005655449cf526fc0d3 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 23 Jul 2019 20:21:29 +0530 Subject: [PATCH 127/193] Updated with review comment changes. Updated with review comment changes. --- hammer/library/aws/redshift.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index d6ec377a..c8539832 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -191,8 +191,6 @@ def check(self, clusters=None): else: logging.exception(f"Failed to describe logging status cluster in {self.account}") - continue - cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, From c4ea08c9190a104b720fa4736b3300ebe3222d11 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 24 Jul 2019 11:18:42 +0530 Subject: [PATCH 128/193] Removed unwanter permissions. Removed unwanter permissions. 
--- .../cf-templates/reporting-remediation-crossaccount-role.json | 1 - deployment/cf-templates/reporting-remediation-role.json | 1 - 2 files changed, 2 deletions(-) diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index fd716877..ab18435d 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -151,7 +151,6 @@ "redshift:EnableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", - "redshift:RevokeClusterSecurityGroupIngress", "ec2:Describe*" ], "Resource": "*" diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index b0fa584b..7fc0b3eb 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -218,7 +218,6 @@ "redshift:EnableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", - "redshift:RevokeClusterSecurityGroupIngress", "ec2:Describe*" ], "Resource": "*" From 6eb42cba32209466319822253c9446f90300c3e5 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 24 Jul 2019 20:42:16 +0530 Subject: [PATCH 129/193] Updated with review comment changes. Updated with review comment changes. 
--- ...porting-remediation-crossaccount-role.json | 3 +- .../reporting-remediation-role.json | 3 +- hammer/library/aws/redshift.py | 105 ++++-------------- .../clean_redshift_public_access.py | 6 +- ...te_redshift_public_access_issue_tickets.py | 12 -- 5 files changed, 26 insertions(+), 103 deletions(-) diff --git a/deployment/cf-templates/reporting-remediation-crossaccount-role.json b/deployment/cf-templates/reporting-remediation-crossaccount-role.json index 9bfa486e..ab18435d 100755 --- a/deployment/cf-templates/reporting-remediation-crossaccount-role.json +++ b/deployment/cf-templates/reporting-remediation-crossaccount-role.json @@ -149,10 +149,9 @@ "redshift:GetClusterCredentials", "redshift:DescribeClusters", "redshift:EnableLogging", - "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", - "redshift:RevokeClusterSecurityGroupIngress" + "ec2:Describe*" ], "Resource": "*" } diff --git a/deployment/cf-templates/reporting-remediation-role.json b/deployment/cf-templates/reporting-remediation-role.json index 198e88b1..7fc0b3eb 100755 --- a/deployment/cf-templates/reporting-remediation-role.json +++ b/deployment/cf-templates/reporting-remediation-role.json @@ -216,10 +216,9 @@ "redshift:GetClusterCredentials", "redshift:DescribeClusters", "redshift:EnableLogging", - "redshift:DisableLogging", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:ModifyCluster", - "redshift:RevokeClusterSecurityGroupIngress" + "ec2:Describe*" ], "Resource": "*" }, diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index 21a735f8..5ee5da03 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -5,18 +5,16 @@ from collections import namedtuple from library.aws.utility import convert_tags - # structure which describes EC2 instance RedshiftCluster_Details = namedtuple('RedshiftCluster_Details', [ # cluster_id 'id', # subnet_group_id 'subnet_group_name' - ]) +]) class 
RedshiftClusterOperations(object): - @classmethod @timeit def get_redshift_vpc_security_groups(cls, redshift_client, group_id): @@ -44,29 +42,27 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): return redshift_clusters @staticmethod - def set_cluster_access(redshift_client, cluster_id, public_access): + def make_priviate(redshift_client, cluster_id): """ Sets the cluster access as private. :param redshift_client: Redshift boto3 client - :param cluster_id: Redshift cluster name which to make as private - :param public_access: Redshift cluster public access True or False. + :param cluster_id: Redshift cluster name which to make as private. :return: nothing """ redshift_client.modify_cluster( ClusterIdentifier=cluster_id, - PubliclyAccessible=public_access + PubliclyAccessible=False ) @staticmethod def cluster_encryption(redshift_client, cluster_id): """ - :param redshift_client: redshift client :param cluster_id: cluster id which need to be encrypted. - + :return: """ # Modify cluster as encrypted. @@ -81,6 +77,7 @@ class RedshiftCluster(object): Basic class for Redshift Cluster. Encapsulates `Owner`/`Tags`. """ + def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_logging=None): """ :param account: `Account` instance where redshift cluster is present @@ -96,13 +93,13 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo self.is_public = is_public self.is_logging = is_logging - def modify_cluster(self, public_access): + def make_priviate(self): """ Modify cluster as private. 
:return: nothing """ try: - RedshiftClusterOperations.set_cluster_access(self.account.client("redshift"), self.name, public_access) + RedshiftClusterOperations.make_priviate(self.account.client("redshift"), self.name) except Exception: logging.exception(f"Failed to modify {self.name} cluster ") return False @@ -124,77 +121,10 @@ def encrypt_cluster(self): class RedshiftClusterChecker(object): - """ Basic class for checking redshift clusters public access and encryption in account/region. Encapsulates check settings and discovered clusters. """ - def __init__(self, account): - """ - :param account: `Account` clusters to check - - """ - self.account = account - self.clusters = [] - - def get_cluster(self, name): - """ - :return: `Redshift cluster` by name - """ - for cluster in self.clusters: - if cluster.name == name: - return cluster - return None - - def check(self, clusters=None): - """ - Walk through clusters in the account/region and check them. - Put all gathered clusters to `self.clusters`. - - :param clusters: list with clusters to check, if it is not supplied - all clusters must be checked - - :return: boolean. 
True - if check was successful, - False - otherwise - """ - try: - # AWS does not support filtering dirung list, so get all clusters for account - response = self.account.client("redshift").describe_clusters() - except ClientError as err: - if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: - logging.error(f"Access denied in {self.account} " - f"(redshift:{err.operation_name})") - else: - logging.exception(f"Failed to list cluster in {self.account}") - return False - - if "Clusters" in response: - for cluster_details in response["Clusters"]: - tags = {} - cluster_id = cluster_details["ClusterIdentifier"] - - if clusters is not None and cluster_id not in clusters: - continue - - is_public = cluster_details["PubliclyAccessible"] - is_encrypted = cluster_details["Encrypted"] - if "Tags" in cluster_details: - tags = cluster_details["Tags"] - - cluster = RedshiftCluster(account=self.account, - name=cluster_id, - tags=tags, - is_encrypted=is_encrypted, - is_public=is_public) - self.clusters.append(cluster) - - return True - - -class RedshiftLoggingChecker(object): - """ - Basic class for checking redshift cluster's logging enabled or not in account/region. - Encapsulates check settings and discovered clusters. 
- """ def __init__(self, account): """ @@ -243,16 +173,27 @@ def check(self, clusters=None): if clusters is not None and cluster_id not in clusters: continue - logging_details = self.account.client("redshift").describe_logging_status(ClusterIdentifier=cluster_id) - if "LoggingEnabled" in logging_details: - logging_enabled = logging_details["LoggingEnabled"] - + is_public = cluster_details["PubliclyAccessible"] + is_encrypted = cluster_details["Encrypted"] if "Tags" in cluster_details: tags = cluster_details["Tags"] + try: + logging_details = self.account.client("redshift").describe_logging_status( + ClusterIdentifier=cluster_id) + if "LoggingEnabled" in logging_details: + logging_enabled = logging_details["LoggingEnabled"] + except ClientError as err: + if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + logging.error(f"Access denied in {self.account} " + f"(redshift:{err.operation_name})") + else: + logging.exception(f"Failed to describe logging status cluster in {self.account}") cluster = RedshiftCluster(account=self.account, name=cluster_id, tags=tags, + is_encrypted=is_encrypted, + is_public=is_public, is_logging=logging_enabled) self.clusters.append(cluster) diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index 167aad0b..00d80914 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -40,7 +40,6 @@ def clean_redshift_public_access(self, batch=False): cluster_id = issue.issue_id in_whitelist = self.config.redshift_public_access.in_whitelist(account_id, cluster_id) - in_fixlist = True if in_whitelist: logging.debug(f"Skipping {cluster_id} (in whitelist)") @@ -50,9 +49,6 @@ def clean_redshift_public_access(self, batch=False): label=IssueStatus.Whitelisted.value ) continue - if not in_fixlist: - 
logging.debug(f"Skipping {cluster_id} (not in fixlist)") - continue if issue.timestamps.reported is None: logging.debug(f"Skipping '{cluster_id}' (was not reported)") @@ -94,7 +90,7 @@ def clean_redshift_public_access(self, batch=False): logging.debug(f"Remediating '{cluster_details.name}' public access") remediation_succeed = True - if cluster_details.modify_cluster(False): + if cluster_details.make_priviate(): comment = (f"Cluster '{cluster_details.name}' public access issue " f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region " f"was remediated by hammer") diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index cab00dcf..d7875154 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -65,18 +65,6 @@ def create_tickets_redshift_public_access(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {cluster_id}") - slack.report_issue( - msg=f"Redshift publicly accessible cluster '{cluster_id}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{cluster_id}'") # issue has not been reported yet From 98f4db29e1c7bad2aa83757e9fa3fde70cd4e9e9 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 25 Jul 2019 10:43:31 +0530 
Subject: [PATCH 130/193] Added remediation permissions. Added remediation permissions. --- .../identification-crossaccount-role.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/deployment/cf-templates/identification-crossaccount-role.json b/deployment/cf-templates/identification-crossaccount-role.json index 24ddbf26..8660586a 100755 --- a/deployment/cf-templates/identification-crossaccount-role.json +++ b/deployment/cf-templates/identification-crossaccount-role.json @@ -127,6 +127,18 @@ "sqs:ListQueueTags" ], "Resource": "*" + }, + { + "Sid": "RedshiftIssues", + "Effect": "Allow", + "Action": [ + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeLoggingStatus", + "redshift:GetClusterCredentials", + "redshift:DescribeClusters" + ], + "Resource": "*" } ] } From 54a38ad49010d4c99d4440995b797548e479b90d Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 25 Jul 2019 13:52:22 +0530 Subject: [PATCH 131/193] Fixed deployment issues. Fixed deployment issues. --- deployment/cf-templates/ddb.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 6279535f..c4f4d909 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -510,7 +510,7 @@ "ReadCapacityUnits": "10", "WriteCapacityUnits": "2" }, - "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-unencrypted" ] ]}} + "TableName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "redshift-unencrypted" ] ]} } }, "DynamoDBECSPrivilegedAccess": { From f10145472a86942bd3afc9dfcb11eea7ec506c60 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 25 Jul 2019 17:16:31 +0530 Subject: [PATCH 132/193] Updated function name. Updated function name. 
--- hammer/library/aws/redshift.py | 4 ++-- .../remediation/clean_redshift_public_access.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index d574fb97..a675237c 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -42,7 +42,7 @@ def get_redshift_vpc_security_groups(cls, redshift_client, group_id): return redshift_clusters @staticmethod - def make_priviate(redshift_client, cluster_id): + def make_private(redshift_client, cluster_id): """ Sets the cluster access as private. @@ -98,7 +98,7 @@ def make_priviate(self): :return: nothing """ try: - RedshiftClusterOperations.make_priviate(self.account.client("redshift"), self.name) + RedshiftClusterOperations.make_private(self.account.client("redshift"), self.name) except Exception: logging.exception(f"Failed to modify {self.name} cluster ") return False diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index 00d80914..a67f29be 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -90,7 +90,7 @@ def clean_redshift_public_access(self, batch=False): logging.debug(f"Remediating '{cluster_details.name}' public access") remediation_succeed = True - if cluster_details.make_priviate(): + if cluster_details.make_private(): comment = (f"Cluster '{cluster_details.name}' public access issue " f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region " f"was remediated by hammer") From e42c44bb30b4f9c39355cd9bdd37557856f5fdd2 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 26 Jul 2019 11:01:19 +0530 Subject: [PATCH 133/193] Updated with instance type. Updated with instance type. 
--- deployment/cf-templates/reporting-remediation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/reporting-remediation.json b/deployment/cf-templates/reporting-remediation.json index 731b8b02..c66f7d71 100755 --- a/deployment/cf-templates/reporting-remediation.json +++ b/deployment/cf-templates/reporting-remediation.json @@ -9,7 +9,7 @@ }, "InstanceType" : { "Type" : "String", - "Default" : "t2.small", + "Default" : "t2.medium", "AllowedValues" : ["t2.small", "t2.medium", "t2.large", "t2.xlarge", "t2.2xlarge"] }, "Vpcid": { From 7376c25da140b64707acd4a1b144972b10849809 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 26 Jul 2019 11:15:17 +0530 Subject: [PATCH 134/193] Reversed instance type. Reversed instance type. --- deployment/cf-templates/reporting-remediation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/reporting-remediation.json b/deployment/cf-templates/reporting-remediation.json index c66f7d71..731b8b02 100755 --- a/deployment/cf-templates/reporting-remediation.json +++ b/deployment/cf-templates/reporting-remediation.json @@ -9,7 +9,7 @@ }, "InstanceType" : { "Type" : "String", - "Default" : "t2.medium", + "Default" : "t2.small", "AllowedValues" : ["t2.small", "t2.medium", "t2.large", "t2.xlarge", "t2.2xlarge"] }, "Vpcid": { From ae0f678442926576dd72aa929d5dc6949db10440 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 26 Jul 2019 13:12:47 +0530 Subject: [PATCH 135/193] Added missing variable. Added missing variable. 
--- .../reporting/create_security_groups_tickets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 9be07649..5e5ba399 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -340,7 +340,7 @@ def create_tickets_securitygroups(self): iam_client = account.client("iam") if account.session is not None else None redshift_client = account.client("redshift") if account.session is not None else None - rds_instance_details = elb_instance_details = None + rds_instance_details = elb_instance_details = sg_redshift_details = None if ec2_client is not None: ec2_instances = EC2Operations.get_instance_details_of_sg_associated(ec2_client, group_id) From 0c927f2d7f449814a39abf1a75db206b01fd07a7 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 29 Jul 2019 15:50:19 +0530 Subject: [PATCH 136/193] Updated with function name. Updated with function name. --- hammer/library/aws/redshift.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/redshift.py b/hammer/library/aws/redshift.py index a675237c..9078c6a4 100644 --- a/hammer/library/aws/redshift.py +++ b/hammer/library/aws/redshift.py @@ -92,7 +92,7 @@ def __init__(self, account, name, tags, is_encrypted=None, is_public=None, is_lo self.is_public = is_public self.is_logging = is_logging - def make_priviate(self): + def make_private(self): """ Modify cluster as private. :return: nothing From 97c405629d2a22556ed571653957c96c680a10bd Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 29 Jul 2019 20:49:59 +0530 Subject: [PATCH 137/193] Updated with review comment changes. Updated with review comment changes. 
--- hammer/library/aws/elasticsearch.py | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index 6c1a986c..83f1aad3 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -150,26 +150,7 @@ def validate_access_policy(cls, policy_details): """ public_policy = False for statement in policy_details.get("Statement", []): - effect = statement['Effect'] - principal = statement.get('Principal', {}) - not_principal = statement.get('NotPrincipal', None) - condition = statement.get('Condition', None) - suffix = "/0" - # check both `Principal` - `{"AWS": "*"}` and `"*"` - # and condition (if exists) to be restricted (not "0.0.0.0/0") - if effect == "Allow" and \ - (principal == "*" or principal.get("AWS") == "*"): - if condition is not None: - if suffix in str(condition.get("IpAddress")): - return True - else: - return True - if effect == "Allow" and \ - not_principal is not None: - # TODO: it is not recommended to use `Allow` with `NotPrincipal`, need to write proper check for such case - # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notprincipal.html - logging.error(f"TODO: is this statement public???\n{statement}") - return False + public_policy = S3Operations.public_statement(statement) return public_policy From 74ed2b53f5505f9458fe19deb150d59d6102e03a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 29 Jul 2019 21:01:36 +0530 Subject: [PATCH 138/193] Added review comment changes. Added review comment changes. 
--- .../create_elasticsearch_unencrypted_issue_tickets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 72debd8c..49ae0d2d 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -116,6 +116,8 @@ def create_tickets_elasticsearch_unencryption(self): f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*Domain ID*: {domain_name}\n" + f" Encryption enabled at rest: {encrypted_at_rest}\n" + f" Encryption enabled in transit: {encrypted_at_transit}\n" ) issue_description += JiraOperations.build_tags_table(tags) @@ -130,8 +132,6 @@ def create_tickets_elasticsearch_unencryption(self): f"5. After creation of new domain, migrate your data to new domain. \n " ) - - try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, From 196170d62840d07a85f16a862fac73898fb5f73d Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 30 Jul 2019 10:05:58 +0530 Subject: [PATCH 139/193] Updated with mapping changes. Updated with mapping changes. Deployment failing due to 100+ mappings. 
--- deployment/cf-templates/identification.json | 335 ++++++++------------ 1 file changed, 133 insertions(+), 202 deletions(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index b85e771c..6747ba8e 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -272,9 +272,6 @@ }, "Mappings": { "NamingStandards": { - "IdentificationMetricsNamespace": { - "value": "HammerIdentification" - }, "SNSTopicNameIdentificationErrors": { "value": "identification-errors" }, @@ -482,138 +479,72 @@ "BackupDDBLambdaFunctionName": { "value": "backup-ddb" }, - "InitiateSecurityGroupLambdaFunctionName": { - "value": "initiate-security-groups" - }, - "IdentifySecurityGroupLambdaFunctionName": { - "value": "describe-security-groups" - }, - "InitiateCloudTrailsLambdaFunctionName": { - "value": "initiate-cloudtrails" - }, - "IdentifyCloudTrailsLambdaFunctionName": { - "value": "describe-cloudtrails" - }, - "InitiateS3ACLLambdaFunctionName": { - "value": "initiate-s3-acl" - }, - "IdentifyS3ACLLambdaFunctionName": { - "value": "describe-s3-acl" - }, - "InitiateS3PolicyLambdaFunctionName": { - "value": "initiate-s3-policy" - }, - "IdentifyS3PolicyLambdaFunctionName": { - "value": "describe-s3-policy" - }, - "InitiateIAMUserKeysRotationLambdaFunctionName": { - "value": "initiate-iam-user-keys-rotation" - }, - "IdentifyIAMUserKeysRotationLambdaFunctionName": { - "value": "describe-iam-user-keys-rotation" - }, - "InitiateIAMUserInactiveKeysLambdaFunctionName": { - "value": "initiate-iam-user-inactive-keys" - }, - "IdentifyIAMUserInactiveKeysLambdaFunctionName": { - "value": "describe-iam-user-inactive-keys" + "SecurityGroupLambdaFunctionName": { + "value": "security-groups" }, - "InitiateEBSVolumesLambdaFunctionName": { - "value": "initiate-ebs-unencrypted-volumes" + "CloudTrailsLambdaFunctionName": { + "value": "cloudtrails" }, - "IdentifyEBSVolumesLambdaFunctionName": { - "value": 
"describe-ebs-unencrypted-volumes" + "S3ACLLambdaFunctionName": { + "value": "s3-acl" }, - "InitiateEBSSnapshotsLambdaFunctionName": { - "value": "initiate-ebs-public-snapshots" + "S3PolicyLambdaFunctionName": { + "value": "s3-policy" }, - "IdentifyEBSSnapshotsLambdaFunctionName": { - "value": "describe-ebs-public-snapshots" + "IAMUserKeysRotationLambdaFunctionName": { + "value": "iam-user-keys-rotation" }, - "InitiateRDSSnapshotsLambdaFunctionName": { - "value": "initiate-rds-public-snapshots" + "IAMUserInactiveKeysLambdaFunctionName": { + "value": "iam-user-inactive-keys" }, - "IdentifyRDSSnapshotsLambdaFunctionName": { - "value": "describe-rds-public-snapshots" + "EBSVolumesLambdaFunctionName": { + "value": "ebs-unencrypted-volumes" }, - "InitiateAMIPublicAccessLambdaFunctionName": { - "value": "initiate-ami-public-access" + "EBSSnapshotsLambdaFunctionName": { + "value": "ebs-public-snapshots" }, - "IdentifyAMIPublicAccessLambdaFunctionName": { - "value": "describe-ami-public-access" + "RDSSnapshotsLambdaFunctionName": { + "value": "rds-public-snapshots" }, - "InitiateSQSPublicPolicyLambdaFunctionName": { - "value": "initiate-sqs-public-policy" + "AMIPublicAccessLambdaFunctionName": { + "value": "ami-public-access" }, - "IdentifySQSPublicPolicyLambdaFunctionName": { - "value": "describe-sqs-public-policy" + "SQSPublicPolicyLambdaFunctionName": { + "value": "sqs-public-policy" }, - "InitiateS3EncryptionLambdaFunctionName": { - "value": "initiate-s3-encryption" + "S3EncryptionLambdaFunctionName": { + "value": "s3-encryption" }, - "IdentifyS3EncryptionLambdaFunctionName": { - "value": "describe-s3-encryption" + "RDSEncryptionLambdaFunctionName": { + "value": "rds-encryption" }, - "InitiateRDSEncryptionLambdaFunctionName": { - "value": "initiate-rds-encryption" + "RedshiftPublicAccessLambdaFunctionName": { + "value": "redshift-public-access" }, - "IdentifyRDSEncryptionLambdaFunctionName": { - "value": "describe-rds-encryption" + 
"RedshiftClusterEncryptionLambdaFunctionName": { + "value": "redshift-cluster-encryption" }, - "InitiateRedshiftPublicAccessLambdaFunctionName": { - "value": "initiate-redshift-public-access" + "RedshiftLoggingLambdaFunctionName": { + "value": "redshift-logging" }, - "IdentifyRedshiftPublicAccessLambdaFunctionName": { - "value": "describe-redshift-public-access" + "ECSPrivilegedAccessLambdaFunctionName": { + "value": "ecs-privileged-access" }, - "InitiateRedshiftClusterEncryptionLambdaFunctionName": { - "value": "initiate-redshift-cluster-encryption" + "ECSLoggingLambdaFunctionName": { + "value": "ecs-logging" }, - "IdentifyRedshiftClusterEncryptionLambdaFunctionName": { - "value": "describe-redshift-cluster-encryption" + "ECSExternalImageSourceLambdaFunctionName": { + "value": "ecs-external-image-source" }, - "InitiateRedshiftLoggingLambdaFunctionName": { - "value": "initiate-redshift-logging" + "ESLoggingLambdaFunctionName": { + "value": "elasticsearch-logging" }, - "IdentifyRedshiftLoggingLambdaFunctionName": { - "value": "describe-redshift-logging" + "ESEncryptionLambdaFunctionName": { + "value": "elasticsearch-encryption" }, - "InitiateECSPrivilegedAccessLambdaFunctionName": { - "value": "initiate-ecs-privileged-access" - }, - "IdentifyECSPrivilegedAccessLambdaFunctionName": { - "value": "describe-ecs-privileged-access" - }, - "InitiateECSLoggingLambdaFunctionName": { - "value": "initiate-ecs-logging" - }, - "IdentifyECSLoggingLambdaFunctionName": { - "value": "describe-ecs-logging" - }, - "InitiateECSExternalImageSourceLambdaFunctionName": { - "value": "initiate-ecs-external-image-source" - }, - "IdentifyECSExternalImageSourceLambdaFunctionName": { - "value": "describe-ecs-external-image-source" - }, - "InitiateESLoggingLambdaFunctionName": { - "value": "initiate-elasticsearch-logging" - }, - "IdentifyESLoggingLambdaFunctionName": { - "value": "describe-elasticsearch-logging" - }, - "InitiateESEncryptionLambdaFunctionName": { - "value": 
"initiate-elasticsearch-encryption" - }, - "IdentifyESEncryptionLambdaFunctionName": { - "value": "describe-elasticsearch-encryption" - }, - "InitiateESPublicAccessLambdaFunctionName": { - "value": "initiate-elasticsearch-public-access" - }, - "IdentifyESPublicAccessLambdaFunctionName": { - "value": "describe-elasticsearch-public-access" - } + "ESPublicAccessLambdaFunctionName": { + "value": "elasticsearch-public-access" + } } }, "Resources": { @@ -804,11 +735,11 @@ "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, "InitiateLambdaDescription": "Lambda function for initiate to identify bad security groups", "EvaluateLambdaDescription": "Lambda function to describe security groups unrestricted access.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSecurityGroupLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "SecurityGroupLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySecurityGroupLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "SecurityGroupLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_sec_grps.lambda_handler", "EvaluateLambdaHandler": "describe_sec_grps_unrestricted_access.lambda_handler", @@ -844,11 +775,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, "InitiateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", "EvaluateLambdaDescription": "Lambda function for initiate identification of CloudTrail issues", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"InitiateCloudTrailsLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "CloudTrailsLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyCloudTrailsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "CloudTrailsLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_cloudtrails.lambda_handler", "EvaluateLambdaHandler": "describe_cloudtrails.lambda_handler", @@ -884,11 +815,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3ACL" }, "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", "EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3ACLLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "S3ACLLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3ACLLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "S3ACLLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_acl.lambda_handler", "EvaluateLambdaHandler": "describe_s3_bucket_acl.lambda_handler", @@ -924,11 +855,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Policy" }, "InitiateLambdaDescription": "Lambda function for initiate to identify public s3 buckets.", 
"EvaluateLambdaDescription": "Lambda function to describe public s3 buckets.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3PolicyLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "S3PolicyLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3PolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "S3PolicyLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_s3_bucket_policy.lambda_handler", "EvaluateLambdaHandler": "describe_s3_bucket_policy.lambda_handler", @@ -964,11 +895,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserKeysRotation" }, "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which to be rotate.", "EvaluateLambdaDescription": "Lambda function to describe IAM user keys to be rotated.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserKeysRotationLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "IAMUserKeysRotationLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserKeysRotationLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "IAMUserKeysRotationLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": 
"initiate_to_desc_iam_users_key_rotation.lambda_handler", "EvaluateLambdaHandler": "describe_iam_key_rotation.lambda_handler", @@ -1004,11 +935,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, "InitiateLambdaDescription": "Lambda function for initiate to identify IAM user keys which last used.", "EvaluateLambdaDescription": "Lambda function to describe IAM user keys last used.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "IAMUserInactiveKeysLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyIAMUserInactiveKeysLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "IAMUserInactiveKeysLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_iam_access_keys.lambda_handler", "EvaluateLambdaHandler": "describe_iam_accesskey_details.lambda_handler", @@ -1044,11 +975,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted EBS volumes.", "EvaluateLambdaDescription": "Lambda function to describe unencrypted ebs volumes.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSVolumesLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "EBSVolumesLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSVolumesLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "EBSVolumesLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_ebs_unencrypted_volumes.lambda_handler", "EvaluateLambdaHandler": "describe_ebs_unencrypted_volumes.lambda_handler", @@ -1084,11 +1015,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, "InitiateLambdaDescription": "Lambda function for initiate to identify public EBS snapshots.", "EvaluateLambdaDescription": "Lambda function to describe public ebs snapshots.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateEBSSnapshotsLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "EBSSnapshotsLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyEBSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "EBSSnapshotsLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_ebs_public_snapshots.lambda_handler", "EvaluateLambdaHandler": "describe_ebs_public_snapshots.lambda_handler", @@ -1124,11 +1055,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSSnapshots" }, "InitiateLambdaDescription": "Lambda function for initiate to identify public RDS snapshots.", "EvaluateLambdaDescription": "Lambda function to describe public RDS snapshots.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", 
"InitiateRDSSnapshotsLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "RDSSnapshotsLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSSnapshotsLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "RDSSnapshotsLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_rds_public_snapshots.lambda_handler", "EvaluateLambdaHandler": "describe_rds_public_snapshots.lambda_handler", @@ -1164,11 +1095,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, "InitiateLambdaDescription": "Lambda function for initiate to identify public SQS queues.", "EvaluateLambdaDescription": "Lambda function to describe public SQS queues.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateSQSPublicPolicyLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "SQSPublicPolicyLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifySQSPublicPolicyLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "SQSPublicPolicyLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_sqs_public_policy.lambda_handler", "EvaluateLambdaHandler": "describe_sqs_public_policy.lambda_handler", @@ -1204,11 +1135,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, 
"InitiateLambdaDescription": "Lambda function for initiate to identify S3 unencrypted buckets.", "EvaluateLambdaDescription": "Lambda function to describe un-encrypted S3 buckets.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateS3EncryptionLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "S3EncryptionLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyS3EncryptionLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "S3EncryptionLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_s3_encryption.lambda_handler", "EvaluateLambdaHandler": "describe_s3_encryption.lambda_handler", @@ -1244,11 +1175,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted RDS instances.", "EvaluateLambdaDescription": "Lambda function to describe un-encrypted RDS instances.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRDSEncryptionLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "RDSEncryptionLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRDSEncryptionLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", 
"RDSEncryptionLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_rds_instance_encryption.lambda_handler", "EvaluateLambdaHandler": "describe_rds_instance_encryption.lambda_handler", @@ -1284,11 +1215,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, "InitiateLambdaDescription": "Lambda function for initiate to identify public AMI access issues.", "EvaluateLambdaDescription": "Lambda function to describe public AMI issues.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateAMIPublicAccessLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "AMIPublicAccessLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyAMIPublicAccessLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "AMIPublicAccessLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", @@ -1324,11 +1255,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftPublicAccess" }, "InitiateLambdaDescription": "Lambda function to initiate to identify Redshift public access issues.", "EvaluateLambdaDescription": "Lambda function to describe Redshift public access issues.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftPublicAccessLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", 
"RedshiftPublicAccessLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftPublicAccessLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "RedshiftPublicAccessLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_redshift_cluster_public_access.lambda_handler", "EvaluateLambdaHandler": "describe_redshift_cluster_public_access.lambda_handler", @@ -1364,11 +1295,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftClusterEncryption" }, "InitiateLambdaDescription": "Lambda function for initiate to identify Redshift cluster is encrypted or not.", "EvaluateLambdaDescription": "Lambda function to describe Redshift cluster is encrypted or not.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "RedshiftClusterEncryptionLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftClusterEncryptionLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "RedshiftClusterEncryptionLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_redshift_encryption.lambda_handler", "EvaluateLambdaHandler": "describe_redshift_encryption.lambda_handler", @@ -1405,11 +1336,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftLogging" }, "InitiateLambdaDescription": "Lambda function for initiate to 
identify Redshift logging issues.", "EvaluateLambdaDescription": "Lambda function to describe Redshift logging issues.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateRedshiftLoggingLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "RedshiftLoggingLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyRedshiftLoggingLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "RedshiftLoggingLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_redshift_logging_issues.lambda_handler", "EvaluateLambdaHandler": "describe_redshift_logging_issues.lambda_handler", @@ -1445,11 +1376,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSPrivilegedAccess" }, "InitiateLambdaDescription": "Lambda function for initiate to identify ECS privileged access issues.", "EvaluateLambdaDescription": "Lambda function to describe ECS privileged access issues.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateECSPrivilegedAccessLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "ECSPrivilegedAccessLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyECSPrivilegedAccessLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", 
"ECSPrivilegedAccessLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_ecs_privileged_access_issues.lambda_handler", "EvaluateLambdaHandler": "describe_ecs_privileged_access_issues.lambda_handler", @@ -1485,11 +1416,11 @@ "IdentificationLambdaSource": {"Ref": "SourceIdentificationECSLogging"}, "InitiateLambdaDescription": "Lambda function for initiate to identify ECS logging enabled or not.", "EvaluateLambdaDescription": "Lambda function to describe ECS logging enabled or not.", - "InitiateLambdaName": {"Fn::Join": ["",[{"Ref": "ResourcesPrefix"}, - { "Fn::FindInMap": ["NamingStandards","InitiateECSLoggingLambdaFunctionName","value"]}] + "InitiateLambdaName": {"Fn::Join": ["",[{"Ref": "ResourcesPrefix"}, "initiate-", + { "Fn::FindInMap": ["NamingStandards","ECSLoggingLambdaFunctionName","value"]}] ]}, - "EvaluateLambdaName": {"Fn::Join": ["",[{"Ref": "ResourcesPrefix"}, - {"Fn::FindInMap": ["NamingStandards","IdentifyECSLoggingLambdaFunctionName","value"]}] + "EvaluateLambdaName": {"Fn::Join": ["",[{"Ref": "ResourcesPrefix"}, "describe-", + {"Fn::FindInMap": ["NamingStandards","ECSLoggingLambdaFunctionName","value"]}] ]}, "InitiateLambdaHandler": "initiate_to_desc_ecs_logging_issues.lambda_handler", "EvaluateLambdaHandler": "describe_ecs_logging_issues.lambda_handler", @@ -1525,11 +1456,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSExternalImageSource" }, "InitiateLambdaDescription": "Lambda function for initiate to identify ECS image source is internal or external.", "EvaluateLambdaDescription": "Lambda function to describe ECS image source is internal or external.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateECSExternalImageSourceLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", 
"ECSExternalImageSourceLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyECSExternalImageSourceLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "ECSExternalImageSourceLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_ecs_external_image_source_issues.lambda_handler", "EvaluateLambdaHandler": "describe_ecs_external_image_source_issues.lambda_handler", @@ -1565,11 +1496,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchEncryption" }, "InitiateLambdaDescription": "Lambda function for initiate to identify unencrypted Elasticsearch domains.", "EvaluateLambdaDescription": "Lambda function to describe un-encrypted Elasticsearch domains.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateESEncryptionLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "ESEncryptionLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyESEncryptionLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "ESEncryptionLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_elasticsearch_unencrypted_domains.lambda_handler", "EvaluateLambdaHandler": "describe_elasticsearch_unencrypted_domains.lambda_handler", @@ -1605,11 +1536,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchLogging" }, "InitiateLambdaDescription": "Lambda function for initiate to identify 
Elasticsearch domain logging issues.", "EvaluateLambdaDescription": "Lambda function to describe Elasticsearch domain logging issues.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateESLoggingLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "ESLoggingLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyESLoggingLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": ["NamingStandards", "ESLoggingLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_elasticsearch_domains_logging_issues.lambda_handler", "EvaluateLambdaHandler": "describe_elasticsearch_domains_logging_issues.lambda_handler", @@ -1645,11 +1576,11 @@ "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchPublicAccess" }, "InitiateLambdaDescription": "Lambda function for initiate to identify publicly accessible Elasticsearch domains.", "EvaluateLambdaDescription": "Lambda function to describe publicly accessible Elasticsearch domains.", - "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "InitiateESPublicAccessLambdaFunctionName", "value"] } ] + "InitiateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "initiate-", + { "Fn::FindInMap": ["NamingStandards", "ESPublicAccessLambdaFunctionName", "value"] } ] ]}, - "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, - { "Fn::FindInMap": ["NamingStandards", "IdentifyESPublicAccessLambdaFunctionName", "value"] } ] + "EvaluateLambdaName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "describe-", + { "Fn::FindInMap": 
["NamingStandards", "ESPublicAccessLambdaFunctionName", "value"] } ] ]}, "InitiateLambdaHandler": "initiate_to_desc_elasticsearch_public_access_domains.lambda_handler", "EvaluateLambdaHandler": "describe_elasticsearch_public_access_domains.lambda_handler", From 5f1e0a611f5c4dc2db427b432fdb2fb9aaef3485 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 30 Jul 2019 11:28:56 +0530 Subject: [PATCH 140/193] Updated with ticket description. Updated with ticket description. --- ...elasticsearch_unencrypted_issue_tickets.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 49ae0d2d..ab818dc3 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -90,24 +90,24 @@ def create_tickets_elasticsearch_unencryption(self): product = tags.get("product", None) issue_description = "" - if not encrypted_at_rest: - issue_description +=( - f"Elasticsearch domain needs to encrypted at rest. \n\n" + + if not encrypted_at_rest and not encrypted_at_transit: + issue_description += ( + f"Elasticsearch domain needs to be encrypt at rest and transit. \n\n" ) - issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' unencrypted at rest" + issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' " f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") - elif not encrypted_at_transit: issue_description += ( f"Elasticsearch domain needs to be encrypt at transit. 
\n\n" ) issue_summary = (f"Elasticsearch domain '{domain_name}' unencrypted at transit" f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") - else: + elif not encrypted_at_rest: issue_description += ( - f"Elasticsearch domain needs to be encrypt at rest and transit. \n\n" + f"Elasticsearch domain needs to encrypted at rest. \n\n" ) - issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' " + issue_summary = (f"Elasticsearch unencrypted domain '{domain_name}' unencrypted at rest" f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") issue_description += ( @@ -116,8 +116,8 @@ def create_tickets_elasticsearch_unencryption(self): f"*Account ID*: {account_id}\n" f"*Region*: {region}\n" f"*Domain ID*: {domain_name}\n" - f" Encryption enabled at rest: {encrypted_at_rest}\n" - f" Encryption enabled in transit: {encrypted_at_transit}\n" + f"*Encryption enabled at rest*: {encrypted_at_rest}\n" + f"*Encryption enabled in transit*: {encrypted_at_transit}\n" ) issue_description += JiraOperations.build_tags_table(tags) From 596ee5dc052a9d0c632767ce63f233ced611c13d Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 30 Jul 2019 11:50:26 +0530 Subject: [PATCH 141/193] Updated with SG assoiciated elasticsearch details. Updated with SG assoiciated elasticsearch details. 
--- .../create_security_groups_tickets.py | 39 +++++++++++++++++-- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 1be6cc18..5d4578e3 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -22,6 +22,7 @@ from library.aws.rds import RDSOperations from library.aws.ecs import ECSClusterOperations from library.aws.redshift import RedshiftClusterOperations +from library.aws.elasticsearch import ElasticSearchOperations from library.utility import SingletonInstance, SingletonInstanceException @@ -251,6 +252,23 @@ def build_redshift_clusters_table(redshift_clusters): return cluster_details, in_use + @staticmethod + def build_es_domains_table(es_domains): + domain_details = "" + in_use = False + + if len(es_domains) > 0: + in_use = True + domain_details += ( + f"\n*Elasticsearch Domains:*\n" + f"||Domain Name||Domain Arn||\n") + for domain in es_domains: + domain_details += ( + f"|{domain.domain_name}|{domain.domain_arn}|\n" + ) + + return domain_details, in_use + def create_tickets_securitygroups(self): """ Class function to create jira tickets """ table_name = self.config.sg.ddb_table_name @@ -350,7 +368,7 @@ def create_tickets_securitygroups(self): ec2_client = account.client("ec2") if account.session is not None else None sg_instance_details = ec2_owner = ec2_bu = ec2_product = None - sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = sg_in_use_ecs = sg_in_use_redshift = None + sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = sg_in_use_ecs = sg_in_use_redshift = sg_in_use_es = None sg_public = sg_blind_public = False rds_client = account.client("rds") if account.session is not None else None @@ -360,9 +378,11 @@ def create_tickets_securitygroups(self): iam_client = 
account.client("iam") if account.session is not None else None ecs_client = account.client("ecs") if account.session is not None else None - rds_instance_details = elb_instance_details = sg_redshift_details = sg_ecs_details = None + rds_instance_details = elb_instance_details = sg_redshift_details = sg_ecs_details = sg_es_details = None redshift_client = account.client("redshift") if account.session is not None else None + es_client = account.client("es") if account.session is not None else None + if ec2_client is not None: ec2_instances = EC2Operations.get_instance_details_of_sg_associated(ec2_client, group_id) sg_instance_details, instance_profile_details, \ @@ -397,7 +417,6 @@ def create_tickets_securitygroups(self): logging.exception( f"Failed to build ECS Cluster details for '{group_name} / {group_id}' in {account}") - sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_ecs if redshift_client is not None: try: redshift_clusters = RedshiftClusterOperations.get_redshift_vpc_security_groups( @@ -408,7 +427,17 @@ def create_tickets_securitygroups(self): logging.exception( f"Failed to build Redshift Cluster details for '{group_name} / {group_id}' in {account}") - sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_redshift + if es_client is not None: + try: + es_domains = ElasticSearchOperations.get_elasticsearch_details_of_sg_associated( + es_client, group_id) + sg_es_details, sg_in_use_es = self.build_es_domains_table( + es_domains) + except Exception: + logging.exception( + f"Failed to build Redshift Cluster details for '{group_name} / {group_id}' in {account}") + + sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds or sg_in_use_redshift or sg_in_use_ecs or sg_in_use_es owner = group_owner if group_owner is not None else ec2_owner bu = group_bu if group_bu is not None else ec2_bu @@ -506,6 +535,8 @@ def create_tickets_securitygroups(self): issue_description += f"{sg_redshift_details if sg_redshift_details else 
''}" + issue_description += f"{sg_es_details if sg_es_details else ''}" + issue_description += ( f"*Recommendation*: " f"Allow access only for a minimum set of required ip addresses/ranges from [RFC1918|https://tools.ietf.org/html/rfc1918]. " From 3c6ccef2cf5e9874221d1057371665c468b7143c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 30 Jul 2019 11:56:51 +0530 Subject: [PATCH 142/193] Updated with readme documentation. Updated with readme documentation. --- README.md | 9 ++ docs/pages/playbook18_ecs_logging.md | 177 +++++++++++++++++++++++++++ 2 files changed, 186 insertions(+) create mode 100644 docs/pages/playbook18_ecs_logging.md diff --git a/README.md b/README.md index 3ef84841..5e7f31cc 100755 --- a/README.md +++ b/README.md @@ -23,6 +23,15 @@ Dow Jones Hammer documentation is available via GitHub Pages at [https://dowjone * [S3 Unencrypted Buckets](https://dowjones.github.io/hammer/playbook11_s3_unencryption.html) * [RDS Unencrypted Instances](https://dowjones.github.io/hammer/playbook12_rds_unencryption.html) * [AMIs Public Access](https://dowjones.github.io/hammer/playbook13_amis_public_access.html) +* [Redshift Unencrypted Clusters](https://dowjones.github.io/hammer/playbook15_redshift_unencryption.html) +* [Redshift Public Clusters](https://dowjones.github.io/hammer/playbook16_redshift_public_clusters.html) +* [Redshift Logging Issues](https://dowjones.github.io/hammer/playbook17_redshift_audit_logging.html) +* [ECS Logging Issues](https://dowjones.github.io/hammer/playbook18_ecs_logging.html) +* [ECS Privileged Access Issues](https://dowjones.github.io/hammer/playbook19_ecs_privileged_access.html) +* [ECS Exteranal Image Source Issues](https://dowjones.github.io/hammer/playbook20_ecs_external_image_source.html) +* [Elasticsearch Unencrypted Domains](https://dowjones.github.io/hammer/playbook21_elasticsearch_unencryption.html) +* [Elasticsearch Public Domains](https://dowjones.github.io/hammer/playbook22_elasticsearch_public_access.html) 
+* [Elasticsearch Logging Issues](https://dowjones.github.io/hammer/playbook23_elasticsearch_logging.html) ## Technologies * Python 3.6 diff --git a/docs/pages/playbook18_ecs_logging.md b/docs/pages/playbook18_ecs_logging.md new file mode 100644 index 00000000..ddc94820 --- /dev/null +++ b/docs/pages/playbook18_ecs_logging.md @@ -0,0 +1,177 @@ +--- +title: ECS logging issues +keywords: playbook18 +sidebar: mydoc_sidebar +permalink: playbook18_ecs_logging.html +--- + +# Playbook 18: ECS logging issues + +## Introduction + +This playbook describes how to configure Dow Jones Hammer to detect ECS logging issues. + +## 1. Issue Identification + +Dow Jones Hammer identifies those ECS logging enabled or not. + +When Dow Jones Hammer detects an issue, it writes the issue to the designated DynamoDB table. + +According to the [Dow Jones Hammer architecture](/index.html), the issue identification functionality uses two Lambda functions. +The table lists the Python modules that implement this functionality: + +|Designation |Path | +|--------------|:--------------------:| +|Initialization|`hammer/identification/lambdas/ecs-logging-issues-identification/initiate_to_desc_ecs_logging_issues.py`| +|Identification|`hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py`| + +## 2. Issue Reporting + +You can configure automatic reporting of cases when Dow Jones Hammer identifies an issue of this type. Dow Jones Hammer supports integration with [JIRA](https://www.atlassian.com/software/jira) and [Slack](https://slack.com/). +These types of reporting are independent from one another and you can turn them on/off in the Dow Jones Hammer configuration. 
+ +Thus, in case you have turned on the reporting functionality for this issue and configured corresponding integrations, Dow Jones Hammer, as [defined in the configuration](#43-the-ticket_ownersjson-file), can: +* raise a JIRA ticket and assign it to a specific person in your organization; +* send the issue notification to the Slack channel or directly to a Slack user. + +Additionally Dow Jones Hammer tries to detect person to report issue to by examining ECS logging status. In case the logging is not enable **valid JIRA/Slack user**: +* for JIRA: `jira_owner` parameter from [ticket_owners.json](#43-the-ticket_ownersjson-file) **is ignored** and discovered `owner` **is used instead** as a JIRA assignee; +* for Slack: discovered `owner` **is used in addition to** `slack_owner` value from [ticket_owners.json](#43-the-ticket_ownersjson-file). + +This Python module implements the issue reporting functionality: +``` +hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +``` + +## 3. Setup Instructions For This Issue + +To configure the detection, reporting, you should edit the following sections of the Dow Jones Hammer configuration files: + +### 3.1. The config.json File + +The **config.json** file is the main configuration file for Dow Jones Hammer that is available at `deployment/terraform/accounts/sample/config/config.json`. 
+To identify and report issues of this type, you should add the following parameters in the **ecs_logging** section of the **config.json** file: + +|Parameter Name |Description | Default Value| +|------------------------------|---------------------------------------|:------------:| +|`enabled` |Toggles issue detection for this issue |`true`| +|`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-ecs-logging` | +|`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`true`| + +Sample **config.json** section: +``` +""" +"ecs_logging": { + "enabled": true, + "ddb.table_name": "hammer-ecs-logging", + "reporting": true, + "remediation": false, + "remediation_retention_period": 21 + } +``` + +### 3.2. The whitelist.json File + +You can define exceptions to the general automatic remediation settings for specific ECS task definitions. To configure such exceptions, you should edit the **ecs_logging** section of the **whitelist.json** configuration file as follows: + +|Parameter Key | Parameter Value(s)| +|:------------:|:-----------------:| +|AWS Account ID|ECS task definition ids(s)| + +Sample **whitelist.json** section: +``` +"ecs_logging": { + "123456789012": ["task_definition_id1", "task_definition2"] +} +``` + +### 3.3. The ticket_owners.json File + +You should use the **ticket_owners.json** file to configure the integration of Dow Jones Hammer with JIRA and/or Slack for the issue reporting purposes. + +You can configure these parameters for specific AWS accounts and globally. Account-specific settings precede the global settings in the **ticket_owners.json** configuration file. 
+ +Check the following table for parameters: + +|Parameter Name |Description |Sample Value | +|---------------------|--------------------------------------------------------------------|:---------------:| +|`jira_project` |The name of the JIRA project where Dow Jones Hammer will create the issue | `AWSSEC` | +|`jira_owner` |The name of the JIRA user to whom Dow Jones Hammer will assign the issue | `Support-Cloud` | +|`jira_parent_ticket` |The JIRA ticket to which Dow Jones Hammer will link the new ticket it creates | `AWSSEC-1234` | +|`slack_owner` |Name(s) of the Slack channels (prefixed by `#`) and/or Slack users that will receive issue reports from Dow Jones Hammer | `["#devops-channel", "bob"]` | + +Sample **ticket_owners.json** section: + +Account-specific settings: +``` +{ + "account": { + "123456789012": { + "jira_project": "", + "jira_owner": "Support-Cloud", + "jira_parent_ticket": "", + "slack_owner": "" + } + }, + "jira_project": "AWSSEC", + "jira_owner": "Support-General", + "jira_parent_ticket": "AWSSEC-1234", + "slack_owner": ["#devops-channel", "bob"] +} +``` + +## 4. Logging + +Dow Jones Hammer uses **CloudWatch Logs** for logging purposes. + +Dow Jones Hammer automatically sets up CloudWatch Log Groups and Log Streams for this issue when you deploy Dow Jones Hammer. + +### 4.1. Issue Identification Logging + +Dow Jones Hammer issue identification functionality uses two Lambda functions: + +* Initialization: this Lambda function selects slave accounts to check for this issue as designated in the Dow Jones Hammer configuration files and triggers the check. +* Identification: this Lambda function identifies this issue for each account/region selected at the previous step. 
+ +You can see the logs for each of these Lambda functions in the following Log Groups: + +|Lambda Function|CloudWatch Log Group Name | +|---------------|--------------------------------------------| +|Initialization |`/aws/lambda/initiate-ecs-logging`| +|Identification |`/aws/lambda/describe-ecs-logging`| + +### 4.2. Issue Reporting Logging + +Dow Jones Hammer issue reporting functionality uses ```/aws/ec2/hammer-reporting-remediation``` CloudWatch Log Group for logging. The Log Group contains issue-specific Log Streams named as follows: + +|Designation|CloudWatch Log Stream Name | +|-----------|---------------------------------------------------------| +|Reporting |`reporting.create_ecs_logging_issue_tickets`| + + +### 4.3. Slack Reports + +In case you have enabled Dow Jones Hammer and Slack integration, Dow Jones Hammer sends notifications about issue identification and reporting to the designated Slack channel and/or recipient(s). + +Check [ticket_owners.json](#43-the-ticket_ownersjson-file) configuration for further guidance. + +### 4.4. Using CloudWatch Logs for Dow Jones Hammer + +To access Dow Jones Hammer logs, proceed as follows: + +1. Open **AWS Management Console**. +2. Select **CloudWatch** service. +3. Select **Logs** from the CloudWatch sidebar. +4. Select the log group you want to explore. The log group will open. +5. Select the log stream you want to explore. + +Check [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) for further guidance. + +## 5. Issue specific details in DynamoDB + +Dow Jones Hammer stores various issue specific details in DynamoDB as a map under `issue_details` key. You can use it to create your own reporting modules. 
+ +|Key |Type |Description |Example | +|-------------|:----:|----------------------------------|------------------------------------------------| +|`id` |string|ecs task definition id |`task-definition-id` | +|`tags` |map |Tags associated with ECS task |`{"Name": "TestKey", "service": "archive"}`| \ No newline at end of file From cb9d9607d2c407097321100147e2508a437dc211 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 30 Jul 2019 15:02:28 +0530 Subject: [PATCH 143/193] Updated with lambda schedulers. Updated with lambda schedulers. --- deployment/cf-templates/identification.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 6747ba8e..82caa983 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -929,7 +929,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "0 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, From fb9f4533848271cece52d2aaca86c5a8443dfae8 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 1 Aug 2019 00:29:43 +0530 Subject: [PATCH 144/193] Updated with production rollout chagnes. Updated with production rollout chagnes. 
--- deployment/cf-templates/ddb.json | 2 +- hammer/library/aws/ecs.py | 2 +- .../reporting/create_security_groups_tickets.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index ec7179d8..82a39012 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "25", + "ReadCapacityUnits": "35", "WriteCapacityUnits": "2" }, "SSESpecification": { diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 1b46de8e..15eaca2c 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -44,7 +44,7 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ] ) - ec2_instance_id = container_instance[0]["ec2InstanceId"] + ec2_instance_id = container_instance["containerInstances"][0]["ec2InstanceId"] ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 5d4578e3..0d0de669 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -20,7 +20,7 @@ from library.aws.utility import Account from library.aws.security_groups import RestrictionStatus from library.aws.rds import RDSOperations -from library.aws.ecs import ECSClusterOperations +#from library.aws.ecs import ECSClusterOperations from library.aws.redshift import RedshiftClusterOperations from library.aws.elasticsearch import ElasticSearchOperations from library.utility import SingletonInstance, SingletonInstanceException @@ -407,7 +407,7 @@ def 
create_tickets_securitygroups(self): logging.exception( f"Failed to build RDS details for '{group_name} / {group_id}' in {account}") - if ecs_client is not None: + """if ecs_client is not None: try: ecs_instances = ECSClusterOperations.get_ecs_instance_security_groups(ec2_client, ecs_client, group_id) @@ -416,6 +416,7 @@ def create_tickets_securitygroups(self): except Exception: logging.exception( f"Failed to build ECS Cluster details for '{group_name} / {group_id}' in {account}") + """ if redshift_client is not None: try: From 3bd6cdaed12eb51c6633d34537fe68a715f68c8f Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 1 Aug 2019 13:27:40 +0530 Subject: [PATCH 145/193] Updated with deployment issues fix. Updated with deployment issues fix. --- hammer/library/aws/ecs.py | 2 +- .../reporting/create_security_groups_tickets.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 15eaca2c..6463d4a2 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -44,7 +44,7 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ] ) - ec2_instance_id = container_instance["containerInstances"][0]["ec2InstanceId"] + ec2_instance_id = container_instance["containerInstances"][0]["ec2InstanceId"] ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 0d0de669..5d4578e3 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -20,7 +20,7 @@ from library.aws.utility import Account from library.aws.security_groups import 
RestrictionStatus from library.aws.rds import RDSOperations -#from library.aws.ecs import ECSClusterOperations +from library.aws.ecs import ECSClusterOperations from library.aws.redshift import RedshiftClusterOperations from library.aws.elasticsearch import ElasticSearchOperations from library.utility import SingletonInstance, SingletonInstanceException @@ -407,7 +407,7 @@ def create_tickets_securitygroups(self): logging.exception( f"Failed to build RDS details for '{group_name} / {group_id}' in {account}") - """if ecs_client is not None: + if ecs_client is not None: try: ecs_instances = ECSClusterOperations.get_ecs_instance_security_groups(ec2_client, ecs_client, group_id) @@ -416,7 +416,6 @@ def create_tickets_securitygroups(self): except Exception: logging.exception( f"Failed to build ECS Cluster details for '{group_name} / {group_id}' in {account}") - """ if redshift_client is not None: try: From 36e3f8317c64caba5ecd29e86ceb9683b4e2b849 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 1 Aug 2019 15:11:16 +0530 Subject: [PATCH 146/193] Updated with indent chagnes. Updated with indent chagnes. 
--- hammer/library/aws/ecs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 6463d4a2..15eaca2c 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -44,7 +44,7 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): ] ) - ec2_instance_id = container_instance["containerInstances"][0]["ec2InstanceId"] + ec2_instance_id = container_instance["containerInstances"][0]["ec2InstanceId"] ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): ecs_instances.append(ECSCluster_Details( From f2a8bf69df7b3dc41f5d7811fa2dd94529da79ab Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 2 Aug 2019 08:35:07 +0530 Subject: [PATCH 147/193] Updated indent issues. Updated indent issues. --- hammer/library/aws/ecs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 15eaca2c..780c8a95 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -43,7 +43,6 @@ def get_ecs_instance_security_groups(cls, ec2_client, ecs_client, group_id): instance_arn, ] ) - ec2_instance_id = container_instance["containerInstances"][0]["ec2InstanceId"] ec2_instance = ec2_client.describe_instances(InstanceIds=[ec2_instance_id])['Reservations'][0]["Instances"][0] if group_id in str(ec2_instance["SecurityGroups"]): From df115525c568c64851b5dda89ac6c5581553398a Mon Sep 17 00:00:00 2001 From: Alexey Chuprikov Date: Mon, 5 Aug 2019 16:13:39 +0300 Subject: [PATCH 148/193] Add workaround for hammer bot "status" command This message appears in logs when issuing "status" command: "lost websocket connection, try to reconnect now". The problem appears when the "status" message is sent to slack. 
It appears to be long message (althoug it's less then 40000 chars slack limit) and the slack should just truncate the message instead of closing the connection. But ii works if we use shorter messages. --- hammer/reporting-remediation/bot/commands.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/hammer/reporting-remediation/bot/commands.py b/hammer/reporting-remediation/bot/commands.py index 7d79a24c..f1f79ab1 100644 --- a/hammer/reporting-remediation/bot/commands.py +++ b/hammer/reporting-remediation/bot/commands.py @@ -81,7 +81,15 @@ def status(message): else: response += f"`disabled`" response += "\n" - message.reply(response) + # FIXME this is workaround to issues "lost websocket connection, try to reconnect now" that + # happens after we send accounts' statuses to slack. The message is long, however slack should handle + # this case and truncate the message without closing connection. I limited the size of message to 15000 + # which seems to work. + msg_length = 15000 + msg_start = 0 + while msg_start < len(response): + message.reply(response[msg_start:msg_start + msg_length]) + msg_start = msg_start + msg_length @respond_to('^(?P
.*) config$', re.IGNORECASE) From 6ba1ca1b31793179f3ad4a666cf8f1cdd33c17ef Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 5 Aug 2019 20:27:43 +0530 Subject: [PATCH 149/193] Updated with ecs image known source issues. Updated with ecs image known source issues. --- deployment/configs/config.json | 3 ++- hammer/library/aws/ecs.py | 24 +++++++++++++++++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index de6db85a..23f95791 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -201,7 +201,8 @@ "enabled": true, "topic_name": "hammer-describe-ecs-external-image-source-lambda", "ddb.table_name": "hammer-ecs-external-image-source", - "reporting": true + "reporting": true, + "safe_image_sources": ["amazonaws", "dowjones"] }, "es_domain_logging": { "enabled": true, diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 780c8a95..2cddc8ba 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -4,6 +4,7 @@ from library.utility import timeit from collections import namedtuple from library.aws.utility import convert_tags +from library.config import Config # structure which describes EC2 instance ECSCluster_Details = namedtuple('ECSCluster_Details', [ @@ -103,6 +104,18 @@ def __init__(self, account): self.account = account self.task_definitions = [] + def validate_image_source(self, image): + config = Config() + + is_external = True + safe_image_sources = config.ecs_external_image_source.safe_image_sources + for image_source in safe_image_sources: + if image_source in image: + is_external = False + break + + return is_external + def check(self, task_definitions=None): """ Walk through clusters in the account/region and check them. 
@@ -131,6 +144,7 @@ def check(self, task_definitions=None): container_image_details = [] disabled_logging_container_names = [] privileged_container_names = [] + external_image = False try: task_definition = self.account.client("ecs").describe_task_definition( taskDefinition=task_definition_name @@ -147,9 +161,10 @@ def check(self, task_definitions=None): privileged_container_names.append(container_name) image = container_definition.get('image') - image_details = {} if image is not None: - if image.split("/")[0].split(".")[-2:] != ['amazonaws', 'com']: + external_image = self.validate_image_source(image) + image_details = {} + if external_image: image_details["container_name"] = container_name image_details["image_url"] = image container_image_details.append(image_details) @@ -164,11 +179,6 @@ def check(self, task_definitions=None): else: is_privileged = False - if len(container_image_details) > 0: - external_image = True - else: - external_image = False - if "Tags" in task_definition: tags = task_definition["Tags"] task_definition_details = ECSTaskDefinitions(account=self.account, From f7d66c1395416d33fdff361c80082d7aac605e07 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 6 Aug 2019 16:09:48 +0530 Subject: [PATCH 150/193] Updated with ECS image known sources. Updated with ECS image known sources. 
--- deployment/configs/config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 23f95791..42200839 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -202,7 +202,7 @@ "topic_name": "hammer-describe-ecs-external-image-source-lambda", "ddb.table_name": "hammer-ecs-external-image-source", "reporting": true, - "safe_image_sources": ["amazonaws", "dowjones"] + "safe_image_sources": ["amazonaws", "dowjones", "artifactory"] }, "es_domain_logging": { "enabled": true, From d7632cb6afdd134cc444d0041ed28416c8e2bbed Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 6 Aug 2019 16:25:29 +0530 Subject: [PATCH 151/193] Updated with review comment changes. Updated with review comment changes. --- deployment/configs/config.json | 2 +- docs/pages/playbook20_ecs_external_image_source.md | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 42200839..3f72b475 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -202,7 +202,7 @@ "topic_name": "hammer-describe-ecs-external-image-source-lambda", "ddb.table_name": "hammer-ecs-external-image-source", "reporting": true, - "safe_image_sources": ["amazonaws", "dowjones", "artifactory"] + "safe_image_sources": ["amazonaws", "artifactory"] }, "es_domain_logging": { "enabled": true, diff --git a/docs/pages/playbook20_ecs_external_image_source.md b/docs/pages/playbook20_ecs_external_image_source.md index b3ebe9f1..658ed5a7 100644 --- a/docs/pages/playbook20_ecs_external_image_source.md +++ b/docs/pages/playbook20_ecs_external_image_source.md @@ -9,7 +9,7 @@ permalink: playbook20_ecs_external_image_source.html ## Introduction -This playbook describes how to configure Dow Jones Hammer to detect ECS image source is external or internal. 
+This playbook describes how to configure Dow Jones Hammer to detect ECS image source is external or internal based on configured image known sources. ## 1. Issue Identification @@ -66,8 +66,7 @@ Sample **config.json** section: "enabled": true, "ddb.table_name": "hammer-ecs-external-image-source", "reporting": true, - "remediation": false, - "remediation_retention_period": 21 + "safe_image_sources": ["amazonaws", "artifactory"] } ``` From beac6ebfd96a200cb3b701f875ea2ba7a207fbd9 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 19 Aug 2019 12:18:11 +0530 Subject: [PATCH 152/193] Updated with Lambda python version for ES encryption issues. Updated with Lambda python version for ES encryption issues. --- .../cf-templates/identification-nested.json | 7 +- deployment/cf-templates/identification.json | 66 ++++++++++++------- 2 files changed, 49 insertions(+), 24 deletions(-) diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json index 53d2fd81..ac3c9822 100644 --- a/deployment/cf-templates/identification-nested.json +++ b/deployment/cf-templates/identification-nested.json @@ -69,6 +69,9 @@ }, "SNSIdentificationErrors": { "Type": "String" + }, + "PythonVersion": { + "Type": "String" } }, "Conditions": { @@ -99,7 +102,7 @@ "MemorySize": 128, "Timeout": "300", "Role": { "Ref": "IdentificationIAMRole" }, - "Runtime": "python3.6" + "Runtime": { "Ref": "PythonVersion" } } }, "LogGroupLambdaInitiateEvaluation": { @@ -148,7 +151,7 @@ "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, "Timeout": "300", "Role": { "Ref": "IdentificationIAMRole" }, - "Runtime": "python3.6" + "Runtime": { "Ref": "PythonVersion" } } }, "LogGroupLambdaEvaluate": { diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 82caa983..23f8b046 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -753,7 +753,8 @@ 
"SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -793,7 +794,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -833,7 +835,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -873,7 +876,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -913,7 +917,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -953,7 +958,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : 
"python3.6" } } }, @@ -993,7 +999,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1033,7 +1040,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1073,7 +1081,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1113,7 +1122,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1153,7 +1163,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1193,7 +1204,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": 
"SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1233,7 +1245,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1273,7 +1286,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftPublicAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1314,7 +1328,8 @@ { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftClusterEncryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1354,7 +1369,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftLogging", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1394,7 +1410,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSPrivilegedAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1434,7 +1451,8 @@ "SNSTopicName": {"Fn::Join": ["",[{"Ref": "ResourcesPrefix"}, {"Fn::FindInMap": ["NamingStandards","SNSTopicNameECSLogging","value"]}] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": 
{"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1474,7 +1492,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1514,7 +1533,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESEncryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.7" } } }, @@ -1554,7 +1574,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESLogging", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } }, @@ -1594,7 +1615,8 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESPublicAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, + "PythonVersion" : "python3.6" } } } From 02e328cc9bcfffebe7f1880acc0221547fca7f3a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 19 Aug 2019 12:21:42 +0530 Subject: [PATCH 153/193] Updated with ES Unencrypted notification changes. Updated with Elasticsearch unencrypted notification changes. 
--- .../create_elasticsearch_unencrypted_issue_tickets.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index ab818dc3..fd152e9c 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -68,17 +68,6 @@ def create_tickets_elasticsearch_unencryption(self): ) IssueOperations.set_status_closed(ddb_table, issue) # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}") - slack.report_issue( - msg=f"Elasticsearch unencrypted domain '{domain_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{domain_name}'") # issue has not been reported yet From 5bfb8054f47aea8a4592902306e8a52107346e2d Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 19 Aug 2019 20:29:29 +0530 Subject: [PATCH 154/193] Revert "Updated with ES Unencrypted notification changes." This reverts commit 02e328cc9bcfffebe7f1880acc0221547fca7f3a. 
--- .../create_elasticsearch_unencrypted_issue_tickets.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index fd152e9c..ab818dc3 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -68,6 +68,17 @@ def create_tickets_elasticsearch_unencryption(self): ) IssueOperations.set_status_closed(ddb_table, issue) # issue.status != IssueStatus.Closed (should be IssueStatus.Open) + elif issue.timestamps.updated > issue.timestamps.reported: + logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}") + slack.report_issue( + msg=f"Elasticsearch unencrypted domain '{domain_name}' issue is changed " + f"in '{account_name} / {account_id}' account, '{region}' region" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{domain_name}'") # issue has not been reported yet From 051179a2b4de88e1d7b56d65f814ecca9f03a86a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Mon, 19 Aug 2019 20:34:42 +0530 Subject: [PATCH 155/193] Reverting changes. Reverting changes. 
--- .../cf-templates/identification-nested.json | 7 +- deployment/cf-templates/identification.json | 66 +++++++------------ 2 files changed, 24 insertions(+), 49 deletions(-) diff --git a/deployment/cf-templates/identification-nested.json b/deployment/cf-templates/identification-nested.json index ac3c9822..53d2fd81 100644 --- a/deployment/cf-templates/identification-nested.json +++ b/deployment/cf-templates/identification-nested.json @@ -69,9 +69,6 @@ }, "SNSIdentificationErrors": { "Type": "String" - }, - "PythonVersion": { - "Type": "String" } }, "Conditions": { @@ -102,7 +99,7 @@ "MemorySize": 128, "Timeout": "300", "Role": { "Ref": "IdentificationIAMRole" }, - "Runtime": { "Ref": "PythonVersion" } + "Runtime": "python3.6" } }, "LogGroupLambdaInitiateEvaluation": { @@ -151,7 +148,7 @@ "MemorySize": {"Ref": "EvaluateLambdaMemorySize"}, "Timeout": "300", "Role": { "Ref": "IdentificationIAMRole" }, - "Runtime": { "Ref": "PythonVersion" } + "Runtime": "python3.6" } }, "LogGroupLambdaEvaluate": { diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 23f8b046..82caa983 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -753,8 +753,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSecurityGroups", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -794,8 +793,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameCloudTrails", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -835,8 +833,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": 
"ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3ACL", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -876,8 +873,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Policy", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -917,8 +913,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserKeysRotation", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -958,8 +953,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameIAMUserInactiveKeys", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -999,8 +993,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSVolumes", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1040,8 +1033,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameEBSSnapshots", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1081,8 +1073,7 @@ 
"SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSSnapshots", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1122,8 +1113,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameSQSPublicPolicy", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1163,8 +1153,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameS3Encryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1204,8 +1193,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRDSEncryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1245,8 +1233,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameAMIPublicAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1286,8 +1273,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftPublicAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": 
{"Ref": "SNSIdentificationErrors"} } } }, @@ -1328,8 +1314,7 @@ { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftClusterEncryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1369,8 +1354,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameRedshiftLogging", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1410,8 +1394,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSPrivilegedAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1451,8 +1434,7 @@ "SNSTopicName": {"Fn::Join": ["",[{"Ref": "ResourcesPrefix"}, {"Fn::FindInMap": ["NamingStandards","SNSTopicNameECSLogging","value"]}] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1492,8 +1474,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameECSExternalImageSource", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1533,8 +1514,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESEncryption", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.7" + "SNSIdentificationErrors": 
{"Ref": "SNSIdentificationErrors"} } } }, @@ -1574,8 +1554,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESLogging", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } }, @@ -1615,8 +1594,7 @@ "SNSTopicName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, { "Fn::FindInMap": ["NamingStandards", "SNSTopicNameESPublicAccess", "value"] } ] ]}, - "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"}, - "PythonVersion" : "python3.6" + "SNSIdentificationErrors": {"Ref": "SNSIdentificationErrors"} } } } From c1a6c099d65452caaadb6c2b3c97d73aea1b66b5 Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Tue, 20 Aug 2019 18:17:00 +0300 Subject: [PATCH 156/193] Calling out 0/0 vs specific non-DJ IPs --- .../identification/lambdas/requirements.txt | 3 +- hammer/library/aws/security_groups.py | 47 ++++++++++++++++++- hammer/library/config.py | 5 ++ 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/hammer/identification/lambdas/requirements.txt b/hammer/identification/lambdas/requirements.txt index 663bd1f6..59e7f8f4 100755 --- a/hammer/identification/lambdas/requirements.txt +++ b/hammer/identification/lambdas/requirements.txt @@ -1 +1,2 @@ -requests \ No newline at end of file +requests +ipwhois \ No newline at end of file diff --git a/hammer/library/aws/security_groups.py b/hammer/library/aws/security_groups.py index 814d88aa..50cda0b2 100755 --- a/hammer/library/aws/security_groups.py +++ b/hammer/library/aws/security_groups.py @@ -1,19 +1,23 @@ import json import logging import ipaddress +import warnings from enum import Enum from datetime import datetime, timezone from botocore.exceptions import ClientError +from ipwhois import IPWhois from library.utility import jsonDumps from library.aws.s3 import S3Operations from 
library.aws.utility import convert_tags +from library.config import Config class RestrictionStatus(Enum): Restricted = "restricted" OpenCompletely = "open_completely" OpenPartly = "open_partly" + ExcludedRegistrant = 'owner' class SecurityGroupOperations: @@ -372,6 +376,40 @@ def __str__(self): perms = ", ".join([str(perm) for perm in self.permissions]) return f"{self.__class__.__name__}(Name={self.name}, Id={self.id}, Permissions=[{perms}])" + @staticmethod + def validate_trusted_registrant(cidr): + """ + :param cidr: + :return: + """ + config = Config() + trusted_registrants = config.sg.trusted_registrants + + if not trusted_registrants: + return False + + ip = cidr.split("/")[0] + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + whois = IPWhois(ip).lookup_rdap() + except Exception: + return False + + registrant = "" + + for obj in whois.get('objects', {}).values(): + if obj.get('contact') is None: + continue + if 'registrant' in obj.get('roles', []): + registrant = obj['contact'].get('name') + break + + if registrant and registrant in trusted_registrants: + return True + return False + def restriction_status(self, cidr): """ Check restriction status of cidr @@ -381,7 +419,9 @@ def restriction_status(self, cidr): :return: RestrictionStatus with check result """ status = RestrictionStatus.Restricted - if cidr.endswith("/0"): + if ipaddress.ip_network(cidr).is_global and self.validate_trusted_registrant(cidr): + status = RestrictionStatus.ExcludedRegistrant + elif cidr.endswith("/0"): status = RestrictionStatus.OpenCompletely elif ipaddress.ip_network(cidr).is_global: status = RestrictionStatus.OpenPartly @@ -409,6 +449,9 @@ def check(self, restricted_ports): if status == RestrictionStatus.Restricted: logging.debug(f"Skipping restricted '{ip_range}'") continue + elif status == RestrictionStatus.ExcludedRegistrant: + logging.debug(f"Skipping excluded '{ip_range}'") + continue # second - check if ports from `restricted_ports` list has 
intersection with ports from FromPort..ToPort range if perm.from_port is None or perm.to_port is None: logging.debug(f"Marking world-wide open all ports from '{ip_range}'") @@ -432,6 +475,8 @@ def status(self): statuses = {perms.status for perms in self.permissions} if RestrictionStatus.OpenCompletely in statuses: return RestrictionStatus.OpenCompletely + elif RestrictionStatus.ExcludedRegistrant in statuses: + return RestrictionStatus.ExcludedRegistrant elif RestrictionStatus.OpenPartly in statuses: return RestrictionStatus.OpenPartly return RestrictionStatus.Restricted diff --git a/hammer/library/config.py b/hammer/library/config.py index 11ad9eee..ac0e425c 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -572,6 +572,11 @@ def issue_retention_date(self): """ :return: `timedelta` object before performing auto remediation """ return timedelta(days=self.remediation_retention_period) + @property + def trusted_registrants(self): + """ :return: list of trusted registrants""" + return self._config.get('trusted_registrants', []) + class IAMUserInactiveKeysConfig(ModuleConfig): """ Extend ModuleConfig with IAM inactive keys specific details """ From d4c35415653a90dfc189ead6cea6b627a9fcd68c Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Wed, 21 Aug 2019 18:11:28 +0300 Subject: [PATCH 157/193] Added documentation --- docs/pages/playbook2_insecure_services.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/pages/playbook2_insecure_services.md b/docs/pages/playbook2_insecure_services.md index d91e6a76..c0602894 100644 --- a/docs/pages/playbook2_insecure_services.md +++ b/docs/pages/playbook2_insecure_services.md @@ -89,6 +89,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| 
|`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`21`| +|`trusted_registrants` |*Optional*. List of registrants of public IPs that should be excluded from the report. |`[]`| Sample **secgrp_unrestricted_access** section of the **config.json** file: From f9c674985c4513ae47a93270d457f8eec199f24c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 21 Aug 2019 21:15:30 +0530 Subject: [PATCH 158/193] Updated boto3 version. Updated boto3 version. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 59f3ec29..25089c94 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ pyflakes -boto3==1.7.84 +boto3==1.9.42 moto==1.3.5 watchtower pytest From 1ed978ed03b0f1af0e40427f513ec970cae7be93 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 21 Aug 2019 21:25:30 +0530 Subject: [PATCH 159/193] Updated with boto3 version Updated with boto3 version --- hammer/identification/lambdas/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/hammer/identification/lambdas/requirements.txt b/hammer/identification/lambdas/requirements.txt index 663bd1f6..7001ed12 100755 --- a/hammer/identification/lambdas/requirements.txt +++ b/hammer/identification/lambdas/requirements.txt @@ -1 +1,2 @@ +boto3==1.9.42 requests \ No newline at end of file From 2d76bc3cdb34a02178d3e137db6c8159c623774d Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 23 Aug 2019 15:00:41 +0530 Subject: [PATCH 160/193] Updated code changes to fix slack user msg issues. Updated code changes to fix user channel issues. 
--- hammer/library/slack_utility.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hammer/library/slack_utility.py b/hammer/library/slack_utility.py index a9bc179a..8b54d497 100755 --- a/hammer/library/slack_utility.py +++ b/hammer/library/slack_utility.py @@ -52,8 +52,8 @@ def post_message(self, msg, owner=None): # if owner is not set - try to find channel to send msg to based on msg body owner = owner if owner is not None else self.config.slack.find_channel(msg) - # open user channel if owner is not prefixed with # - channel = owner if owner.startswith("#") else self.open_user_channel(owner) + # get user id if owner is not prefixed with # + channel = owner if owner.startswith("#") else self.user_id(owner) if not channel: logging.debug(f"ignoring: '{msg}'") From 344c034098faa16c4232dd2ab63e0f557d8be6d4 Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Fri, 23 Aug 2019 18:01:17 +0300 Subject: [PATCH 161/193] Calling out 0/0 vs specific non-DJ IPs --- hammer/library/aws/security_groups.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/security_groups.py b/hammer/library/aws/security_groups.py index 50cda0b2..a02ce8ee 100755 --- a/hammer/library/aws/security_groups.py +++ b/hammer/library/aws/security_groups.py @@ -393,7 +393,7 @@ def validate_trusted_registrant(cidr): with warnings.catch_warnings(): warnings.simplefilter("ignore") try: - whois = IPWhois(ip).lookup_rdap() + whois = IPWhois(ip).lookup_rdap(asn_methods=['dns', 'whois', 'http']) except Exception: return False From 6e8962722269158d423c2cf28a8ef4b543e5e222 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Sat, 24 Aug 2019 00:12:48 +0530 Subject: [PATCH 162/193] Updated with documentation changes. Updated with documentation changes. 
--- docs/pages/playbook10_sqs_public_policy.md | 4 +++- docs/pages/playbook11_s3_unencryption.md | 4 +++- docs/pages/playbook12_rds_unencryption.md | 4 +++- docs/pages/playbook13_amis_public_access.md | 4 +++- docs/pages/playbook15_redshift_unencryption.md | 4 +++- docs/pages/playbook16_redshift_public_clusters.md | 5 ++++- docs/pages/playbook17_redshift_audit_logging.md | 4 +++- docs/pages/playbook18_ecs_logging.md | 4 +++- docs/pages/playbook19_ecs_privileged_access.md | 4 +++- docs/pages/playbook1_s3_public_buckets_acl.md | 5 ++++- docs/pages/playbook20_ecs_external_image_source.md | 4 +++- docs/pages/playbook21_elasticsearch_unencryption.md | 4 +++- docs/pages/playbook22_elasticsearch_public_access.md | 4 +++- docs/pages/playbook23_elasticsearch_logging.md | 4 +++- docs/pages/playbook2_insecure_services.md | 5 ++++- docs/pages/playbook3_inactive_user_keys.md | 4 +++- docs/pages/playbook4_keysrotation.md | 4 +++- docs/pages/playbook5_s3_public_buckets_policy.md | 4 +++- docs/pages/playbook6_cloudtrail.md | 2 ++ docs/pages/playbook7_ebs_unencrypted_volumes.md | 2 ++ docs/pages/playbook8_ebs_snapshots_public.md | 4 +++- docs/pages/playbook9_rds_snapshots_public.md | 4 +++- 22 files changed, 67 insertions(+), 20 deletions(-) diff --git a/docs/pages/playbook10_sqs_public_policy.md b/docs/pages/playbook10_sqs_public_policy.md index 304bf1db..85a173b7 100644 --- a/docs/pages/playbook10_sqs_public_policy.md +++ b/docs/pages/playbook10_sqs_public_policy.md @@ -88,6 +88,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-sqs-public-policy-lambda`| Sample **config.json** section: ``` @@ -96,7 +97,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-sqs-public-access", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-sqs-public-policy-lambda" }, ``` diff --git a/docs/pages/playbook11_s3_unencryption.md b/docs/pages/playbook11_s3_unencryption.md index 15b2c54b..1d168f90 100644 --- a/docs/pages/playbook11_s3_unencryption.md +++ b/docs/pages/playbook11_s3_unencryption.md @@ -83,6 +83,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-s3-encryption-lambda`| Sample **config.json** section: ``` @@ -91,7 +92,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-s3-unencrypted", "reporting": true, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-s3-encryption-lambda" } ``` diff --git a/docs/pages/playbook12_rds_unencryption.md b/docs/pages/playbook12_rds_unencryption.md index aceb9f10..87dec997 100644 --- a/docs/pages/playbook12_rds_unencryption.md +++ b/docs/pages/playbook12_rds_unencryption.md @@ -58,13 +58,15 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-rds-unencrypted` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-rds-encryption-lambda`| Sample **config.json** section: ``` "rds_encryption": { "enabled": true, "ddb.table_name": "hammer-rds-unencrypted", - "reporting": true + "reporting": true, + "topic_name": "hammer-describe-rds-encryption-lambda" } ``` diff --git a/docs/pages/playbook13_amis_public_access.md b/docs/pages/playbook13_amis_public_access.md index a3e6c53c..22653eb2 100644 --- a/docs/pages/playbook13_amis_public_access.md +++ b/docs/pages/playbook13_amis_public_access.md @@ -58,6 +58,7 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this 
type| `hammer-public-amis` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-ami-public-access-lambda`| Sample **config.json** section: ``` @@ -66,7 +67,8 @@ Sample **config.json** section: "ddb.table_name": "djif-hammer-public-amis", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-ami-public-access-lambda" } ``` diff --git a/docs/pages/playbook15_redshift_unencryption.md b/docs/pages/playbook15_redshift_unencryption.md index cacd1e6b..91db0a4b 100644 --- a/docs/pages/playbook15_redshift_unencryption.md +++ b/docs/pages/playbook15_redshift_unencryption.md @@ -78,6 +78,7 @@ To identify and report issues of this type, you should add the following paramet |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-redshift-cluster-encryption-lambda`| Sample **config.json** section: @@ -87,7 +88,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-redshift-unencrypted", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-redshift-cluster-encryption-lambda" } ``` diff --git a/docs/pages/playbook16_redshift_public_clusters.md b/docs/pages/playbook16_redshift_public_clusters.md index f6c7e4c3..19ee830a 100644 --- a/docs/pages/playbook16_redshift_public_clusters.md +++ b/docs/pages/playbook16_redshift_public_clusters.md @@ -58,6 +58,8 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-redshift-public-access` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-redshift-public-access-lambda`| + Sample **config.json** section: ``` @@ -67,7 +69,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-redshift-public-access", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-redshift-public-access-lambda" } ``` diff --git a/docs/pages/playbook17_redshift_audit_logging.md b/docs/pages/playbook17_redshift_audit_logging.md index d91df3bd..c1611ec5 100644 --- a/docs/pages/playbook17_redshift_audit_logging.md +++ b/docs/pages/playbook17_redshift_audit_logging.md @@ -58,6 +58,7 @@ To identify and report issues of this type, you should 
add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-redshift-logging` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-redshift-logging-lambda`| Sample **config.json** section: ``` @@ -67,7 +68,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-redshift-logging", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-redshift-logging-lambda" } ``` diff --git a/docs/pages/playbook18_ecs_logging.md b/docs/pages/playbook18_ecs_logging.md index ddc94820..1340f780 100644 --- a/docs/pages/playbook18_ecs_logging.md +++ b/docs/pages/playbook18_ecs_logging.md @@ -57,6 +57,7 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-ecs-logging` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`true`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-ecs-logging-lambda`| Sample **config.json** section: ``` @@ -66,7 +67,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-ecs-logging", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-ecs-logging-lambda" } ``` diff --git a/docs/pages/playbook19_ecs_privileged_access.md b/docs/pages/playbook19_ecs_privileged_access.md index 35e65b6d..eca6ba36 100644 --- a/docs/pages/playbook19_ecs_privileged_access.md +++ 
b/docs/pages/playbook19_ecs_privileged_access.md @@ -58,6 +58,7 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-ecs-privileged-access` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`true`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-ecs-privileged-access-lambda`| Sample **config.json** section: ``` @@ -67,7 +68,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-ecs-privileged-access", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-ecs-privileged-access-lambda" } ``` diff --git a/docs/pages/playbook1_s3_public_buckets_acl.md b/docs/pages/playbook1_s3_public_buckets_acl.md index d3d159c8..78fe82b1 100644 --- a/docs/pages/playbook1_s3_public_buckets_acl.md +++ b/docs/pages/playbook1_s3_public_buckets_acl.md @@ -87,6 +87,8 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-s3-acl-lambda`| + Sample **config.json** section: ``` @@ -95,7 +97,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-s3-public-bucket-acl", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-s3-acl-lambda", } ``` diff --git a/docs/pages/playbook20_ecs_external_image_source.md b/docs/pages/playbook20_ecs_external_image_source.md index 658ed5a7..b2fe165a 100644 --- a/docs/pages/playbook20_ecs_external_image_source.md +++ b/docs/pages/playbook20_ecs_external_image_source.md @@ -58,6 +58,7 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-ecs-external-image-source` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`true`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-ecs-external-image-source-lambda`| Sample **config.json** section: ``` @@ -66,7 +67,8 @@ Sample **config.json** section: "enabled": true, "ddb.table_name": "hammer-ecs-external-image-source", "reporting": true, - "safe_image_sources": ["amazonaws", "artifactory"] + "safe_image_sources": ["amazonaws", "artifactory"], + "topic_name": "hammer-describe-ecs-external-image-source-lambda" } ``` diff --git a/docs/pages/playbook21_elasticsearch_unencryption.md b/docs/pages/playbook21_elasticsearch_unencryption.md index 3fd5c58a..0621331c 100644 --- a/docs/pages/playbook21_elasticsearch_unencryption.md +++ b/docs/pages/playbook21_elasticsearch_unencryption.md @@ -58,13 +58,15 @@ To identify and report issues of 
this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-es-unencrypted-domain` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-es-encryption-lambda`| Sample **config.json** section: ``` "es_unencrypted_domain": { "enabled": true, "ddb.table_name": "djif-hammer-es-unencrypted-domain", - "reporting": true + "reporting": true, + "topic_name": "hammer-describe-es-encryption-lambda" }, ``` diff --git a/docs/pages/playbook22_elasticsearch_public_access.md b/docs/pages/playbook22_elasticsearch_public_access.md index 8c7b2b5c..b1e3b136 100644 --- a/docs/pages/playbook22_elasticsearch_public_access.md +++ b/docs/pages/playbook22_elasticsearch_public_access.md @@ -80,6 +80,7 @@ To identify and report issues of this type, you should add the following paramet |`enabled` |Toggles issue detection for this issue |`true`| |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type| `hammer-es-public-access-domain` | |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-es-public-access-lambda`| Sample **config.json** section: ``` @@ -88,7 +89,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-es-public-access-domain", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-es-public-access-lambda" },``` ### 4.2. 
The whitelist.json File diff --git a/docs/pages/playbook23_elasticsearch_logging.md b/docs/pages/playbook23_elasticsearch_logging.md index 72515ca7..c4913671 100644 --- a/docs/pages/playbook23_elasticsearch_logging.md +++ b/docs/pages/playbook23_elasticsearch_logging.md @@ -79,6 +79,7 @@ To identify and report issues of this type, you should add the following paramet |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer remediation functionality for this issue type |`false`| |`remediation_retention_period` |Toggle Dow Jones Hammer remediation retention period details for this issue type |`21`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-es-logging-lambda`| Sample **config.json** section: ``` @@ -87,7 +88,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-es-domain-logging", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-es-logging-lambda" } ``` diff --git a/docs/pages/playbook2_insecure_services.md b/docs/pages/playbook2_insecure_services.md index d91e6a76..9da5579a 100644 --- a/docs/pages/playbook2_insecure_services.md +++ b/docs/pages/playbook2_insecure_services.md @@ -89,6 +89,8 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`21`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-security-groups-lambda`| + Sample **secgrp_unrestricted_access** section of the **config.json** file: @@ -99,7 +101,8 @@ Sample **secgrp_unrestricted_access** section of the **config.json** file: "restricted_ports": [21, 22, 23, 3389, 1433, 1521, 3306, 5432, 27017], "reporting": false, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "topic_name": "hammer-describe-security-groups-lambda" } ``` diff --git a/docs/pages/playbook3_inactive_user_keys.md b/docs/pages/playbook3_inactive_user_keys.md index f19dfd56..52d06791 100644 --- a/docs/pages/playbook3_inactive_user_keys.md +++ b/docs/pages/playbook3_inactive_user_keys.md @@ -88,6 +88,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-iam-user-inactive-keys-lambda`| Sample **config.json** section: ``` @@ -97,7 +98,8 @@ Sample **config.json** section: "inactive_criteria_days": "10", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-iam-user-inactive-keys-lambda" } ``` diff --git a/docs/pages/playbook4_keysrotation.md b/docs/pages/playbook4_keysrotation.md index 50a92060..0fa89b16 100644 --- a/docs/pages/playbook4_keysrotation.md +++ b/docs/pages/playbook4_keysrotation.md @@ -84,6 +84,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-iam-user-keys-rotation-lambda`| Sample **config.json** section: @@ -94,7 +95,8 @@ Sample **config.json** section: "rotation_criteria_days": 10, "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-iam-user-keys-rotation-lambda" } ``` diff --git a/docs/pages/playbook5_s3_public_buckets_policy.md b/docs/pages/playbook5_s3_public_buckets_policy.md index 8fcb523d..49f6d663 100644 --- a/docs/pages/playbook5_s3_public_buckets_policy.md +++ b/docs/pages/playbook5_s3_public_buckets_policy.md @@ -92,6 +92,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`7`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-s3-policy-lambda`| Sample **config.json** section: ``` @@ -100,7 +101,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-s3-public-bucket-policy", "reporting": false, "remediation": false - "remediation_retention_period": 7 + "remediation_retention_period": 7, + "topic_name": "hammer-describe-s3-policy-lambda" } ``` diff --git a/docs/pages/playbook6_cloudtrail.md b/docs/pages/playbook6_cloudtrail.md index facc0077..e991c4f8 100644 --- a/docs/pages/playbook6_cloudtrail.md +++ b/docs/pages/playbook6_cloudtrail.md @@ -60,6 +60,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`enabled` |Toggles issue detection for this issue |`true` | |`ddb.table_name` |Name of the DynamoDB table where Dow Jones Hammer will store the identified issues of this type|`hammer-cloudtrails`| |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-cloudtrails-lambda`| Sample **config.json** section: ``` @@ -67,6 +68,7 @@ Sample **config.json** section: "enabled": "true", "ddb.table_name": "hammer-cloudtrails", "reporting": false, + "topic_name": "hammer-describe-cloudtrails-lambda" } ``` diff --git a/docs/pages/playbook7_ebs_unencrypted_volumes.md b/docs/pages/playbook7_ebs_unencrypted_volumes.md index 4fd282c6..1a8e145a 100644 --- a/docs/pages/playbook7_ebs_unencrypted_volumes.md +++ b/docs/pages/playbook7_ebs_unencrypted_volumes.md @@ -64,6 +64,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`ddb.table_name`|The name of the DynamoDB table to which Dow Jones Hammer would record detected issues of this type|`hammer-ebs-volumes-unencrypted` | |`accounts` |*Optional* 
comma-separated list of accounts to limit check for |`aws.accounts` from `config.json`| |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue |`false` | +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-ebs-unencrypted-volumes-lambda`| Sample **config.json** section: ``` @@ -72,6 +73,7 @@ Sample **config.json** section: "ddb.table_name": "hammer-ebs-volumes-unencrypted", "accounts": ["210987654321"], "reporting": false, + "topic_name": "hammer-describe-ebs-unencrypted-volumes-lambda" } ``` diff --git a/docs/pages/playbook8_ebs_snapshots_public.md b/docs/pages/playbook8_ebs_snapshots_public.md index dec2f0e6..e4327d77 100644 --- a/docs/pages/playbook8_ebs_snapshots_public.md +++ b/docs/pages/playbook8_ebs_snapshots_public.md @@ -87,6 +87,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. 
The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-ebs-public-snapshots-lambda`| Sample **config.json** section: ``` @@ -95,7 +96,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-ebs-snapshots-public", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-ebs-public-snapshots-lambda" } ``` diff --git a/docs/pages/playbook9_rds_snapshots_public.md b/docs/pages/playbook9_rds_snapshots_public.md index 409383ed..41fcb130 100644 --- a/docs/pages/playbook9_rds_snapshots_public.md +++ b/docs/pages/playbook9_rds_snapshots_public.md @@ -87,6 +87,7 @@ To identify, report, and remediate issues of this type, you should add the follo |`reporting` |Toggle Dow Jones Hammer reporting functionality for this issue type |`false`| |`remediation` |Toggle Dow Jones Hammer automatic remediation functionality for this issue type |`false`| |`remediation_retention_period`|The amount of days to pass between issue detection and its automatic remediation. The value `0` denotes that Dow Jones Hammer will remediate the issue at the next remediation job run.|`0`| +|`topic_name`|Name of the SNS topic to trigger Lambda function from API scan.|`hammer-describe-rds-public-snapshots-lambda`| Sample **config.json** section: ``` @@ -95,7 +96,8 @@ Sample **config.json** section: "ddb.table_name": "hammer-rds-public-snapshots", "reporting": false, "remediation": false, - "remediation_retention_period": 0 + "remediation_retention_period": 0, + "topic_name": "hammer-describe-rds-public-snapshots-lambda" } ``` From 5d493125b9e74cc9ec659de7c6e1e9126b945faa Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 27 Aug 2019 13:38:53 +0530 Subject: [PATCH 163/193] Updated with ECS notifications fix. Updated with ECS notifications fix. 
--- hammer/library/aws/ecs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 2cddc8ba..f948a07d 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -200,6 +200,5 @@ def check(self, task_definitions=None): else: logging.exception(f"Failed to describe task definitions in {self.account} " f"for task {task_definition_name}") - continue return True From a29166c642db0a87b3ec4aa6296a89d65fb837cd Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 27 Aug 2019 14:15:32 +0530 Subject: [PATCH 164/193] Revert "Updated with ECS notifications fix." This reverts commit 5d493125b9e74cc9ec659de7c6e1e9126b945faa. --- hammer/library/aws/ecs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index f948a07d..2cddc8ba 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -200,5 +200,6 @@ def check(self, task_definitions=None): else: logging.exception(f"Failed to describe task definitions in {self.account} " f"for task {task_definition_name}") + continue return True From e4ca3942161817782c6143a5e0c95e2f62936e09 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 27 Aug 2019 18:45:22 +0530 Subject: [PATCH 165/193] Updated with ECS error handling. Updated with ECS error handling. 
--- hammer/library/aws/ecs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/ecs.py b/hammer/library/aws/ecs.py index 2cddc8ba..f22e3a53 100644 --- a/hammer/library/aws/ecs.py +++ b/hammer/library/aws/ecs.py @@ -198,7 +198,7 @@ def check(self, task_definitions=None): logging.error(f"Access denied in {self.account} " f"(ecs:{err.operation_name})") else: - logging.exception(f"Failed to describe task definitions in {self.account} " + logging.error(f"Failed to describe task definitions in {self.account} " f"for task {task_definition_name}") continue From e7dcb3f9a65ab7dcc178f07727e4a66dd7c855fe Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 27 Aug 2019 21:05:45 +0530 Subject: [PATCH 166/193] Updated with ECS error notification changes. Updated with ECS error notification changes. --- deployment/cf-templates/ddb.json | 2 +- ...create_ecs_external_image_source_issue_tickets.py | 12 ------------ .../reporting/create_ecs_logging_issue_tickets.py | 12 ------------ .../create_ecs_privileged_access_issue_tickets.py | 12 ------------ ...create_elasticsearch_unencrypted_issue_tickets.py | 12 ------------ 5 files changed, 1 insertion(+), 49 deletions(-) diff --git a/deployment/cf-templates/ddb.json b/deployment/cf-templates/ddb.json index 82a39012..ba3e13c6 100755 --- a/deployment/cf-templates/ddb.json +++ b/deployment/cf-templates/ddb.json @@ -24,7 +24,7 @@ } ], "ProvisionedThroughput": { - "ReadCapacityUnits": "35", + "ReadCapacityUnits": "50", "WriteCapacityUnits": "2" }, "SSESpecification": { diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 67cace2e..63b56de0 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -66,18 +66,6 @@ 
def create_tickets_ecs_external_images(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_name}") - slack.report_issue( - msg=f"ECS external image source '{task_definition_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{task_definition_name}'") # issue has not been reported yet diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index b3ef1ab6..afbaadb3 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -66,18 +66,6 @@ def create_tickets_ecs_logging(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_name}") - slack.report_issue( - msg=f"ECS logging '{task_definition_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No 
changes for '{task_definition_name}'") # issue has not been reported yet diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index 13f83504..533ef7df 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -68,18 +68,6 @@ def create_tickets_ecs_privileged(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {task_definition_name}") - slack.report_issue( - msg=f"ECS privileged access disabled '{task_definition_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{task_definition_name}'") # issue has not been reported yet diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index ab818dc3..52dacd89 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -67,18 +67,6 @@ def create_tickets_elasticsearch_unencryption(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif 
issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}") - slack.report_issue( - msg=f"Elasticsearch unencrypted domain '{domain_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{domain_name}'") # issue has not been reported yet From 3a82e18022bdd23522c82eb10928a1d627e98e36 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 28 Aug 2019 12:34:12 +0530 Subject: [PATCH 167/193] Updated with Elasticsearch Error handling changes. Updated with Elasticsearch Error handling changes. --- hammer/library/aws/elasticsearch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index e8dea752..c064f51d 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -315,7 +315,7 @@ def check(self, ids=None): logging.error(f"Access denied in {self.account} " f"(ec2:{err.operation_name})") else: - logging.exception(f"Failed to describe elasticsearch domains in {self.account}") + logging.error(f"Failed to describe elasticsearch domains in {self.account}") return False for domain_detail in domain_details: From 36020d5d217b7512f82e0697e4889a9538a54bb9 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 28 Aug 2019 12:36:08 +0530 Subject: [PATCH 168/193] Updated with Error handling changes. Updated with Error handling changes. 
--- hammer/library/aws/elasticsearch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/elasticsearch.py b/hammer/library/aws/elasticsearch.py index c064f51d..577eafe7 100644 --- a/hammer/library/aws/elasticsearch.py +++ b/hammer/library/aws/elasticsearch.py @@ -311,7 +311,7 @@ def check(self, ids=None): domain_details = es_client.describe_elasticsearch_domains(DomainNames=ids)["DomainStatusList"] except ClientError as err: - if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]: + if err.response['Error']['Code'] in ["AccessDenied", "AccessDeniedException", "UnauthorizedOperation"]: logging.error(f"Access denied in {self.account} " f"(ec2:{err.operation_name})") else: From 19a0bff0fff22656d130bbf5183843a7d9bb5d90 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 29 Aug 2019 11:53:14 +0530 Subject: [PATCH 169/193] Removed code for Error notification if issue not changed Removed code for Error notification if issue not changed --- ...ate_elasticsearch_domain_logging_issue_tickets.py | 12 ------------ ...eate_elasticsearch_public_access_issue_tickets.py | 12 ------------ 2 files changed, 24 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index 4057a6f2..8649ca6b 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -67,18 +67,6 @@ def create_tickets_elasticsearch_domain_logging(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, 
{domain_name}") - slack.report_issue( - msg=f"Elasticsearch domain logging '{domain_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{domain_name}'") # issue has not been reported yet diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index ae29eba5..72c1cfc3 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -69,18 +69,6 @@ def create_tickets_elasticsearch_public_access(self): bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) - # issue.status != IssueStatus.Closed (should be IssueStatus.Open) - elif issue.timestamps.updated > issue.timestamps.reported: - logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {domain_name}") - slack.report_issue( - msg=f"Elasticsearch publicly accessible domain '{domain_name}' issue is changed " - f"in '{account_name} / {account_id}' account, '{region}' region" - f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{domain_name}'") # issue has not been reported yet From 89141458643faaa8c2ad3717752a3db425d5e2c6 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 29 Aug 2019 12:54:25 +0530 Subject: [PATCH 170/193] Fixes for Ec2 userdata issues. 
Userdata is failing with "public key for ius-release.rpm is not installed" error. --- deployment/cf-templates/reporting-remediation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/reporting-remediation.json b/deployment/cf-templates/reporting-remediation.json index 731b8b02..d3bb9aed 100755 --- a/deployment/cf-templates/reporting-remediation.json +++ b/deployment/cf-templates/reporting-remediation.json @@ -170,7 +170,7 @@ "yum -y install openssl-devel\n", "# remove for following install to not fail\n", "yum -y remove ius-release\n", - "yum -y install https://centos7.iuscommunity.org/ius-release.rpm\n", + "yum -y install https://centos7.iuscommunity.org/ius-release.rpm --nogpgcheck\n", "yum -y install python36u python36u-pip python36u-devel\n", "pip3.6 install awscli\n", "rm -rf /hammer-correlation-engine\n", From 62baffe578a7c913b9ce828935911383844b8d2d Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Mon, 2 Sep 2019 14:39:27 +0300 Subject: [PATCH 171/193] Calling out 0/0 vs specific non-DJ IPs --- hammer/library/aws/security_groups.py | 37 +++++-------------- hammer/library/utility.py | 30 ++++++++++++++- .../create_security_groups_tickets.py | 25 ++----------- 3 files changed, 41 insertions(+), 51 deletions(-) diff --git a/hammer/library/aws/security_groups.py b/hammer/library/aws/security_groups.py index 64ed858e..6d59a9ec 100755 --- a/hammer/library/aws/security_groups.py +++ b/hammer/library/aws/security_groups.py @@ -1,16 +1,15 @@ import json import logging import ipaddress -import warnings from enum import Enum from datetime import datetime, timezone from botocore.exceptions import ClientError -from ipwhois import IPWhois from library.utility import jsonDumps from library.aws.s3 import S3Operations from library.aws.utility import convert_tags from library.config import Config +from library.utility import get_registrant class RestrictionStatus(Enum): @@ -69,7 +68,7 @@ def 
find_source_s3(account, if objects is None: logging.error(f"Failed to find '{group_id}' rules backup in {account}") return - backup_objects = [ obj["Key"] for obj in objects if obj.get("Key", "").startswith(f"{prefix}{group_id}_") ] + backup_objects = [obj["Key"] for obj in objects if obj.get("Key", "").startswith(f"{prefix}{group_id}_")] # return most recent backup recent_backup = max(backup_objects) source = json.loads(S3Operations.get_object(s3_client, bucket, recent_backup)) @@ -103,8 +102,8 @@ def restore_s3(cls, from_port = ingress.get("FromPort", None) to_port = ingress.get("ToPort", None) ip_protocol = ingress["IpProtocol"] - cidrs = [ ipv6_range["CidrIpv6"] for ipv6_range in ingress.get("Ipv6Ranges", []) ] - cidrs += [ ip_range["CidrIp"] for ip_range in ingress.get("IpRanges", []) ] + cidrs = [ipv6_range["CidrIpv6"] for ipv6_range in ingress.get("Ipv6Ranges", [])] + cidrs += [ip_range["CidrIp"] for ip_range in ingress.get("IpRanges", [])] for cidr in cidrs: cls.add_inbound_rule(ec2_client, group_id, ip_protocol, from_port, to_port, cidr) @@ -121,9 +120,9 @@ def ip_permissions(ip_protocol, from_port, to_port, cidr): :return: dict with `IpPermissions` element """ - perms = { 'IpProtocol': ip_protocol } + perms = {'IpProtocol': ip_protocol} if from_port is not None and \ - to_port is not None: + to_port is not None: perms['FromPort'] = from_port perms['ToPort'] = to_port ipv = ipaddress.ip_network(cidr).version @@ -382,31 +381,15 @@ def validate_trusted_registrant(cidr): :param cidr: :return: """ - config = Config() - trusted_registrants = config.sg.trusted_registrants + trusted_registrants = Config().config.sg.trusted_registrants if not trusted_registrants: return False - ip = cidr.split("/")[0] + registrant = get_registrant(cidr) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - whois = IPWhois(ip).lookup_rdap(asn_methods=['dns', 'whois', 'http']) - except Exception: - return False - - registrant = "" - - for obj in 
whois.get('objects', {}).values(): - if obj.get('contact') is None: - continue - if 'registrant' in obj.get('roles', []): - registrant = obj['contact'].get('name') - break - - if registrant and registrant in trusted_registrants: + if registrant and (registrant['name'] in trusted_registrants + or registrant['title'] in trusted_registrants): return True return False diff --git a/hammer/library/utility.py b/hammer/library/utility.py index 77e5689e..d136d4e3 100755 --- a/hammer/library/utility.py +++ b/hammer/library/utility.py @@ -1,5 +1,6 @@ import os import json +import warnings import xml import time import logging @@ -8,9 +9,10 @@ import tempfile import fcntl - from datetime import datetime from decimal import Decimal +from functools import lru_cache +from ipwhois import IPWhois def jsonEncoder(obj): @@ -120,6 +122,30 @@ def confirm(question, default=None): print("Please respond with 'yes' or 'no'") +@lru_cache(maxsize=128) +def get_registrant(cidr): + ip = cidr.split("/")[0] + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + whois = IPWhois(ip).lookup_rdap(asn_methods=['dns', 'whois', 'http']) + except Exception: + return "" + + registrant = {} + + for title, obj in whois.get('objects', {}).items(): + if obj.get('contact') is None: + continue + if 'registrant' in obj.get('roles', []): + registrant['name'] = obj['contact'].get('name') + registrant['title'] = title + break + + return registrant + + class SingletonInstanceException(BaseException): pass @@ -138,4 +164,4 @@ def __init__(self, instance_id): try: fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: - raise SingletonInstanceException() \ No newline at end of file + raise SingletonInstanceException() diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 5d4578e3..3fd5af1a 100755 --- 
a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -3,10 +3,7 @@ """ import sys import logging -import warnings -from functools import lru_cache -from ipwhois import IPWhois from collections import Counter from library.logger import set_logging, add_cw_logging from library.config import Config @@ -24,6 +21,7 @@ from library.aws.redshift import RedshiftClusterOperations from library.aws.elasticsearch import ElasticSearchOperations from library.utility import SingletonInstance, SingletonInstanceException +from library.utility import get_registrant class CreateSecurityGroupsTickets(object): @@ -33,26 +31,9 @@ def __init__(self, config): self.config = config @staticmethod - @lru_cache(maxsize=128) def get_registrant(cidr): - ip = cidr.split("/")[0] - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - whois = IPWhois(ip).lookup_rdap() - except Exception: - return "" - - registrants = [] - for title, obj in whois.get('objects', {}).items(): - if obj.get('contact') is None: - continue - if 'registrant' in obj.get('roles', []): - registrants.append(f"{obj['contact'].get('name')} ({title})") - break - - return ', '.join(registrants) + registrant = get_registrant(cidr) + return f"{registrant['name']} ({registrant['title']})" if registrant else "" def build_open_ports_table_jira(self, perms): open_partly = any([perm['status'] == 'open_partly' for perm in perms]) From c559652aa879f222f5f888a6fe65862461bd322b Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Thu, 5 Sep 2019 15:05:51 +0300 Subject: [PATCH 172/193] Config name changes to label names --- deployment/configs/config.json | 45 +++++++++++-------- hammer/library/jiraoperations.py | 33 +++++++------- hammer/library/slack_utility.py | 5 +-- .../reporting/create_cloudtrail_tickets.py | 2 +- ...reate_ebs_public_snapshot_issue_tickets.py | 2 +- 
.../create_ebs_volume_issue_tickets.py | 2 +- ...ecs_external_image_source_issue_tickets.py | 4 +- .../create_ecs_logging_issue_tickets.py | 4 +- ...ate_ecs_privileged_access_issue_tickets.py | 4 +- ...sticsearch_domain_logging_issue_tickets.py | 4 +- ...asticsearch_public_access_issue_tickets.py | 4 +- ...elasticsearch_unencrypted_issue_tickets.py | 4 +- .../create_iam_key_inactive_tickets.py | 2 +- .../create_iam_key_rotation_tickets.py | 2 +- .../create_public_ami_issue_tickets.py | 2 +- ...reate_rds_public_snapshot_issue_tickets.py | 2 +- ..._rds_unencrypted_instance_issue_tickets.py | 2 +- .../create_redshift_logging_issue_tickets.py | 4 +- ...te_redshift_public_access_issue_tickets.py | 4 +- ...shift_unencrypted_cluster_issue_tickets.py | 4 +- ...ate_s3_unencrypted_bucket_issue_tickets.py | 2 +- .../create_s3bucket_acl_issue_tickets.py | 2 +- .../create_s3bucket_policy_issue_tickets.py | 2 +- .../create_security_groups_tickets.py | 2 +- .../create_sqs_policy_issue_tickets.py | 2 +- 25 files changed, 75 insertions(+), 70 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index d0df3431..cf72e281 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -55,7 +55,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, - "labels": ["s3-public-acl"], + "labels": ["publics3"], "slack": true }, "secgrp_unrestricted_access": { @@ -93,7 +93,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, - "labels": ["iam-key-inactive"], + "labels": ["inactive-iam-keys"], "slack": true }, "user_keysrotation": { @@ -116,7 +116,7 @@ "remediation": false, "remediation_retention_period": 7, "jira": true, - "labels": ["s3-public-policy"], + "labels": ["publics3"], "slack": true }, "cloudtrails": { @@ -125,7 +125,7 @@ "topic_name": "hammer-describe-cloudtrails-lambda", "reporting": false, "jira": false, - "labels": ["cloudtrail-issue"], + "labels": ["cloud-trail-disabled"], 
"slack": false }, "ebs_unencrypted_volume": { @@ -135,7 +135,7 @@ "accounts": ["123456789012", "210987654321"], "reporting": false, "jira": false, - "labels": ["ebs-unencrypted-volume"], + "labels": ["unencrypted-ebs-volumes"], "slack": false }, "ebs_public_snapshot": { @@ -146,7 +146,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, - "labels": ["ebs-public-snapshot"], + "labels": ["public_snapshots"], "slack": true }, "rds_public_snapshot": { @@ -157,7 +157,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, - "labels": ["rds-public-snapshot"], + "labels": ["rds-public-snapshots"], "slack": true }, "ec2_public_ami": { @@ -179,7 +179,7 @@ "remediation": false, "remediation_retention_period": 0, "jira": true, - "labels": ["sqs-public-policy"], + "labels": ["publicsqs"], "slack": true }, "s3_encryption": { @@ -199,14 +199,15 @@ "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true, "jira": true, - "labels": ["rds-unencrypted"], + "labels": ["rds-unencrypted-instances"], "slack": true }, "redshift_logging": { "enabled": true, "ddb.table_name": "hammer-redshift-logging", "topic_name": "hammer-describe-redshift-logging-lambda", - "reporting": true + "reporting": true, + "labels": ["redshift-logging"] }, "redshift_public_access": { "enabled": true, @@ -214,7 +215,8 @@ "topic_name": "hammer-describe-redshift-public-access-lambda", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "labels": ["redshift-public-access"] }, "redshift_encryption": { "enabled": true, @@ -222,26 +224,30 @@ "topic_name": "hammer-describe-redshift-cluster-encryption-lambda", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "labels": ["redshift-unencrypted-clusters"] }, "ecs_privileged_access": { "enabled": true, "ddb.table_name": "hammer-ecs-privileged-access", "topic_name": 
"hammer-describe-ecs-privileged-access-lambda", - "reporting": true + "reporting": true, + "labels": ["ecs-privileged-access"] }, "ecs_logging": { "enabled": true, "ddb.table_name": "hammer-ecs-logging", "topic_name": "hammer-describe-ecs-logging-lambda", - "reporting": true + "reporting": true, + "labels": ["ecs-logging"] }, "ecs_external_image_source": { "enabled": true, "topic_name": "hammer-describe-ecs-external-image-source-lambda", "ddb.table_name": "hammer-ecs-external-image-source", "reporting": true, - "safe_image_sources": ["amazonaws", "artifactory"] + "safe_image_sources": ["amazonaws", "artifactory"], + "labels": ["ecs-external-image"] }, "es_domain_logging": { "enabled": true, @@ -249,13 +255,15 @@ "topic_name": "hammer-describe-es-logging-lambda", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "labels": ["es-domain-logging"], }, "es_unencrypted_domain": { "enabled": true, "ddb.table_name": "hammer-es-unencrypted-domain", "topic_name": "hammer-describe-es-encryption-lambda", - "reporting": true + "reporting": true, + "labels": ["unencrypted-elasticsearch-domains"], }, "es_public_access_domain": { "enabled": true, @@ -263,6 +271,7 @@ "topic_name": "hammer-describe-es-public-access-lambda", "reporting": true, "remediation": false, - "remediation_retention_period": 21 + "remediation_retention_period": 21, + "labels": ["public-elasticsearch-domains"], } } diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index c40dcf3d..b7e4a901 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -2,17 +2,16 @@ import logging import urllib3 - from collections import namedtuple from jira import JIRA from jira import JIRAError from library.utility import empty_converter - NewIssue = namedtuple('NewIssue', [ 'ticket_id', 'ticket_assignee_id' - ]) +]) + class JiraLabels(object): """ Base class for JIRA tickets labeling """ @@ -20,23 +19,33 
@@ class JiraLabels(object): 'cloudtrails': ['cloudtrail-issue'], 'ebsSnapshot': ['ebs-public-snapshot'], 'ebsVolume': ['ebs-unencrypted-volume'], + 'ecsExternalImageSource': ['ecs-external-image'], + 'ecsLogging': ['ecs-logging'], + 'ecsPrivilegedAccess': ['ecs-privileged-access'], + 'esDomainLogging': ['es-domain-logging'], + 'esPublicAccessDomain': ['es-public-access-domain'], + 'esUnencryptedDomain': ['es-unencrypted-domain'], 'iamUserInactiveKeys': ['iam-key-inactive'], 'iamUserKeysRotation': ['iam-key-rotation'], 'publicAMIs': ['public-ami'], 'rdsSnapshot': ['rds-public-snapshot'], 'rdsEncrypt': ['rds-unencrypted'], + 'redshiftLogging': ['redshift-logging'], + 'redshiftPublicAccess': ['redshift-public-access'], + 'redshiftUnencrypted': ['redshift-encryption'], 's3Encrypt': ['s3-unencrypted'], 's3acl': ['s3-public-acl'], 's3policy': ['s3-public-policy'], 'sg': ['insecure-services'], 'sqspolicy': ['sqs-public-policy'] } + def __init__(self, config, module=''): self.config = config self.module = module self.module_jira = getattr(config, module) if hasattr(config, module) else False self.module_jira_labels = self.module_jira.labels if hasattr(self.module_jira, 'labels') else False - + @property def module_labels(self): if self.module_jira_labels: @@ -50,17 +59,9 @@ class JiraReporting(object): def __init__(self, config, module=''): self.config = config self.jira = JiraOperations(self.config, module=module) - self.module_jira_enabled = getattr(config, module).jira if hasattr(hasattr(config, module), 'jira') else True self.jira_labels = JiraLabels(config, module) self.module_jira_labels = self.jira_labels.module_labels - def _jira_enabled(func): - def decorated(self, *args, **kwargs): - if self.config.jira.enabled and self.module_jira_enabled: - return func(self, *args, **kwargs) - return decorated - - @_jira_enabled def add_issue(self, issue_summary, issue_description, priority, @@ -111,23 +112,19 @@ def add_issue(self, return NewIssue(ticket_id=ticket_id, 
ticket_assignee_id=ticket_assignee_id) - @_jira_enabled def close_issue(self, ticket_id, comment): self.jira.add_comment(ticket_id, comment) self.jira.close_issue(ticket_id) logging.debug(f"Closed issue ({self.jira.ticket_url(ticket_id)})") - @_jira_enabled def update_issue(self, ticket_id, comment): # TODO: reopen ticket if closed self.jira.add_comment(ticket_id, comment) logging.debug(f"Updated issue {self.jira.ticket_url(ticket_id)}") - @_jira_enabled def add_attachment(self, ticket_id, filename, text): return self.jira.add_attachment(ticket_id, filename, text) - @_jira_enabled def remediate_issue(self, ticket_id, comment, reassign): if reassign: self.jira.assign_user(ticket_id, self.jira.current_user) @@ -139,6 +136,7 @@ def ticket_url(self, ticket_id): def add_label(self, ticket_id, label): self.jira.add_label(ticket_id, label) + class JiraOperations(object): """ Base class for interaction with JIRA """ def __init__(self, config, module=''): @@ -150,9 +148,8 @@ def __init__(self, config, module=''): self.server = self.config.jira.server # JIRA established session self.session = None - self.module_jira_enabled = getattr(config, module).jira if hasattr(hasattr(config, module), 'jira') else True - if self.config.jira.enabled and self.module_jira_enabled: + if self.config.jira.enabled: self.login_oauth() else: logging.debug("JIRA integration is disabled") diff --git a/hammer/library/slack_utility.py b/hammer/library/slack_utility.py index beb11c51..a9bc179a 100755 --- a/hammer/library/slack_utility.py +++ b/hammer/library/slack_utility.py @@ -9,11 +9,10 @@ class SlackNotification(object): - def __init__(self, config=None, module=''): + def __init__(self, config=None): self.config = Config() if config is None else config self.sc = SlackClient(self.config.slack.api_token) self.slackUser = "hammer" - self.module_slack_enabled = getattr(config, module).slack if hasattr(hasattr(config, module), 'slack') else True @property @lru_cache(maxsize=1) @@ -48,7 +47,7 @@ def 
user_id(self, user): return self.users.get(user.lower(), None) def post_message(self, msg, owner=None): - if not self.config.slack.enabled or not self.module_slack_enabled: + if not self.config.slack.enabled: return # if owner is not set - try to find channel to send msg to based on msg body diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 6a912efe..3c6e8a71 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -47,7 +47,7 @@ def create_tickets_cloud_trail_logging(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='cloudtrails') - slack = SlackNotification(self.config, module='cloudtrails') + slack = SlackNotification(self.config) for account_id, account_name in self.config.cloudtrails.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index ab2091ce..f0da2d20 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -27,7 +27,7 @@ def create_tickets_ebs_public_snapshots(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='ebsSnapshot') - slack = SlackNotification(self.config, module='ebsSnapshot') + slack = SlackNotification(self.config) for account_id, account_name in self.config.ebsSnapshot.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git 
a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index 51f94edc..7a472782 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -75,7 +75,7 @@ def create_tickets_ebsvolumes(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='ebsVolume') - slack = SlackNotification(self.config, module='ebsVolume') + slack = SlackNotification(self.config) for account_id, account_name in self.config.ebsVolume.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 67cace2e..b737b41f 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_ecs_external_images(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='ecsExternalImageSource') slack = SlackNotification(self.config) for account_id, account_name in self.config.ecs_external_image_source.accounts.items(): @@ -123,7 +123,7 @@ def create_tickets_ecs_external_images(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["ecs-external-image"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git 
a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index b3ef1ab6..7c82c829 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_ecs_logging(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='ecsLogging') slack = SlackNotification(self.config) for account_id, account_name in self.config.ecs_logging.accounts.items(): @@ -122,7 +122,7 @@ def create_tickets_ecs_logging(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["ecs-logging"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index 13f83504..a9f47efd 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_ecs_privileged(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='ecsPrivilegedAccess') slack = SlackNotification(self.config) for account_id, account_name in self.config.ecs_privileged_access.accounts.items(): @@ -125,7 +125,7 @@ def create_tickets_ecs_privileged(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", 
labels=["ecs-privileged-access"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index 4057a6f2..5a9d262e 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_elasticsearch_domain_logging(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='esDomainLogging') slack = SlackNotification(self.config) for account_id, account_name in self.config.esLogging.accounts.items(): @@ -117,7 +117,7 @@ def create_tickets_elasticsearch_domain_logging(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["es-domain-logging"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index ae29eba5..5bac7137 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -29,7 +29,7 @@ def create_tickets_elasticsearch_public_access(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='esPublicAccessDomain') slack = SlackNotification(self.config) 
for account_id, account_name in self.config.esPublicAccess.accounts.items(): @@ -119,7 +119,7 @@ def create_tickets_elasticsearch_public_access(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["public-elasticsearch-domains"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index ab818dc3..3154d309 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_elasticsearch_unencryption(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='esUnencryptedDomain') slack = SlackNotification(self.config) for account_id, account_name in self.config.esEncrypt.accounts.items(): @@ -135,7 +135,7 @@ def create_tickets_elasticsearch_unencryption(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["unencrypted-elasticsearch-domains"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index f5cb7d73..7d2d1f8a 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -28,7 +28,7 @@ def create_jira_ticket(self): main_account = Account(region=self.config.aws.region) ddb_table = 
main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='iamUserInactiveKeys') - slack = SlackNotification(self.config, module='iamUserInactiveKeys') + slack = SlackNotification(self.config) for account_id, account_name in self.config.iamUserInactiveKeys.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 62d69966..440bff4a 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -28,7 +28,7 @@ def create_jira_ticket(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='iamUserKeysRotation') - slack = SlackNotification(self.config, module='iamUserKeysRotation') + slack = SlackNotification(self.config) for account_id, account_name in self.config.iamUserKeysRotation.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index 15ff5f87..cf3cf43e 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -27,7 +27,7 @@ def create_tickets_public_ami(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='publicAMIs') - slack = SlackNotification(self.config, module='publicAMIs') + slack = SlackNotification(self.config) for account_id, account_name in self.config.publicAMIs.accounts.items(): logging.debug(f"Checking 
'{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index 67182bba..8813b457 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -27,7 +27,7 @@ def create_tickets_rds_public_snapshots(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='rdsSnapshot') - slack = SlackNotification(self.config, module='rdsSnapshot') + slack = SlackNotification(self.config) for account_id, account_name in self.config.rdsSnapshot.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index 9e760419..425be662 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -27,7 +27,7 @@ def create_tickets_rds_unencrypted_instances(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='rdsEncrypt') - slack = SlackNotification(self.config, module='rdsEncrypt') + slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index cb412c6d..d662115e 100644 --- 
a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_redshift_logging(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='redshiftLogging') slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): @@ -108,7 +108,7 @@ def create_tickets_redshift_logging(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["redshift-logging"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index d7875154..aaaabba6 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_redshift_public_access(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='redshiftPublicAccess') slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): @@ -100,7 +100,7 @@ def create_tickets_redshift_public_access(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["redshift-public-access"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git 
a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 61ecd3be..38d4ae1a 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -26,7 +26,7 @@ def create_tickets_redshift_unencrypted_cluster(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='redshiftUnencrypted') slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): @@ -101,7 +101,7 @@ def create_tickets_redshift_unencrypted_cluster(self): try: response = jira.add_issue( issue_summary=issue_summary, issue_description=issue_description, - priority="Major", labels=["redshift-unencrypted-clusters"], + priority="Major", owner=owner, account_id=account_id, bu=bu, product=product, diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index 1ea1db6a..f1fcd704 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -27,7 +27,7 @@ def create_tickets_s3_unencrypted_buckets(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='s3Encrypt') - slack = SlackNotification(self.config, module='s3Encrypt') + slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff 
--git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 770675f2..ea57771f 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -34,7 +34,7 @@ def create_tickets_s3buckets(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='s3acl') - slack = SlackNotification(self.config, module='s3acl') + slack = SlackNotification(self.config) for account_id, account_name in self.config.s3acl.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index ea502350..4adf7b75 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -30,7 +30,7 @@ def create_tickets_s3buckets(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='s3policy') - slack = SlackNotification(self.config, module='s3policy') + slack = SlackNotification(self.config) for account_id, account_name in self.config.s3policy.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 36278d38..c4382b25 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -276,7 
+276,7 @@ def create_tickets_securitygroups(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='sg') - slack = SlackNotification(self.config, module='sg') + slack = SlackNotification(self.config) for account_id, account_name in self.config.sg.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index ecb494aa..0d6f9095 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -30,7 +30,7 @@ def create_tickets_sqs_policy(self): main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config, module='sqspolicy') - slack = SlackNotification(self.config, module='sqspolicy') + slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") From d26ffeee672aa978170db93ef8ed91b7ef3a301f Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Thu, 5 Sep 2019 15:20:01 +0300 Subject: [PATCH 173/193] Config name changes to label names --- hammer/library/jiraoperations.py | 13 +++++++- .../reporting/create_cloudtrail_tickets.py | 26 ++++++++-------- .../create_ebs_volume_issue_tickets.py | 30 +++++++++---------- 3 files changed, 40 insertions(+), 29 deletions(-) diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index b7e4a901..7be5803a 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -62,6 +62,13 @@ def __init__(self, config, module=''): self.jira_labels = JiraLabels(config, module) 
self.module_jira_labels = self.jira_labels.module_labels + def _jira_enabled(func): + def decorated(self, *args, **kwargs): + if self.config.jira.enabled: + return func(self, *args, **kwargs) + return decorated + + @_jira_enabled def add_issue(self, issue_summary, issue_description, priority, @@ -112,19 +119,23 @@ def add_issue(self, return NewIssue(ticket_id=ticket_id, ticket_assignee_id=ticket_assignee_id) + @_jira_enabled def close_issue(self, ticket_id, comment): self.jira.add_comment(ticket_id, comment) self.jira.close_issue(ticket_id) logging.debug(f"Closed issue ({self.jira.ticket_url(ticket_id)})") + @_jira_enabled def update_issue(self, ticket_id, comment): # TODO: reopen ticket if closed self.jira.add_comment(ticket_id, comment) logging.debug(f"Updated issue {self.jira.ticket_url(ticket_id)}") + @_jira_enabled def add_attachment(self, ticket_id, filename, text): return self.jira.add_attachment(ticket_id, filename, text) + @_jira_enabled def remediate_issue(self, ticket_id, comment, reassign): if reassign: self.jira.assign_user(ticket_id, self.jira.current_user) @@ -353,7 +364,7 @@ def add_comment(self, ticket_id, comment): def add_watcher(self, ticket_id, user): """ Adding jira ticket watcher. - + :param ticket_id: jira ticket id :param user: watcher user id :return: nothing diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 3c6e8a71..3c3c19dc 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -130,19 +130,19 @@ def create_tickets_cloud_trail_logging(self): issue_description += (f"For any other exceptions, please follow the [whitelisting procedure|{self.config.whitelisting_procedure_url}] " f"and provide a strong business reasoning. 
") - try: - response = jira.add_issue( - issue_summary=issue_summary, issue_description=issue_description, - priority="Major", - account_id=account_id, - ) - except Exception: - logging.exception("Failed to create jira ticket") - continue - - if response is not None: - issue.jira_details.ticket = response.ticket_id - issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + # try: + # response = jira.add_issue( + # issue_summary=issue_summary, issue_description=issue_description, + # priority="Major", + # account_id=account_id, + # ) + # except Exception: + # logging.exception("Failed to create jira ticket") + # continue + # + # if response is not None: + # issue.jira_details.ticket = response.ticket_id + # issue.jira_details.ticket_assignee_id = response.ticket_assignee_id slack.report_issue( msg=f"Discovered {issue_summary}" diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index 7a472782..f97ed8fc 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -180,21 +180,21 @@ def create_tickets_ebsvolumes(self): issue_summary = (f"EBS unencrypted volume '{volume_id}' " f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}") - try: - response = jira.add_issue( - issue_summary=issue_summary, issue_description=issue_description, - priority="Major", - owner=owner, - account_id=account_id, - bu=bu, product=product, - ) - except Exception: - logging.exception("Failed to create jira ticket") - continue - - if response is not None: - issue.jira_details.ticket = response.ticket_id - issue.jira_details.ticket_assignee_id = response.ticket_assignee_id + # try: + # response = jira.add_issue( + # issue_summary=issue_summary, issue_description=issue_description, + # priority="Major", + # owner=owner, + # 
account_id=account_id, + # bu=bu, product=product, + # ) + # except Exception: + # logging.exception("Failed to create jira ticket") + # continue + # + # if response is not None: + # issue.jira_details.ticket = response.ticket_id + # issue.jira_details.ticket_assignee_id = response.ticket_assignee_id issue.jira_details.owner = owner issue.jira_details.business_unit = bu From 4035d2e2330e4495d9f5e5d07dc1023f4f7a2dd5 Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Thu, 5 Sep 2019 15:27:26 +0300 Subject: [PATCH 174/193] Config name changes to label names --- deployment/configs/config.json | 58 +++++-------------- .../remediation/clean_ami_public_access.py | 2 +- .../remediation/clean_iam_key_rotation.py | 2 +- .../remediation/clean_iam_keys_inactive.py | 2 +- .../remediation/clean_public_ebs_snapshots.py | 2 +- .../remediation/clean_public_rds_snapshots.py | 2 +- .../clean_s3bucket_acl_permissions.py | 2 +- .../clean_s3bucket_policy_permissions.py | 2 +- .../remediation/clean_s3bucket_unencrypted.py | 2 +- .../remediation/clean_security_groups.py | 2 +- .../clean_sqs_policy_permissions.py | 2 +- 11 files changed, 26 insertions(+), 52 deletions(-) diff --git a/deployment/configs/config.json b/deployment/configs/config.json index cf72e281..2fcb37ae 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -54,9 +54,7 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": ["publics3"], - "slack": true + "labels": ["publics3"] }, "secgrp_unrestricted_access": { "enabled": true, @@ -79,9 +77,7 @@ "remediation": false, "remediation_accounts": ["210987654321", "654321210987"], "remediation_retention_period": 21, - "jira": true, - "labels": ["insecure-services"], - "slack": true + "labels": ["insecure-services"] }, "user_inactivekeys": { "enabled": true, @@ -92,9 +88,7 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": 
["inactive-iam-keys"], - "slack": true + "labels": ["inactive-iam-keys"] }, "user_keysrotation": { "enabled": true, @@ -104,9 +98,7 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": ["iam-key-rotation"], - "slack": true + "labels": ["iam-key-rotation"] }, "s3_bucket_policy": { "enabled": true, @@ -115,18 +107,14 @@ "reporting": false, "remediation": false, "remediation_retention_period": 7, - "jira": true, - "labels": ["publics3"], - "slack": true + "labels": ["publics3"] }, "cloudtrails": { "enabled": true, "ddb.table_name": "hammer-cloudtrails", "topic_name": "hammer-describe-cloudtrails-lambda", "reporting": false, - "jira": false, - "labels": ["cloud-trail-disabled"], - "slack": false + "labels": ["cloud-trail-disabled"] }, "ebs_unencrypted_volume": { "enabled": true, @@ -134,9 +122,7 @@ "topic_name": "hammer-describe-ebs-unencrypted-volumes-lambda", "accounts": ["123456789012", "210987654321"], "reporting": false, - "jira": false, - "labels": ["unencrypted-ebs-volumes"], - "slack": false + "labels": ["unencrypted-ebs-volumes"] }, "ebs_public_snapshot": { "enabled": true, @@ -145,9 +131,7 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": ["public_snapshots"], - "slack": true + "labels": ["public_snapshots"] }, "rds_public_snapshot": { "enabled": true, @@ -156,9 +140,7 @@ "reporting": false, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": ["rds-public-snapshots"], - "slack": true + "labels": ["rds-public-snapshots"] }, "ec2_public_ami": { "enabled": true, @@ -167,9 +149,7 @@ "reporting": false, "remediation": false, "remediation_retention_period": 21, - "jira": true, - "labels": ["public-ami"], - "slack": true + "labels": ["public-ami"] }, "sqs_public_access": { "enabled": true, @@ -178,9 +158,7 @@ "reporting": true, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": 
["publicsqs"], - "slack": true + "labels": ["publicsqs"] }, "s3_encryption": { "enabled": true, @@ -189,18 +167,14 @@ "reporting": true, "remediation": false, "remediation_retention_period": 0, - "jira": true, - "labels": ["s3-unencrypted"], - "slack": true + "labels": ["s3-unencrypted"] }, "rds_encryption": { "enabled": true, "ddb.table_name": "hammer-rds-unencrypted", "topic_name": "hammer-describe-rds-encryption-lambda", "reporting": true, - "jira": true, - "labels": ["rds-unencrypted-instances"], - "slack": true + "labels": ["rds-unencrypted-instances"] }, "redshift_logging": { "enabled": true, @@ -256,14 +230,14 @@ "reporting": true, "remediation": false, "remediation_retention_period": 21, - "labels": ["es-domain-logging"], + "labels": ["es-domain-logging"] }, "es_unencrypted_domain": { "enabled": true, "ddb.table_name": "hammer-es-unencrypted-domain", "topic_name": "hammer-describe-es-encryption-lambda", "reporting": true, - "labels": ["unencrypted-elasticsearch-domains"], + "labels": ["unencrypted-elasticsearch-domains"] }, "es_public_access_domain": { "enabled": true, @@ -272,6 +246,6 @@ "reporting": true, "remediation": false, "remediation_retention_period": 21, - "labels": ["public-elasticsearch-domains"], + "labels": ["public-elasticsearch-domains"] } } diff --git a/hammer/reporting-remediation/remediation/clean_ami_public_access.py b/hammer/reporting-remediation/remediation/clean_ami_public_access.py index 1418b8c3..eb5a808e 100644 --- a/hammer/reporting-remediation/remediation/clean_ami_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_ami_public_access.py @@ -29,7 +29,7 @@ def clean_ami_public_access(self): retention_period = self.config.publicAMIs.remediation_retention_period jira = JiraReporting(self.config, module='publicAMIs') - slack = SlackNotification(self.config, module='publicAMIs') + slack = SlackNotification(self.config) for account_id, account_name in self.config.publicAMIs.remediation_accounts.items(): 
logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py index ad73178a..0bd7059b 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py +++ b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py @@ -31,7 +31,7 @@ def clean_iam_access_keys(self, batch=False): retention_period = self.config.iamUserKeysRotation.remediation_retention_period jira = JiraReporting(self.config, module='iamUserKeysRotation') - slack = SlackNotification(self.config, module='iamUserKeysRotation') + slack = SlackNotification(self.config) for account_id, account_name in self.config.iamUserKeysRotation.remediation_accounts.items(): logging.debug("* Account Name:" + account_name + " :::Account ID:::" + account_id) diff --git a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py index 0caa5101..4c21bb52 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py +++ b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py @@ -31,7 +31,7 @@ def clean_iam_access_keys(self, batch=False): retention_period = self.config.iamUserInactiveKeys.remediation_retention_period jira = JiraReporting(self.config, module='iamUserInactiveKeys') - slack = SlackNotification(self.config, module='iamUserInactiveKeys') + slack = SlackNotification(self.config) for account_id, account_name in self.config.iamUserInactiveKeys.remediation_accounts.items(): logging.debug("* Account Name:" + account_name + " :::Account ID:::" + account_id) diff --git a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py index 43e63aef..4ad79a93 100755 --- a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py +++ 
b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py @@ -31,7 +31,7 @@ def clean_public_ebs_snapshots(self, batch=False): retention_period = self.config.ebsSnapshot.remediation_retention_period jira = JiraReporting(self.config, module='ebsSnapshot') - slack = SlackNotification(self.config, module='ebsSnapshot') + slack = SlackNotification(self.config) for account_id, account_name in self.config.ebsSnapshot.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py index 327e3c74..cfc3d9e5 100755 --- a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py @@ -32,7 +32,7 @@ def clean_public_rds_snapshots(self, batch=False): retention_period = self.config.rdsSnapshot.remediation_retention_period jira = JiraReporting(self.config, module='rdsSnapshot') - slack = SlackNotification(self.config, module='rdsSnapshot') + slack = SlackNotification(self.config) for account_id, account_name in self.config.rdsSnapshot.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py index 7015a5fc..8f0b4f06 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py @@ -32,7 +32,7 @@ def cleans3bucketaclpermissions(self, batch=False): retention_period = self.config.s3acl.remediation_retention_period jira = JiraReporting(self.config, module='s3acl') - slack = SlackNotification(self.config, module='s3acl') + slack = SlackNotification(self.config) for account_id, account_name in 
self.config.s3acl.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py index 02abf5ed..c8cc6986 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py @@ -32,7 +32,7 @@ def clean_s3bucket_policy_permissions(self, batch=False): retention_period = self.config.s3policy.remediation_retention_period jira = JiraReporting(self.config, module='s3policy') - slack = SlackNotification(self.config, module='s3policy') + slack = SlackNotification(self.config) for account_id, account_name in self.config.s3policy.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py index 49f0a731..f9757e6b 100644 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py @@ -31,7 +31,7 @@ def cleans3bucketunencrypted(self, batch=False): retention_period = self.config.s3Encrypt.remediation_retention_period jira = JiraReporting(self.config, module='s3Encrypt') - slack = SlackNotification(self.config, module='s3Encrypt') + slack = SlackNotification(self.config) for account_id, account_name in self.config.s3Encrypt.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_security_groups.py b/hammer/reporting-remediation/remediation/clean_security_groups.py index 7d32078f..b0c7975f 100755 --- a/hammer/reporting-remediation/remediation/clean_security_groups.py +++ 
b/hammer/reporting-remediation/remediation/clean_security_groups.py @@ -32,7 +32,7 @@ def clean_security_groups(self, batch=False): retention_period = self.config.sg.remediation_retention_period jira = JiraReporting(self.config, module='sg') - slack = SlackNotification(self.config, module='sg') + slack = SlackNotification(self.config) for account_id, account_name in self.config.sg.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") diff --git a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py index 5dca0469..12d84953 100644 --- a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py @@ -30,7 +30,7 @@ def clean_sqs_policy_permissions(self): retention_period = self.config.sqspolicy.remediation_retention_period jira = JiraReporting(self.config, module='sqspolicy') - slack = SlackNotification(self.config, module='sqspolicy') + slack = SlackNotification(self.config) for account_id, account_name in self.config.sqspolicy.remediation_accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") From f6cf3e4cf39d5e3ac3e552a45ba1c74d6cde027b Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Thu, 5 Sep 2019 18:21:09 +0300 Subject: [PATCH 175/193] Calling out 0/0 vs specific non-DJ IPs --- hammer/library/aws/security_groups.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/aws/security_groups.py b/hammer/library/aws/security_groups.py index 6d59a9ec..f284b7b7 100755 --- a/hammer/library/aws/security_groups.py +++ b/hammer/library/aws/security_groups.py @@ -381,7 +381,7 @@ def validate_trusted_registrant(cidr): :param cidr: :return: """ - trusted_registrants = Config().config.sg.trusted_registrants + trusted_registrants = Config().sg.trusted_registrants if not 
trusted_registrants: return False From 16a9af4a7fa27d31dc8e6be6f4dbadfafb581612 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 17 Sep 2019 14:40:57 +0530 Subject: [PATCH 176/193] Update with Quarantine list feature changes Update with Quarantine list feature changes --- .../configs/quarantine_issues_list.json | 91 +++++++++++++++++++ .../describe_public_ami_issues.py | 4 +- hammer/library/config.py | 18 +++- hammer/library/ddb_issues.py | 2 + .../remediation/clean_ami_public_access.py | 7 ++ .../create_public_ami_issue_tickets.py | 21 ++++- 6 files changed, 140 insertions(+), 3 deletions(-) create mode 100644 deployment/configs/quarantine_issues_list.json diff --git a/deployment/configs/quarantine_issues_list.json b/deployment/configs/quarantine_issues_list.json new file mode 100644 index 00000000..f77433f4 --- /dev/null +++ b/deployment/configs/quarantine_issues_list.json @@ -0,0 +1,91 @@ +{ + "__comment__": "Contains dictionary with security issues to quarantine (list of issues skipped now and will remediate in future) grouped by issue type and accounts. Put your account id as a key for desired security issue type and put a list with issues to ignore as a value.", + "cloudtrails": { + "__comment__": "Detects issues with CloudTrail (logging disabled or has issues with permissions). Key - account id, values - AWS regions.", + "123456789012": ["eu-west-1", "us-east-2"] + }, + "user_inactivekeys": { + "__comment__": "Detects IAM users with inactive access keys (not used more that definite number of days). Key - account id, values - IAM user names or access key ids.", + "123456789012": ["user1", "user2", "AKIAI6UV5TCF3NA223T1", "AKIAIG7Y36NN5DWX4NO3"] + }, + "user_keysrotation": { + "__comment__": "Detects IAM users expired access keys (created earlier than definite number of days). 
Key - account id, values - IAM user names or access key ids.", + "123456789012": ["user1", "user2", "AKIAI6UV5TCF3NA223T1", "AKIAIG7Y36NN5DWX4NO3"] + }, + "s3_bucket_acl": { + "__comment__": "Detects S3 buckets with public ACL (with AllUsers/AuthenticatedUsers groups in Grantee). Key - account id, values - S3 bucket names.", + "123456789012": ["public-site-bucket", "public-bucket-available-via-cloudfront"] + }, + "s3_bucket_policy": { + "__comment__": "Detects S3 buckets with public policy ('Allow' statements with '*' in Principal and not restricted by IP). Key - account id, values - S3 bucket names.", + "123456789012": ["public-site-bucket", "public-bucket-available-via-cloudfront"] + }, + "secgrp_unrestricted_access": { + "__comment__": "Detects security groups with world-wide open ports from the list. Key - account id, values - 1) security group ID or 2) VPC ID + security group Name separated by colon.", + "123456789012": ["sg-7c124307", "sg-2132a25b", "vpc-a372f3ca:default"] + }, + "ebs_unencrypted_volume": { + "__comment__": "Detects unencrypted EBS volumes. Key - account id, values - volume ids.", + "123456789012": ["vol-04ddaf8f2aef1b1f4", "vol-004156f485f6d57c7"] + }, + "ebs_public_snapshot": { + "__comment__": "Detects public EBS snapshots (with group 'all' in 'CreateVolumePermissions'). Key - account id, values - snapshot ids.", + "123456789012": ["snap-027927dbf368b3746", "snap-087534caad1ef1d0a"] + }, + "rds_public_snapshot":{ + "__comment__": "Detects public RDS snapshots (with 'all' in 'restore' attribute). Key - account id, values - snapshot ARNs.", + "123456789012": ["arn:aws:rds:eu-central-1:123456789012:snapshot:public", "arn:aws:rds:eu-west-1:123456789012:snapshot:rds:snapshot1"] + }, + "public_ami_issues": { + "__comment__": "Detects public AMI issues (with 'all' in 'restore' attribute). 
Key - account id, values - AMI IDs.", + "123456789012": [""] + }, + "sqs_public_access":{ + "__comment__": "Detects public SQS polices (with 'all' in 'restore' attribute). Key - account id, values - SQS names.", + "123456789012": [""] + }, + "s3_encryption": { + "__comment__": "Detects Unencrypted s3 buckets (with 'all' in 'restore' attribute). Key - account id, values - S3 bucket names.", + "123456789012": [""] + }, + "rds_encryption": { + "__comment__": "Detects unencrypted RDS instances (with 'all' in 'restore' attribute). Key - account id, values - Instance ARNs.", + "123456789012": [""] + }, + "redshift_public_access":{ + "__comment__": "Detects publicly accessible Redshift Clusters.", + "123456789012": ["test-cluster"] + }, + "redshift_encryption":{ + "__comment__": "Detects unencrypted clusters.", + "123456789012": ["test-cluster"] + }, + "ecs_privileged_access":{ + "__comment__": "Detects ECS task definitions which are not enabled logging - task definitions ARNs.", + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/dev-admin:2993"] + }, + "ecs_logging":{ + "__comment__": "Detects ECS task definitions which are not enabled logging - task definitions ARNs.", + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/test-admin:2993"] + }, + "ecs_external_image_source":{ + "__comment__": "Detects ECS task definitions which are configured with external image source - task definitions ARNs.", + "1234567890123": ["arn:aws:ecs:us-east-1:1234567890123:task-definition/test-admin:2993"] + }, + "redshift_logging": { + "__comment__": "Detects Redshift clusters which are audit logging is not enabled.", + "123456789012": ["test-cluster"] + }, + "es_domain_logging": { + "__comment__": "Detects Elasticsearch domains which are not enabled logging - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + }, + "es_unencrypted_domain": { + "__comment__": "Detects Unencrypted Elasticsearch domains - domain 
ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + }, + "es_public_access_domain": { + "__comment__": "Detects Unencrypted Elasticsearch publicly accessible domains - domain ARNs.", + "1234567890123": ["arn:aws:es:us-east-2:1234567890123:domain/new-domain"] + } +} diff --git a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py index 6b957537..45870f07 100644 --- a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py +++ b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py @@ -58,7 +58,9 @@ def lambda_handler(event, context): issue.issue_details.tags = ami.tags issue.issue_details.name = ami.name issue.issue_details.region = region - if config.publicAMIs.in_whitelist(account_id, ami.id): + if config.publicAMIs.in_quarantine_list(account_id, ami.id): + issue.status = IssueStatus.Quarantine + elif config.publicAMIs.in_whitelist(account_id, ami.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/library/config.py b/hammer/library/config.py index 11ad9eee..40ff587d 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -21,19 +21,23 @@ def __init__(self, configIniFile="config.ini", whitelistFile="whitelist.json", fixnowFile="fixnow.json", - ticketOwnersFile="ticket_owners.json"): + ticketOwnersFile="ticket_owners.json", + quarantinelistFile="quarantine_issues_list.json"): """ :param configFile: local path to configuration file in json format :param configIniFile: local path to configuration file in ini format (is used in r&r EC2, build from EC2 userdata) :param whitelistFile: local path to whitelist file in json format :param fixnowFile: local path to fixnow file in json format :param ticketOwnersFile: local path to file with 
default ticket owners by bu/account in json format + :param quarantinelistFile: local path to list of quarantine issues file in json format """ self._config = self.json_load_from_file(configFile) self._config['whitelist'] = self.json_load_from_file(whitelistFile, default={}) self._config['fixnow'] = self.json_load_from_file(fixnowFile, default={}) + self._config['quarantine'] = self.json_load_from_file(quarantinelistFile, default={}) + self.local = LocalConfig(configIniFile) self.owners = OwnersConfig(self.json_load_from_file(ticketOwnersFile, default={})) self.cronjobs = self._config.get('cronjobs', {}) @@ -480,6 +484,7 @@ def __init__(self, config, section): super().__init__(config, section) self._whitelist = config["whitelist"].get(section, {}) self._fixnow = config["fixnow"].get(section, {}) + self._quarantine_list = config["quarantine"].get(section, {}) # main accounts dict self._accounts = config["aws"]["accounts"] self.name = section @@ -543,6 +548,17 @@ def in_whitelist(self, account_id, issue): """ return issue in self._whitelist.get(account_id, []) + def in_quarantine_list(self, account_id, issue): + """ + :param account_id: AWS account Id + :param issue: Issue id + + :return: boolean, if issue Id in quarantine + """ + return issue in self._quarantine_list.get(account_id, []) + + + @property def ddb_table_name(self): """ :return: DDB table name to use for storing issue details """ diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 06cf1c4b..9488e3a4 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -20,6 +20,8 @@ class IssueStatus(Enum): Resolved = "resolved" # set by reporting after closing ticket Closed = "closed" + # set by identification - issue still exists but was added to quarantine_list for future remediation + Quarantine = "quarantine" class Details(object): diff --git a/hammer/reporting-remediation/remediation/clean_ami_public_access.py 
b/hammer/reporting-remediation/remediation/clean_ami_public_access.py index e6f327bc..77565412 100644 --- a/hammer/reporting-remediation/remediation/clean_ami_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_ami_public_access.py @@ -39,6 +39,13 @@ def clean_ami_public_access(self): in_whitelist = self.config.publicAMIs.in_whitelist(account_id, ami_id) + in_quarantine = self.config.publicAMIs.in_quarantine_list(account_id, ami_id) + + if in_quarantine: + logging.debug(f"Skipping {ami_id} (in quarantine list. Will remediate this issue in future)") + + continue + if in_whitelist: logging.debug(f"Skipping {ami_id} (in whitelist)") diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index 3cf6b7f5..b0661cfa 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -42,7 +42,26 @@ def create_tickets_public_ami(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"AMI '{ami_id}' is added to quarantine list. 
") + + comment = (f"AMI '{ami_id}' public access issue " + f"in '{account_name} / {account_id}' account, {ami_region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} AMI '{ami_id}' public access issue") comment = (f"Closing {issue.status.value} AMI '{ami_id}' public access issue " From 7d7b5cb3117d76fa5497ab42f6ea9233de5474f6 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 18 Sep 2019 10:44:25 +0530 Subject: [PATCH 177/193] Updated quarantine issue changes. Updated quarantine issue changes. --- deployment/configs/quarantine_issues_list.json | 2 +- deployment/configs/whitelist.json | 2 +- hammer/identification/lambdas/requirements.txt | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/deployment/configs/quarantine_issues_list.json b/deployment/configs/quarantine_issues_list.json index f77433f4..678e80d2 100644 --- a/deployment/configs/quarantine_issues_list.json +++ b/deployment/configs/quarantine_issues_list.json @@ -36,7 +36,7 @@ "__comment__": "Detects public RDS snapshots (with 'all' in 'restore' attribute). Key - account id, values - snapshot ARNs.", "123456789012": ["arn:aws:rds:eu-central-1:123456789012:snapshot:public", "arn:aws:rds:eu-west-1:123456789012:snapshot:rds:snapshot1"] }, - "public_ami_issues": { + "ec2_public_ami": { "__comment__": "Detects public AMI issues (with 'all' in 'restore' attribute). 
Key - account id, values - AMI IDs.", "123456789012": [""] }, diff --git a/deployment/configs/whitelist.json b/deployment/configs/whitelist.json index 6f648de0..02c4c7d4 100755 --- a/deployment/configs/whitelist.json +++ b/deployment/configs/whitelist.json @@ -36,7 +36,7 @@ "__comment__": "Detects public RDS snapshots (with 'all' in 'restore' attribute). Key - account id, values - snapshot ARNs.", "123456789012": ["arn:aws:rds:eu-central-1:123456789012:snapshot:public", "arn:aws:rds:eu-west-1:123456789012:snapshot:rds:snapshot1"] }, - "public_ami_issues": { + "ec2_public_ami": { }, "sqs_public_access":{ "__comment__": "Detects public SQS polices (with 'all' in 'restore' attribute). Key - account id, values - SQS ARNs.", diff --git a/hammer/identification/lambdas/requirements.txt b/hammer/identification/lambdas/requirements.txt index 7001ed12..663bd1f6 100755 --- a/hammer/identification/lambdas/requirements.txt +++ b/hammer/identification/lambdas/requirements.txt @@ -1,2 +1 @@ -boto3==1.9.42 requests \ No newline at end of file From 328bd31a636809089363b98a46f1e4d12d1ca0b4 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 18 Sep 2019 20:00:56 +0530 Subject: [PATCH 178/193] Updated with quarantine list changes. Updated with quarantine list changes. 
--- .../describe_public_ami_issues.py | 1 + .../describe_cloudtrails.py | 5 ++- .../describe_ebs_public_snapshots.py | 5 ++- .../describe_ebs_unencrypted_volumes.py | 5 ++- ...scribe_ecs_external_image_source_issues.py | 5 ++- .../describe_ecs_logging_issues.py | 4 ++- .../describe_ecs_privileged_access_issues.py | 4 ++- ...be_elasticsearch_domains_logging_issues.py | 4 ++- ...ibe_elasticsearch_public_access_domains.py | 5 ++- ...cribe_elasticsearch_unencrypted_domains.py | 4 ++- .../describe_iam_key_rotation.py | 7 +++- .../describe_iam_accesskey_details.py | 7 +++- .../describe_rds_public_snapshots.py | 5 ++- .../describe_rds_instance_encryption.py | 5 ++- .../describe_redshift_logging_issues.py | 5 ++- ...describe_redshift_cluster_public_access.py | 5 ++- .../describe_redshift_encryption.py | 5 ++- .../describe_s3_bucket_acl.py | 5 ++- .../describe_s3_bucket_policy.py | 5 ++- .../describe_s3_encryption.py | 5 ++- .../describe_sec_grps_unrestricted_access.py | 6 +++- .../describe_sqs_public_policy.py | 5 ++- .../remediation/clean_ami_public_access.py | 2 -- .../clean_elasticsearch_domain_logging.py | 4 +++ .../clean_elasticsearch_policy_permissions.py | 6 +++- .../remediation/clean_iam_key_rotation.py | 5 +++ .../remediation/clean_iam_keys_inactive.py | 6 ++++ .../remediation/clean_public_ebs_snapshots.py | 5 +++ .../remediation/clean_public_rds_snapshots.py | 6 ++++ .../clean_redshift_cluster_unencrypted.py | 5 +++ .../clean_redshift_public_access.py | 5 +++ .../clean_s3bucket_acl_permissions.py | 5 +++ .../clean_s3bucket_policy_permissions.py | 5 +++ .../remediation/clean_s3bucket_unencrypted.py | 5 +++ .../remediation/clean_security_groups.py | 7 ++++ .../clean_sqs_policy_permissions.py | 5 +++ .../reporting/create_cloudtrail_tickets.py | 18 ++++++++++- ...reate_ebs_public_snapshot_issue_tickets.py | 20 +++++++++++- .../create_ebs_volume_issue_tickets.py | 20 +++++++++++- ...ecs_external_image_source_issue_tickets.py | 20 +++++++++++- 
.../create_ecs_logging_issue_tickets.py | 21 +++++++++++- ...ate_ecs_privileged_access_issue_tickets.py | 21 +++++++++++- ...sticsearch_domain_logging_issue_tickets.py | 21 +++++++++++- ...asticsearch_public_access_issue_tickets.py | 32 ++++++++++++++++--- ...elasticsearch_unencrypted_issue_tickets.py | 30 ++++++++++++++--- .../create_iam_key_inactive_tickets.py | 18 ++++++++++- .../create_iam_key_rotation_tickets.py | 19 ++++++++++- .../create_public_ami_issue_tickets.py | 1 - ...reate_rds_public_snapshot_issue_tickets.py | 20 +++++++++++- ..._rds_unencrypted_instance_issue_tickets.py | 20 +++++++++++- .../create_redshift_logging_issue_tickets.py | 20 +++++++++++- ...te_redshift_public_access_issue_tickets.py | 27 ++++++++++++++-- ...shift_unencrypted_cluster_issue_tickets.py | 21 +++++++++++- ...ate_s3_unencrypted_bucket_issue_tickets.py | 20 +++++++++++- .../create_s3bucket_acl_issue_tickets.py | 20 +++++++++++- .../create_s3bucket_policy_issue_tickets.py | 20 +++++++++++- .../create_security_groups_tickets.py | 21 +++++++++++- .../create_sqs_policy_issue_tickets.py | 21 +++++++++++- 58 files changed, 574 insertions(+), 55 deletions(-) diff --git a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py index 45870f07..31d86d1f 100644 --- a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py +++ b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py @@ -58,6 +58,7 @@ def lambda_handler(event, context): issue.issue_details.tags = ami.tags issue.issue_details.name = ami.name issue.issue_details.region = region + if config.publicAMIs.in_quarantine_list(account_id, ami.id): issue.status = IssueStatus.Quarantine elif config.publicAMIs.in_whitelist(account_id, ami.id): diff --git 
a/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py b/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py index b02ea0ec..e7cdce8f 100755 --- a/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py +++ b/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py @@ -56,7 +56,10 @@ def lambda_handler(event, context): issue.issue_details.disabled = checker.disabled issue.issue_details.delivery_errors = checker.delivery_errors issue.add_trails(checker.trails) - if config.cloudtrails.in_whitelist(account_id, region): + + if config.cloudtrails.in_quarantine_list(account_id, region): + issue.status = IssueStatus.Quarantine + elif config.cloudtrails.in_whitelist(account_id, region): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py b/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py index dee609e9..a6bc8c4f 100755 --- a/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py +++ b/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py @@ -57,7 +57,10 @@ def lambda_handler(event, context): issue.issue_details.region = snapshot.account.region issue.issue_details.volume_id = snapshot.volume_id issue.issue_details.tags = snapshot.tags - if config.ebsSnapshot.in_whitelist(account_id, snapshot.id): + + if config.ebsSnapshot.in_quarantine_list(account_id, snapshot.id): + issue.status = IssueStatus.Quarantine + elif config.ebsSnapshot.in_whitelist(account_id, snapshot.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py 
b/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py index 6c295aff..01172aa1 100755 --- a/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py +++ b/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py @@ -59,7 +59,10 @@ def lambda_handler(event, context): issue.issue_details.state = volume.state issue.issue_details.attachments = volume.attachments issue.issue_details.tags = volume.tags - if config.ebsVolume.in_whitelist(account_id, volume.id): + + if config.ebsVolume.in_quarantine_list(account_id, volume.id): + issue.status = IssueStatus.Quarantine + elif config.ebsVolume.in_whitelist(account_id, volume.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py index 2a93a799..0ae826a6 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -58,7 +58,10 @@ def lambda_handler(event, context): issue.issue_details.tags = task_definition.tags issue.issue_details.container_image_details = task_definition.container_image_details issue.issue_details.region = task_definition.account.region - if config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): + + if config.ecs_external_image_source.in_quarantine_list(account_id, task_definition.name): + issue.status = IssueStatus.Quarantine + elif config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted 
else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index 04fa3281..8cc0fa42 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -59,7 +59,9 @@ def lambda_handler(event, context): issue.issue_details.disabled_logging_container_names = task_definition.disabled_logging_container_names issue.issue_details.tags = task_definition.tags - if config.ecs_logging.in_whitelist(account_id, task_definition.name): + if config.ecs_logging.in_quarantine_list(account_id, task_definition.name): + issue.status = IssueStatus.Quarantine + elif config.ecs_logging.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py index edaf5e1a..50bd32c1 100644 --- a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py @@ -58,7 +58,9 @@ def lambda_handler(event, context): issue.issue_details.tags = task_definition.tags issue.issue_details.privileged_container_names = task_definition.privileged_container_names issue.issue_details.region = task_definition.account.region - if config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): + if config.ecs_privileged_access.in_quarantine_list(account_id, task_definition.name): + issue.status = 
IssueStatus.Quarantine + elif config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py index 0ba5f163..2734a282 100644 --- a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py @@ -59,7 +59,9 @@ def lambda_handler(event, context): issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags - if config.esLogging.in_whitelist(account_id, domain.name): + if config.esLogging.in_quarantine_list(account_id, domain.name): + issue.status = IssueStatus.Quarantine + elif config.esLogging.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py index eccbd677..ce45f83a 100644 --- a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py +++ b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py @@ -59,7 +59,10 @@ def lambda_handler(event, context): issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags issue.issue_details.policy = domain.policy - if config.esPublicAccess.in_whitelist(account_id, 
domain.name): + + if config.esPublicAccess.in_quarantine_list(account_id, domain.name): + issue.status = IssueStatus.Quarantine + elif config.esPublicAccess.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py index 8c5f1c56..c4830513 100644 --- a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py @@ -61,7 +61,9 @@ def lambda_handler(event, context): issue.issue_details.encrypted_at_rest = domain.encrypted_at_rest issue.issue_details.encrypted_at_transit = domain.encrypted_at_transit - if config.esEncrypt.in_whitelist(account_id, domain.name): + if config.esEncrypt.in_quarantine_list(account_id, domain.name): + issue.status = IssueStatus.Quarantine + elif config.esEncrypt.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py b/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py index b85e0bc2..29f9f6ba 100755 --- a/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py +++ b/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py @@ -56,7 +56,12 @@ def lambda_handler(event, context): issue = IAMKeyRotationIssue(account_id, key.id) issue.issue_details.username = user.id issue.issue_details.create_date = key.create_date.isoformat() - if 
config.iamUserKeysRotation.in_whitelist(account_id, key.id) or config.iamUserKeysRotation.in_whitelist(account_id, user.id): + + if config.iamUserKeysRotation.in_quarantine_list(account_id, key.id) \ + or config.iamUserKeysRotation.in_quarantine_list(account_id, user.id): + issue.status = IssueStatus.Quarantine + elif config.iamUserKeysRotation.in_whitelist(account_id, key.id) \ + or config.iamUserKeysRotation.in_whitelist(account_id, user.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py b/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py index c1db9fac..32931df2 100755 --- a/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py +++ b/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py @@ -57,7 +57,12 @@ def lambda_handler(event, context): issue.issue_details.username = user.id issue.issue_details.last_used = key.last_used.isoformat() issue.issue_details.create_date = key.create_date.isoformat() - if config.iamUserInactiveKeys.in_whitelist(account_id, key.id) or config.iamUserInactiveKeys.in_whitelist(account_id, user.id): + + if config.iamUserInactiveKeys.in_quarantine_list(account_id, key.id) \ + or config.iamUserInactiveKeys.in_quarantine_list(account_id, user.id): + issue.status = IssueStatus.Quarantine + elif config.iamUserInactiveKeys.in_whitelist(account_id, key.id) \ + or config.iamUserInactiveKeys.in_whitelist(account_id, user.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py b/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py index 6d155389..20ba2b4f 100755 --- 
a/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py +++ b/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py @@ -59,7 +59,10 @@ def lambda_handler(event, context): issue.issue_details.region = snapshot.account.region issue.issue_details.engine = snapshot.engine issue.issue_details.tags = snapshot.tags - if config.rdsSnapshot.in_whitelist(account_id, snapshot.id): + + if config.rdsSnapshot.in_quarantine_list(account_id, snapshot.id): + issue.status = IssueStatus.Quarantine + elif config.rdsSnapshot.in_whitelist(account_id, snapshot.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py b/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py index bc84e972..df034fce 100644 --- a/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py +++ b/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py @@ -59,7 +59,10 @@ def lambda_handler(event, context): issue.issue_details.region = instance.account.region issue.issue_details.engine = instance.engine issue.issue_details.tags = instance.tags - if config.rdsEncrypt.in_whitelist(account_id, instance.id): + + if config.rdsEncrypt.in_quarantine_list(account_id, instance.id): + issue.status = IssueStatus.Quarantine + elif config.rdsEncrypt.in_whitelist(account_id, instance.id): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py index 265591c3..818c4bf3 100644 --- 
a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py @@ -56,7 +56,10 @@ def lambda_handler(event, context): issue = RedshiftLoggingIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if config.redshift_logging.in_whitelist(account_id, cluster.name): + + if config.redshift_logging.in_quarantine_list(account_id, cluster.name): + issue.status = IssueStatus.Quarantine + elif config.redshift_logging.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py index 7410515c..e2ec998b 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -56,7 +56,10 @@ def lambda_handler(event, context): issue = RedshiftPublicAccessIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if config.redshift_public_access.in_whitelist(account_id, cluster.name): + + if config.redshift_public_access.in_quarantine_list(account_id, cluster.name): + issue.status = IssueStatus.Quarantine + elif config.redshift_public_access.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py 
b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py index 71674c5b..7c9e0ce8 100644 --- a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py @@ -56,7 +56,10 @@ def lambda_handler(event, context): issue = RedshiftEncryptionIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if config.redshiftEncrypt.in_whitelist(account_id, cluster.name): + + if config.redshiftEncrypt.in_quarantine_list(account_id, cluster.name): + issue.status = IssueStatus.Quarantine + elif config.redshiftEncrypt.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py b/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py index 6f8f20fa..a6aec4f7 100755 --- a/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py +++ b/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py @@ -55,7 +55,10 @@ def lambda_handler(event, context): issue.issue_details.owner = bucket.owner issue.issue_details.public_acls = bucket.get_public_acls() issue.issue_details.tags = bucket.tags - if config.s3acl.in_whitelist(account_id, bucket.name): + + if config.s3acl.in_quarantine_list(account_id, bucket.name): + issue.status = IssueStatus.Quarantine + elif config.s3acl.in_whitelist(account_id, bucket.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py b/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py index 
2ac13ae0..745c9f18 100755 --- a/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py +++ b/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py @@ -55,7 +55,10 @@ def lambda_handler(event, context): issue.issue_details.owner = bucket.owner issue.issue_details.tags = bucket.tags issue.issue_details.policy = bucket.policy - if config.s3policy.in_whitelist(account_id, bucket.name): + + if config.s3policy.in_quarantine_list(account_id, bucket.name): + issue.status = IssueStatus.Quarantine + elif config.s3policy.in_whitelist(account_id, bucket.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py b/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py index ecf8e766..87f5b35b 100644 --- a/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py +++ b/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py @@ -54,7 +54,10 @@ def lambda_handler(event, context): issue = S3EncryptionIssue(account_id, bucket.name) issue.issue_details.owner = bucket.owner issue.issue_details.tags = bucket.tags - if config.s3Encrypt.in_whitelist(account_id, bucket.name): + + if config.s3Encrypt.in_quarantine_list(account_id, bucket.name): + issue.status = IssueStatus.Quarantine + elif config.s3Encrypt.in_whitelist(account_id, bucket.name): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py b/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py index 5228a266..b3123df6 100755 --- 
a/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py +++ b/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py @@ -68,7 +68,11 @@ def lambda_handler(event, context): for ip_range in perm.ip_ranges: if not ip_range.restricted: issue.add_perm(perm.protocol, perm.from_port, perm.to_port, ip_range.cidr, ip_range.status) - if config.sg.in_whitelist(account_id, f"{sg.vpc_id}:{sg.name}") or \ + + if config.sg.in_quarantine_list(account_id, f"{sg.vpc_id}:{sg.name}")or \ + config.sg.in_quarantine_list(account_id, sg.id): + issue.status = IssueStatus.Quarantine + elif config.sg.in_whitelist(account_id, f"{sg.vpc_id}:{sg.name}") or \ config.sg.in_whitelist(account_id, sg.id): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py b/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py index 63a02b12..85fcb20c 100644 --- a/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py +++ b/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py @@ -59,7 +59,10 @@ def lambda_handler(event, context): issue.issue_details.name = queue.name issue.issue_details.region = queue.account.region issue.issue_details.policy = queue.policy - if config.sqspolicy.in_whitelist(account_id, queue.url): + + if config.sqspolicy.in_quarantine_list(account_id, queue.url): + issue.status = IssueStatus.Quarantine + elif config.sqspolicy.in_whitelist(account_id, queue.url): issue.status = IssueStatus.Whitelisted else: issue.status = IssueStatus.Open diff --git a/hammer/reporting-remediation/remediation/clean_ami_public_access.py b/hammer/reporting-remediation/remediation/clean_ami_public_access.py index 77565412..ae1c6b92 100644 --- a/hammer/reporting-remediation/remediation/clean_ami_public_access.py +++ 
b/hammer/reporting-remediation/remediation/clean_ami_public_access.py @@ -40,10 +40,8 @@ def clean_ami_public_access(self): in_whitelist = self.config.publicAMIs.in_whitelist(account_id, ami_id) in_quarantine = self.config.publicAMIs.in_quarantine_list(account_id, ami_id) - if in_quarantine: logging.debug(f"Skipping {ami_id} (in quarantine list. Will remediate this issue in future)") - continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py index f1facfc8..7513f315 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py @@ -41,6 +41,10 @@ def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): domain_name = issue.issue_id in_whitelist = self.config.esLogging.in_whitelist(account_id, domain_name) + in_quarantine = self.config.esLogging.in_quarantine_list(account_id, domain_name) + if in_quarantine: + logging.debug(f"Skipping {domain_name} (in quarantine list. 
Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {domain_name} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index d92f4365..ed7e28e9 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -41,7 +41,11 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): domain_name = issue.issue_id in_whitelist = self.config.esPublicAccess.in_whitelist(account_id, domain_name) - #in_fixlist = self.config.esPublicAccess.in_fixnow(account_id, domain_name) + # in_fixlist = self.config.esPublicAccess.in_fixnow(account_id, domain_name) + in_quarantine = self.config.esPublicAccess.in_quarantine_list(account_id, domain_name) + if in_quarantine: + logging.debug(f"Skipping {domain_name} (in quarantine list. 
Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {domain_name} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py index 42598920..96c7f1ba 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py +++ b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py @@ -42,6 +42,11 @@ def clean_iam_access_keys(self, batch=False): user_in_whitelist = self.config.iamUserKeysRotation.in_whitelist(account_id, username) key_in_whitelist = self.config.iamUserKeysRotation.in_whitelist(account_id, key_id) + user_in_quarantine = self.config.iamUserKeysRotation.in_quarantine_list(account_id, username) + key_in_quarantine = self.config.iamUserKeysRotation.in_quarantine_list(account_id, key_id) + if user_in_quarantine or key_in_quarantine: + logging.debug(f"Skipping {key_id} / {username} (in quarantine list. Will remediate this issue in future)") + continue if user_in_whitelist or key_in_whitelist: logging.debug(f"Skipping '{key_id} / {username}' (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py index 6969f360..1eb0b5a4 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py +++ b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py @@ -42,6 +42,12 @@ def clean_iam_access_keys(self, batch=False): user_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(account_id, username) key_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(account_id, key_id) + user_in_quarantine = self.config.iamUserInactiveKeys.in_quarantine_list(account_id, username) + key_in_quarantine = self.config.iamUserInactiveKeys.in_quarantine_list(account_id, key_id) + if user_in_quarantine or key_in_quarantine: + logging.debug( + f"Skipping {key_id} / 
{username} (in quarantine list. Will remediate this issue in future)") + continue if user_in_whitelist or key_in_whitelist: logging.debug(f"Skipping '{key_id} / {username}' (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py index 99df6d66..f4a6c68e 100755 --- a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py @@ -42,6 +42,11 @@ def clean_public_ebs_snapshots(self, batch=False): continue in_whitelist = self.config.ebsSnapshot.in_whitelist(account_id, issue.issue_id) + in_quarantine = self.config.ebsSnapshot.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug(f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + continue + if in_whitelist: logging.debug(f"Skipping '{issue.issue_id}' (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py index 60511540..81439beb 100755 --- a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py @@ -43,6 +43,12 @@ def clean_public_rds_snapshots(self, batch=False): continue in_whitelist = self.config.rdsSnapshot.in_whitelist(account_id, issue.issue_id) + in_quarantine = self.config.rdsSnapshot.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. 
Will remediate this issue in future)") + continue + if in_whitelist: logging.debug(f"Skipping '{issue.issue_id}' (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py index 99df8cf6..42af5f7d 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py @@ -40,6 +40,11 @@ def cleanredshiftclusterunencryption(self, batch=False): cluster_id = issue.issue_id in_whitelist = self.config.redshiftEncrypt.in_whitelist(account_id, cluster_id) + in_quarantine = self.config.redshiftEncrypt.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {cluster_id} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index a67f29be..289f3d54 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -40,6 +40,11 @@ def clean_redshift_public_access(self, batch=False): cluster_id = issue.issue_id in_whitelist = self.config.redshift_public_access.in_whitelist(account_id, cluster_id) + in_quarantine = self.config.redshift_public_access.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. 
Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {cluster_id} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py index f98773e9..a1d894ca 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py @@ -42,6 +42,11 @@ def cleans3bucketaclpermissions(self, batch=False): in_whitelist = self.config.s3acl.in_whitelist(account_id, bucket_name) in_fixlist = True #self.config.s3acl.in_fixnow(account_id, bucket_name) + in_quarantine = self.config.s3acl.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {bucket_name} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py index 37dca5ba..c9ca5ec4 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py @@ -42,6 +42,11 @@ def clean_s3bucket_policy_permissions(self, batch=False): in_whitelist = self.config.s3policy.in_whitelist(account_id, bucket_name) #in_fixlist = self.config.s3policy.in_fixnow(account_id, bucket_name) + in_quarantine = self.config.s3policy.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. 
Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {bucket_name} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py index ca6de0f4..c64237aa 100644 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py @@ -41,6 +41,11 @@ def cleans3bucketunencrypted(self, batch=False): in_whitelist = self.config.s3Encrypt.in_whitelist(account_id, bucket_name) in_fixlist = True + in_quarantine = self.config.s3Encrypt.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {bucket_name} (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_security_groups.py b/hammer/reporting-remediation/remediation/clean_security_groups.py index 91d0c7ad..7a2d8f3e 100755 --- a/hammer/reporting-remediation/remediation/clean_security_groups.py +++ b/hammer/reporting-remediation/remediation/clean_security_groups.py @@ -47,6 +47,13 @@ def clean_security_groups(self, batch=False): name_in_whitelist = self.config.sg.in_whitelist(account_id, f"{group_vpc_id}:{group_name}") id_in_whitelist = self.config.sg.in_whitelist(account_id, group_id) + name_in_quarantine = self.config.sg.in_quarantine_list(account_id, f"{group_vpc_id}:{group_name}") + id_in_quarantine = self.config.sg.in_quarantine_list(account_id, group_id) + if name_in_quarantine or id_in_quarantine: + logging.debug( + f"Skipping {group_name} / {group_id} (in quarantine list. 
Will remediate this issue in future)") + continue + if name_in_whitelist or id_in_whitelist: logging.debug(f"Skipping '{group_name} / {group_id}' (in whitelist)") diff --git a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py index a62d7bdb..3dcb101f 100644 --- a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py @@ -41,6 +41,11 @@ def clean_sqs_policy_permissions(self): queue_region = issue.issue_details.region in_whitelist = self.config.sqspolicy.in_whitelist(account_id, queue_url) + in_quarantine = self.config.sqspolicy.in_quarantine_list(account_id, issue.issue_id) + if in_quarantine: + logging.debug( + f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + continue if in_whitelist: logging.debug(f"Skipping {queue_name} (in whitelist)") diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 9a9768d0..1ff6eee7 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -56,7 +56,23 @@ def create_tickets_cloud_trail_logging(self): region = issue.issue_id # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"CloudTrail logging issue with '{region}' " + f"is added to quarantine list. 
") + + comment = (f"CloudTrail logging issue with '{region}' " + f"in '{account_name} / {account_id}' account is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + account_id=account_id + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} '{region}' CloudTrail logging issue") comment = (f"Closing {issue.status.value} issue with '{region}' CloudTrail logging in " diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index 204fc4f5..9709f41d 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -43,7 +43,25 @@ def create_tickets_ebs_public_snapshots(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"EBS public snapshot '{snapshot_id}' is added to quarantine list. 
") + + comment = (f"EBS public snapshot '{snapshot_id}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} EBS public snapshot '{snapshot_id}' issue") comment = (f"Closing {issue.status.value} EBS public snapshot '{snapshot_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index b8dc8db7..55bea9c0 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -90,7 +90,25 @@ def create_tickets_ebsvolumes(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"EBS unencrypted volume '{volume_id}' is added to quarantine list. 
") + + comment = (f"EBS unencrypted volume '{volume_id}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue") comment = (f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 63b56de0..4f284995 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -43,7 +43,25 @@ def create_tickets_ecs_external_images(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"ECS external image source '{task_definition_name}' is added to quarantine list. 
") + + comment = (f"ECS external image source '{task_definition_name}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} ECS external image source '{task_definition_name}' issue") comment = (f"Closing {issue.status.value} ECS external image source '{task_definition_name}' issue " diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index afbaadb3..c9fd1195 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -43,7 +43,26 @@ def create_tickets_ecs_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"ECS logging issue '{task_definition_name}' is added to quarantine list. 
") + + comment = (f"ECS logging issue '{task_definition_name}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} ECS logging enabled '{task_definition_name}' issue") comment = (f"Closing {issue.status.value} ECS logging enabled '{task_definition_name}' issue " diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index 533ef7df..e49350c7 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -43,7 +43,26 @@ def create_tickets_ecs_privileged(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"ECS privileged access issue '{task_definition_name}' is added to quarantine list. 
") + + comment = (f"ECS privileged access issue '{task_definition_name}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} ECS privileged access disabled " f"'{task_definition_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index 8649ca6b..081cd9c5 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -43,7 +43,26 @@ def create_tickets_elasticsearch_domain_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"Elasticsearch domain logging issue '{domain_name}' is added to quarantine list. 
") + + comment = (f"Elasticsearch domain logging issue '{domain_name}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Elasticsearch domain logging " f"'{domain_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 72c1cfc3..ff94b241 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -46,11 +46,32 @@ def create_tickets_elasticsearch_public_access(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} Elasticsearch publicly accessible domain '{domain_name}' issue") + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"Elasticsearch publicly accessible domain issue '{domain_name}' " + f"is added to quarantine list. 
") + + comment = (f"Elasticsearch publicly accessible domain issue '{domain_name}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Elasticsearch publicly accessible domain '" + f"{domain_name}' issue") - comment = (f"Closing {issue.status.value} Elasticsearch publicly accessible domain '{domain_name}' issue " - f"in '{account_name} / {account_id}' account, '{region}' region") + comment = (f"Closing {issue.status.value} Elasticsearch publicly accessible domain '" + f"{domain_name}' issue in '{account_name} / {account_id}' account,'{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. 
jira.add_label( @@ -92,7 +113,8 @@ def create_tickets_elasticsearch_public_access(self): if self.config.esPublicAccess.remediation: auto_remediation_date = (self.config.now + self.config.esPublicAccess.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}" \ + f"{{color}}\n\n" issue_description += ( f"*Recommendation*: " diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 52dacd89..5fa89d5c 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -44,11 +44,33 @@ def create_tickets_elasticsearch_unencryption(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} Elasticsearch unencrypted domain '{domain_name}' issue") + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"Elasticsearch unencrypted domain issue '{domain_name}' " + f"is added to quarantine list. 
") + + comment = (f"Elasticsearch unencrypted domain issue '{domain_name}' " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Elasticsearch unencrypted domain " + f"'{domain_name}' issue") - comment = (f"Closing {issue.status.value} Elasticsearch unencrypted domain '{domain_name}' issue " - f"in '{account_name} / {account_id}' account, '{region}' region") + comment = (f"Closing {issue.status.value} Elasticsearch unencrypted domain " + f"'{domain_name}' issue in '{account_name} / {account_id}' account, '{region}' " + f"region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. jira.add_label( diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index f43b7555..4c9dae9e 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -38,7 +38,23 @@ def create_jira_ticket(self): username = issue.issue_details.username # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"IAM Inactive access key issue '{key_id} / {username}' is added to quarantine list. 
") + + comment = (f"IAM Inactive access key issue '{key_id} / {username}' " + f"in '{account_name} / {account_id}' account is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + account_id=account_id + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} inactive access key '{key_id} / {username}' issue") comment = (f"Closing {issue.status.value} inactive access key '{key_id} / {username}' issue " diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 74fd5872..52d60aa5 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -38,7 +38,24 @@ def create_jira_ticket(self): username = issue.issue_details.username # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug( + f"IAM stale access key issue '{key_id} / {username}' " + f"is added to quarantine list. 
") + + comment = (f"IAM stale access key issue '{key_id} / {username}' " + f"in '{account_name} / {account_id}' account is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + account_id=account_id + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing stale access key {issue.status.value} '{key_id} / {username}' issue") comment = (f"Closing {issue.status.value} stale access key '{key_id} / {username}' issue " diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index b0661cfa..44c49041 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -60,7 +60,6 @@ def create_tickets_public_ami(self): account_id=account_id, bu=bu, product=product, ) - elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} AMI '{ami_id}' public access issue") diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index e0227dca..d2ee76d8 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -42,7 +42,25 @@ def create_tickets_rds_public_snapshots(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"RDS public snapshot '{snapshot_id}' is added to quarantine 
list. ") + + comment = (f"RDS public snapshot '{snapshot_id}' issue " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} RDS public snapshot '{snapshot_id}' issue") comment = (f"Closing {issue.status.value} RDS public snapshot '{snapshot_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index 10a71429..d910dbb1 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -43,7 +43,25 @@ def create_tickets_rds_unencrypted_instances(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"RDS unencrypted instance '{instance_name}' is added to quarantine list. 
") + + comment = (f"RDS unencrypted instance '{instance_name}' issue " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} RDS unencrypted instance '{instance_name}' issue") comment = (f"Closing {issue.status.value} RDS unencrypted instance '{instance_name}' issue " diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index cb412c6d..063992c9 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -42,7 +42,25 @@ def create_tickets_redshift_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"Redshift cluster logging '{cluster_id}' is added to quarantine list. 
") + + comment = (f"Redshift cluster logging '{cluster_id}' issue " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Redshift logging '{cluster_id}' issue") comment = (f"Closing {issue.status.value} Redshift cluster logging '{cluster_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index d7875154..69ba67a4 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -42,10 +42,31 @@ def create_tickets_redshift_public_access(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: - logging.debug(f"Closing {issue.status.value} Redshift publicly accessible cluster '{cluster_id}' issue") + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"Redshift publicly accessible cluster issue '{cluster_id}' " + f"is added to quarantine list. 
") + + comment = (f"Redshift publicly accessible cluster '{cluster_id}' issue " + f"in '{account_name} / {account_id}' account, {region} " + f"region added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + logging.debug(f"Closing {issue.status.value} Redshift publicly accessible " + f"cluster '{cluster_id}' issue") - comment = (f"Closing {issue.status.value} Redshift publicly accessible cluster '{cluster_id}' issue " + comment = (f"Closing {issue.status.value} Redshift publicly accessible cluster " + f"'{cluster_id}' issue " f"in '{account_name} / {account_id}' account, '{region}' region") if issue.status == IssueStatus.Whitelisted: # Adding label with "whitelisted" to jira ticket. diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 61ecd3be..02933145 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -42,7 +42,26 @@ def create_tickets_redshift_unencrypted_cluster(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"Redshift unencrypted cluster issue '{cluster_id}' " + f"is added to quarantine list. 
") + + comment = (f"Redshift unencrypted cluster '{cluster_id}' issue " + f"in '{account_name} / {account_id}' account, {region} " + f"region is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Redshift unencrypted cluster '{cluster_id}' issue") comment = (f"Closing {issue.status.value} Redshift unencrypted cluster '{cluster_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index f8b2fdb5..e3c83442 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -41,7 +41,25 @@ def create_tickets_s3_unencrypted_buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"S3 bucket unencrypted issue '{bucket_name}' " + f"is added to quarantine list. 
") + + comment = (f"S3 bucket unencrypted '{bucket_name}' issue " + f"in '{account_name} / {account_id}' account is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' unencrypted issue") comment = (f"Closing {issue.status.value} S3 bucket '{bucket_name}' unencrypted issue " diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 8fad3747..8a8e7a01 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -48,7 +48,25 @@ def create_tickets_s3buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"S3 bucket public ACL issue '{bucket_name}' " + f"is added to quarantine list. 
") + + comment = (f"S3 bucket public ACL '{bucket_name}' issue " + f"in '{account_name} / {account_id}' account is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' public ACL issue") comment = (f"Closing {issue.status.value} S3 bucket '{bucket_name}' public ACL issue " diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index b4411daa..02ccfe9f 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -45,7 +45,25 @@ def create_tickets_s3buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"S3 bucket public policy issue '{bucket_name}' " + f"is added to quarantine list. 
") + + comment = (f"S3 bucket public policy '{bucket_name}' issue " + f"in '{account_name} / {account_id}' account is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' public policy issue") comment = (f"Closing {issue.status.value} S3 bucket '{bucket_name}' public policy " diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 5d4578e3..5c646d7d 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -293,7 +293,26 @@ def create_tickets_securitygroups(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"Insecure security group issue '{group_name} / {group_id}' " + f"is added to quarantine list. 
") + + comment = (f"Insecure security group '{group_name} / {group_id}' issue " + f"in '{account_name} / {account_id}' account, {group_region} " + f"region is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue") comment = (f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index 2f452024..75ca5a5e 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -47,7 +47,26 @@ def create_tickets_sqs_policy(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: + if issue.status in [IssueStatus.Quarantine]: + logging.debug(f"SQS queue public policy issue '{queue_name}' " + f"is added to quarantine list. 
") + + comment = (f"SQS queue public policy '{queue_name}' issue " + f"in '{account_name} / {account_id}' account, {queue_region} " + f"region is added to quarantine list") + jira.update_issue( + ticket_id=issue.jira_details.ticket, + comment=comment + ) + + slack.report_issue( + msg=f"{comment}" + f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", + owner=owner, + account_id=account_id, + bu=bu, product=product, + ) + elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} SQS queue '{queue_name}' public policy issue") comment = (f"Closing {issue.status.value} SQS queue '{queue_name}' public policy " From f6ad5557a40ca81599dadd9809466e87fae1ecb5 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 24 Sep 2019 12:08:11 +0530 Subject: [PATCH 179/193] Updated with Temp whitlist item changes. Updated with Temp whitlist item changes. --- ...s_list.json => temp_whitelist_issues_list.json} | 0 .../describe_public_ami_issues.py | 4 ++-- .../describe_cloudtrails.py | 4 ++-- .../describe_ebs_public_snapshots.py | 4 ++-- .../describe_ebs_unencrypted_volumes.py | 4 ++-- .../describe_ecs_external_image_source_issues.py | 4 ++-- .../describe_ecs_logging_issues.py | 4 ++-- .../describe_ecs_privileged_access_issues.py | 4 ++-- ...escribe_elasticsearch_domains_logging_issues.py | 4 ++-- ...describe_elasticsearch_public_access_domains.py | 4 ++-- .../describe_elasticsearch_unencrypted_domains.py | 4 ++-- .../describe_iam_key_rotation.py | 6 +++--- .../describe_iam_accesskey_details.py | 6 +++--- .../describe_rds_public_snapshots.py | 4 ++-- .../describe_rds_instance_encryption.py | 4 ++-- .../describe_redshift_logging_issues.py | 4 ++-- .../describe_redshift_cluster_public_access.py | 4 ++-- .../describe_redshift_encryption.py | 4 ++-- .../describe_s3_bucket_acl.py | 4 ++-- .../describe_s3_bucket_policy.py | 4 ++-- .../describe_s3_encryption.py | 4 ++-- 
.../describe_sec_grps_unrestricted_access.py | 6 +++--- .../describe_sqs_public_policy.py | 4 ++-- hammer/library/config.py | 14 +++++++------- hammer/library/ddb_issues.py | 4 ++-- .../remediation/clean_ami_public_access.py | 7 ++++--- .../clean_elasticsearch_domain_logging.py | 7 ++++--- .../clean_elasticsearch_policy_permissions.py | 7 ++++--- .../remediation/clean_iam_key_rotation.py | 10 ++++++---- .../remediation/clean_iam_keys_inactive.py | 9 +++++---- .../remediation/clean_public_ebs_snapshots.py | 7 ++++--- .../remediation/clean_public_rds_snapshots.py | 7 ++++--- .../clean_redshift_cluster_unencrypted.py | 7 ++++--- .../remediation/clean_redshift_public_access.py | 7 ++++--- .../remediation/clean_s3bucket_acl_permissions.py | 7 ++++--- .../clean_s3bucket_policy_permissions.py | 9 +++++---- .../remediation/clean_s3bucket_unencrypted.py | 7 ++++--- .../remediation/clean_security_groups.py | 9 +++++---- .../remediation/clean_sqs_policy_permissions.py | 7 ++++--- .../reporting/create_cloudtrail_tickets.py | 6 +++--- .../create_ebs_public_snapshot_issue_tickets.py | 6 +++--- .../reporting/create_ebs_volume_issue_tickets.py | 6 +++--- ...eate_ecs_external_image_source_issue_tickets.py | 6 +++--- .../reporting/create_ecs_logging_issue_tickets.py | 6 +++--- .../create_ecs_privileged_access_issue_tickets.py | 6 +++--- ...e_elasticsearch_domain_logging_issue_tickets.py | 6 +++--- ...te_elasticsearch_public_access_issue_tickets.py | 6 +++--- ...eate_elasticsearch_unencrypted_issue_tickets.py | 6 +++--- .../reporting/create_iam_key_inactive_tickets.py | 7 ++++--- .../reporting/create_iam_key_rotation_tickets.py | 6 +++--- .../reporting/create_public_ami_issue_tickets.py | 6 +++--- .../create_rds_public_snapshot_issue_tickets.py | 6 +++--- ...reate_rds_unencrypted_instance_issue_tickets.py | 7 ++++--- .../create_redshift_logging_issue_tickets.py | 6 +++--- .../create_redshift_public_access_issue_tickets.py | 6 +++--- ...e_redshift_unencrypted_cluster_issue_tickets.py 
| 6 +++--- .../create_s3_unencrypted_bucket_issue_tickets.py | 6 +++--- .../reporting/create_s3bucket_acl_issue_tickets.py | 6 +++--- .../create_s3bucket_policy_issue_tickets.py | 6 +++--- .../reporting/create_security_groups_tickets.py | 6 +++--- .../reporting/create_sqs_policy_issue_tickets.py | 6 +++--- 61 files changed, 185 insertions(+), 168 deletions(-) rename deployment/configs/{quarantine_issues_list.json => temp_whitelist_issues_list.json} (100%) diff --git a/deployment/configs/quarantine_issues_list.json b/deployment/configs/temp_whitelist_issues_list.json similarity index 100% rename from deployment/configs/quarantine_issues_list.json rename to deployment/configs/temp_whitelist_issues_list.json diff --git a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py index 31d86d1f..fef2f057 100644 --- a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py +++ b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py @@ -59,8 +59,8 @@ def lambda_handler(event, context): issue.issue_details.name = ami.name issue.issue_details.region = region - if config.publicAMIs.in_quarantine_list(account_id, ami.id): - issue.status = IssueStatus.Quarantine + if config.publicAMIs.in_temp_whitelist(account_id, ami.id): + issue.status = IssueStatus.Tempwhitelist elif config.publicAMIs.in_whitelist(account_id, ami.id): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py b/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py index e7cdce8f..81f86e3b 100755 --- a/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py +++ 
b/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py @@ -57,8 +57,8 @@ def lambda_handler(event, context): issue.issue_details.delivery_errors = checker.delivery_errors issue.add_trails(checker.trails) - if config.cloudtrails.in_quarantine_list(account_id, region): - issue.status = IssueStatus.Quarantine + if config.cloudtrails.in_temp_whitelist(account_id, region): + issue.status = IssueStatus.Tempwhitelist elif config.cloudtrails.in_whitelist(account_id, region): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py b/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py index a6bc8c4f..5a901899 100755 --- a/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py +++ b/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py @@ -58,8 +58,8 @@ def lambda_handler(event, context): issue.issue_details.volume_id = snapshot.volume_id issue.issue_details.tags = snapshot.tags - if config.ebsSnapshot.in_quarantine_list(account_id, snapshot.id): - issue.status = IssueStatus.Quarantine + if config.ebsSnapshot.in_temp_whitelist(account_id, snapshot.id): + issue.status = IssueStatus.Tempwhitelist elif config.ebsSnapshot.in_whitelist(account_id, snapshot.id): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py b/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py index 01172aa1..b5f3764e 100755 --- a/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py +++ b/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py @@ -60,8 +60,8 @@ def lambda_handler(event, 
context): issue.issue_details.attachments = volume.attachments issue.issue_details.tags = volume.tags - if config.ebsVolume.in_quarantine_list(account_id, volume.id): - issue.status = IssueStatus.Quarantine + if config.ebsVolume.in_temp_whitelist(account_id, volume.id): + issue.status = IssueStatus.Tempwhitelist elif config.ebsVolume.in_whitelist(account_id, volume.id): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py index 0ae826a6..8694c1e5 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -59,8 +59,8 @@ def lambda_handler(event, context): issue.issue_details.container_image_details = task_definition.container_image_details issue.issue_details.region = task_definition.account.region - if config.ecs_external_image_source.in_quarantine_list(account_id, task_definition.name): - issue.status = IssueStatus.Quarantine + if config.ecs_external_image_source.in_temp_whitelist(account_id, task_definition.name): + issue.status = IssueStatus.Tempwhitelist elif config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index 8cc0fa42..0c595cf4 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ 
b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -59,8 +59,8 @@ def lambda_handler(event, context): issue.issue_details.disabled_logging_container_names = task_definition.disabled_logging_container_names issue.issue_details.tags = task_definition.tags - if config.ecs_logging.in_quarantine_list(account_id, task_definition.name): - issue.status = IssueStatus.Quarantine + if config.ecs_logging.in_temp_whitelist(account_id, task_definition.name): + issue.status = IssueStatus.Tempwhitelist elif config.ecs_logging.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py index 50bd32c1..f23edf50 100644 --- a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py @@ -58,8 +58,8 @@ def lambda_handler(event, context): issue.issue_details.tags = task_definition.tags issue.issue_details.privileged_container_names = task_definition.privileged_container_names issue.issue_details.region = task_definition.account.region - if config.ecs_privileged_access.in_quarantine_list(account_id, task_definition.name): - issue.status = IssueStatus.Quarantine + if config.ecs_privileged_access.in_temp_whitelist(account_id, task_definition.name): + issue.status = IssueStatus.Tempwhitelist elif config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py 
b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py index 2734a282..6eb906ba 100644 --- a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py @@ -59,8 +59,8 @@ def lambda_handler(event, context): issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags - if config.esLogging.in_quarantine_list(account_id, domain.name): - issue.status = IssueStatus.Quarantine + if config.esLogging.in_temp_whitelist(account_id, domain.name): + issue.status = IssueStatus.Tempwhitelist elif config.esLogging.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py index ce45f83a..bc20bea8 100644 --- a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py +++ b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py @@ -60,8 +60,8 @@ def lambda_handler(event, context): issue.issue_details.tags = domain.tags issue.issue_details.policy = domain.policy - if config.esPublicAccess.in_quarantine_list(account_id, domain.name): - issue.status = IssueStatus.Quarantine + if config.esPublicAccess.in_temp_whitelist(account_id, domain.name): + issue.status = IssueStatus.Tempwhitelist elif config.esPublicAccess.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: diff --git 
a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py index c4830513..b039d851 100644 --- a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py @@ -61,8 +61,8 @@ def lambda_handler(event, context): issue.issue_details.encrypted_at_rest = domain.encrypted_at_rest issue.issue_details.encrypted_at_transit = domain.encrypted_at_transit - if config.esEncrypt.in_quarantine_list(account_id, domain.name): - issue.status = IssueStatus.Quarantine + if config.esEncrypt.in_temp_whitelist(account_id, domain.name): + issue.status = IssueStatus.Tempwhitelist elif config.esEncrypt.in_whitelist(account_id, domain.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py b/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py index 29f9f6ba..2727f22d 100755 --- a/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py +++ b/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py @@ -57,9 +57,9 @@ def lambda_handler(event, context): issue.issue_details.username = user.id issue.issue_details.create_date = key.create_date.isoformat() - if config.iamUserKeysRotation.in_quarantine_list(account_id, key.id) \ - or config.iamUserKeysRotation.in_quarantine_list(account_id, user.id): - issue.status = IssueStatus.Quarantine + if config.iamUserKeysRotation.in_temp_whitelist(account_id, key.id) \ + or config.iamUserKeysRotation.in_temp_whitelist(account_id, user.id): + 
issue.status = IssueStatus.Tempwhitelist elif config.iamUserKeysRotation.in_whitelist(account_id, key.id) \ or config.iamUserKeysRotation.in_whitelist(account_id, user.id): issue.status = IssueStatus.Whitelisted diff --git a/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py b/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py index 32931df2..9d5f6e9f 100755 --- a/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py +++ b/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py @@ -58,9 +58,9 @@ def lambda_handler(event, context): issue.issue_details.last_used = key.last_used.isoformat() issue.issue_details.create_date = key.create_date.isoformat() - if config.iamUserInactiveKeys.in_quarantine_list(account_id, key.id) \ - or config.iamUserInactiveKeys.in_quarantine_list(account_id, user.id): - issue.status = IssueStatus.Quarantine + if config.iamUserInactiveKeys.in_temp_whitelist(account_id, key.id) \ + or config.iamUserInactiveKeys.in_temp_whitelist(account_id, user.id): + issue.status = IssueStatus.Tempwhitelist elif config.iamUserInactiveKeys.in_whitelist(account_id, key.id) \ or config.iamUserInactiveKeys.in_whitelist(account_id, user.id): issue.status = IssueStatus.Whitelisted diff --git a/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py b/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py index 20ba2b4f..c0f0bd07 100755 --- a/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py +++ b/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py @@ -60,8 +60,8 @@ def lambda_handler(event, context): issue.issue_details.engine = snapshot.engine issue.issue_details.tags = snapshot.tags - if 
config.rdsSnapshot.in_quarantine_list(account_id, snapshot.id): - issue.status = IssueStatus.Quarantine + if config.rdsSnapshot.in_temp_whitelist(account_id, snapshot.id): + issue.status = IssueStatus.Tempwhitelist elif config.rdsSnapshot.in_whitelist(account_id, snapshot.id): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py b/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py index df034fce..34fa4d32 100644 --- a/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py +++ b/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py @@ -60,8 +60,8 @@ def lambda_handler(event, context): issue.issue_details.engine = instance.engine issue.issue_details.tags = instance.tags - if config.rdsEncrypt.in_quarantine_list(account_id, instance.id): - issue.status = IssueStatus.Quarantine + if config.rdsEncrypt.in_temp_whitelist(account_id, instance.id): + issue.status = IssueStatus.Tempwhitelist elif config.rdsEncrypt.in_whitelist(account_id, instance.id): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py index 818c4bf3..06e35ef7 100644 --- a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py @@ -57,8 +57,8 @@ def lambda_handler(event, context): issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if config.redshift_logging.in_quarantine_list(account_id, cluster.name): - 
issue.status = IssueStatus.Quarantine + if config.redshift_logging.in_temp_whitelist(account_id, cluster.name): + issue.status = IssueStatus.Tempwhitelist elif config.redshift_logging.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py index e2ec998b..7db33692 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -57,8 +57,8 @@ def lambda_handler(event, context): issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if config.redshift_public_access.in_quarantine_list(account_id, cluster.name): - issue.status = IssueStatus.Quarantine + if config.redshift_public_access.in_temp_whitelist(account_id, cluster.name): + issue.status = IssueStatus.Tempwhitelist elif config.redshift_public_access.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py index 7c9e0ce8..4e1c5de3 100644 --- a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py +++ b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py @@ -57,8 +57,8 @@ def lambda_handler(event, context): issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - if 
config.redshiftEncrypt.in_quarantine_list(account_id, cluster.name): - issue.status = IssueStatus.Quarantine + if config.redshiftEncrypt.in_temp_whitelist(account_id, cluster.name): + issue.status = IssueStatus.Tempwhitelist elif config.redshiftEncrypt.in_whitelist(account_id, cluster.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py b/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py index a6aec4f7..30b69fe3 100755 --- a/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py +++ b/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py @@ -56,8 +56,8 @@ def lambda_handler(event, context): issue.issue_details.public_acls = bucket.get_public_acls() issue.issue_details.tags = bucket.tags - if config.s3acl.in_quarantine_list(account_id, bucket.name): - issue.status = IssueStatus.Quarantine + if config.s3acl.in_temp_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Tempwhitelist elif config.s3acl.in_whitelist(account_id, bucket.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py b/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py index 745c9f18..e852cf4b 100755 --- a/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py +++ b/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py @@ -56,8 +56,8 @@ def lambda_handler(event, context): issue.issue_details.tags = bucket.tags issue.issue_details.policy = bucket.policy - if config.s3policy.in_quarantine_list(account_id, bucket.name): - issue.status = IssueStatus.Quarantine + if config.s3policy.in_temp_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Tempwhitelist elif 
config.s3policy.in_whitelist(account_id, bucket.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py b/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py index 87f5b35b..4e14c5fb 100644 --- a/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py +++ b/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py @@ -55,8 +55,8 @@ def lambda_handler(event, context): issue.issue_details.owner = bucket.owner issue.issue_details.tags = bucket.tags - if config.s3Encrypt.in_quarantine_list(account_id, bucket.name): - issue.status = IssueStatus.Quarantine + if config.s3Encrypt.in_temp_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Tempwhitelist elif config.s3Encrypt.in_whitelist(account_id, bucket.name): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py b/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py index b3123df6..74a2a47f 100755 --- a/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py +++ b/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py @@ -69,9 +69,9 @@ def lambda_handler(event, context): if not ip_range.restricted: issue.add_perm(perm.protocol, perm.from_port, perm.to_port, ip_range.cidr, ip_range.status) - if config.sg.in_quarantine_list(account_id, f"{sg.vpc_id}:{sg.name}")or \ - config.sg.in_quarantine_list(account_id, sg.id): - issue.status = IssueStatus.Quarantine + if config.sg.in_temp_whitelist(account_id, f"{sg.vpc_id}:{sg.name}")or \ + config.sg.in_temp_whitelist(account_id, sg.id): + issue.status = IssueStatus.Tempwhitelist elif 
config.sg.in_whitelist(account_id, f"{sg.vpc_id}:{sg.name}") or \ config.sg.in_whitelist(account_id, sg.id): issue.status = IssueStatus.Whitelisted diff --git a/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py b/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py index 85fcb20c..0750b84c 100644 --- a/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py +++ b/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py @@ -60,8 +60,8 @@ def lambda_handler(event, context): issue.issue_details.region = queue.account.region issue.issue_details.policy = queue.policy - if config.sqspolicy.in_quarantine_list(account_id, queue.url): - issue.status = IssueStatus.Quarantine + if config.sqspolicy.in_temp_whitelist(account_id, queue.url): + issue.status = IssueStatus.Tempwhitelist elif config.sqspolicy.in_whitelist(account_id, queue.url): issue.status = IssueStatus.Whitelisted else: diff --git a/hammer/library/config.py b/hammer/library/config.py index 40ff587d..c74c35d0 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -22,21 +22,21 @@ def __init__(self, whitelistFile="whitelist.json", fixnowFile="fixnow.json", ticketOwnersFile="ticket_owners.json", - quarantinelistFile="quarantine_issues_list.json"): + tempWhitelistFile="temp_whitelist_issues_list.json"): """ :param configFile: local path to configuration file in json format :param configIniFile: local path to configuration file in ini format (is used in r&r EC2, build from EC2 userdata) :param whitelistFile: local path to whitelist file in json format :param fixnowFile: local path to fixnow file in json format :param ticketOwnersFile: local path to file with default ticket owners by bu/account in json format - :param quarantinelistFile: local path to list of quarantine issues file in json format + :param tempWhitelistFile: local path to list of 
temporary whitelist issues file in json format """ self._config = self.json_load_from_file(configFile) self._config['whitelist'] = self.json_load_from_file(whitelistFile, default={}) self._config['fixnow'] = self.json_load_from_file(fixnowFile, default={}) - self._config['quarantine'] = self.json_load_from_file(quarantinelistFile, default={}) + self._config['tempwhitelist'] = self.json_load_from_file(tempWhitelistFile, default={}) self.local = LocalConfig(configIniFile) self.owners = OwnersConfig(self.json_load_from_file(ticketOwnersFile, default={})) @@ -484,7 +484,7 @@ def __init__(self, config, section): super().__init__(config, section) self._whitelist = config["whitelist"].get(section, {}) self._fixnow = config["fixnow"].get(section, {}) - self._quarantine_list = config["quarantine"].get(section, {}) + self._tempwhitelist_list = config["tempwhitelist"].get(section, {}) # main accounts dict self._accounts = config["aws"]["accounts"] self.name = section @@ -548,14 +548,14 @@ def in_whitelist(self, account_id, issue): """ return issue in self._whitelist.get(account_id, []) - def in_quarantine_list(self, account_id, issue): + def in_temp_whitelist(self, account_id, issue): """ :param account_id: AWS account Id :param issue: Issue id - :return: boolean, if issue Id in quarantine + :return: boolean, if issue Id in temp whitelist file """ - return issue in self._quarantine_list.get(account_id, []) + return issue in self._tempwhitelist_list.get(account_id, []) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 9488e3a4..55246846 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -20,8 +20,8 @@ class IssueStatus(Enum): Resolved = "resolved" # set by reporting after closing ticket Closed = "closed" - # set by identification - issue still exists but was added to quarantine_list for future remediation - Quarantine = "quarantine" + # set by identification - issue still exists but was added to tempwhitelist_list for 
future remediation + Tempwhitelist = "tempwhitelist" class Details(object): diff --git a/hammer/reporting-remediation/remediation/clean_ami_public_access.py b/hammer/reporting-remediation/remediation/clean_ami_public_access.py index ae1c6b92..1a237412 100644 --- a/hammer/reporting-remediation/remediation/clean_ami_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_ami_public_access.py @@ -39,9 +39,10 @@ def clean_ami_public_access(self): in_whitelist = self.config.publicAMIs.in_whitelist(account_id, ami_id) - in_quarantine = self.config.publicAMIs.in_quarantine_list(account_id, ami_id) - if in_quarantine: - logging.debug(f"Skipping {ami_id} (in quarantine list. Will remediate this issue in future)") + in_temp_whitelist = self.config.publicAMIs.in_temp_whitelist(account_id, ami_id) + if in_temp_whitelist: + logging.debug(f"Skipping '{ami_id}' (in temporary whitelist items. " + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py index 7513f315..d5b6c434 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py @@ -41,9 +41,10 @@ def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): domain_name = issue.issue_id in_whitelist = self.config.esLogging.in_whitelist(account_id, domain_name) - in_quarantine = self.config.esLogging.in_quarantine_list(account_id, domain_name) - if in_quarantine: - logging.debug(f"Skipping {domain_name} (in quarantine list. Will remediate this issue in future)") + in_temp_whitelist = self.config.esLogging.in_temp_whitelist(account_id, domain_name) + if in_temp_whitelist: + logging.debug(f"Skipping '{domain_name}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index ed7e28e9..6d04a5b9 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -42,9 +42,10 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): in_whitelist = self.config.esPublicAccess.in_whitelist(account_id, domain_name) # in_fixlist = self.config.esPublicAccess.in_fixnow(account_id, domain_name) - in_quarantine = self.config.esPublicAccess.in_quarantine_list(account_id, domain_name) - if in_quarantine: - logging.debug(f"Skipping {domain_name} (in quarantine list. Will remediate this issue in future)") + in_temp_whitelist = self.config.esPublicAccess.in_temp_whitelist(account_id, domain_name) + if in_temp_whitelist: + logging.debug(f"Skipping '{domain_name}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py index 96c7f1ba..2083b97f 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py +++ b/hammer/reporting-remediation/remediation/clean_iam_key_rotation.py @@ -42,10 +42,12 @@ def clean_iam_access_keys(self, batch=False): user_in_whitelist = self.config.iamUserKeysRotation.in_whitelist(account_id, username) key_in_whitelist = self.config.iamUserKeysRotation.in_whitelist(account_id, key_id) - user_in_quarantine = self.config.iamUserKeysRotation.in_quarantine_list(account_id, username) - key_in_quarantine = self.config.iamUserKeysRotation.in_quarantine_list(account_id, key_id) - if user_in_quarantine or key_in_quarantine: - logging.debug(f"Skipping {key_id} / {username} (in quarantine list. Will remediate this issue in future)") + + user_in_temp_whitelist = self.config.iamUserKeysRotation.in_temp_whitelist(account_id, username) + key_in_temp_whitelist = self.config.iamUserKeysRotation.in_temp_whitelist(account_id, key_id) + if user_in_temp_whitelist or key_in_temp_whitelist: + logging.debug(f"Skipping '{key_id} / {username}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if user_in_whitelist or key_in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py index 1eb0b5a4..ba714422 100755 --- a/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py +++ b/hammer/reporting-remediation/remediation/clean_iam_keys_inactive.py @@ -42,11 +42,12 @@ def clean_iam_access_keys(self, batch=False): user_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(account_id, username) key_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(account_id, key_id) - user_in_quarantine = self.config.iamUserInactiveKeys.in_quarantine_list(account_id, username) - key_in_quarantine = self.config.iamUserInactiveKeys.in_quarantine_list(account_id, key_id) - if user_in_quarantine or key_in_quarantine: + user_in_temp_whitelist = self.config.iamUserInactiveKeys.in_temp_whitelist(account_id, username) + key_in_temp_whitelist = self.config.iamUserInactiveKeys.in_temp_whitelist(account_id, key_id) + if user_in_temp_whitelist or key_in_temp_whitelist: logging.debug( - f"Skipping {key_id} / {username} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{key_id} / {username}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if user_in_whitelist or key_in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py index f4a6c68e..53cb73fb 100755 --- a/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_ebs_snapshots.py @@ -42,9 +42,10 @@ def clean_public_ebs_snapshots(self, batch=False): continue in_whitelist = self.config.ebsSnapshot.in_whitelist(account_id, issue.issue_id) - in_quarantine = self.config.ebsSnapshot.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: - logging.debug(f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + in_temp_whitelist = self.config.ebsSnapshot.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: + logging.debug(f"Skipping '{issue.issue_id}' (in temporary whitelist items. " + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py index 81439beb..94aaac70 100755 --- a/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py +++ b/hammer/reporting-remediation/remediation/clean_public_rds_snapshots.py @@ -43,10 +43,11 @@ def clean_public_rds_snapshots(self, batch=False): continue in_whitelist = self.config.rdsSnapshot.in_whitelist(account_id, issue.issue_id) - in_quarantine = self.config.rdsSnapshot.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + in_temp_whitelist = self.config.rdsSnapshot.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py index 42af5f7d..30344b4a 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py @@ -40,10 +40,11 @@ def cleanredshiftclusterunencryption(self, batch=False): cluster_id = issue.issue_id in_whitelist = self.config.redshiftEncrypt.in_whitelist(account_id, cluster_id) - in_quarantine = self.config.redshiftEncrypt.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + in_temp_whitelist = self.config.redshiftEncrypt.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. " + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index 289f3d54..374441d4 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -40,10 +40,11 @@ def clean_redshift_public_access(self, batch=False): cluster_id = issue.issue_id in_whitelist = self.config.redshift_public_access.in_whitelist(account_id, cluster_id) - in_quarantine = self.config.redshift_public_access.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + in_temp_whitelist = self.config.redshift_public_access.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. 
Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. " + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py index a1d894ca..88cd12ae 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_acl_permissions.py @@ -42,10 +42,11 @@ def cleans3bucketaclpermissions(self, batch=False): in_whitelist = self.config.s3acl.in_whitelist(account_id, bucket_name) in_fixlist = True #self.config.s3acl.in_fixnow(account_id, bucket_name) - in_quarantine = self.config.s3acl.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + in_temp_whitelist = self.config.s3acl.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py index c9ca5ec4..3cea5ffb 100755 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_policy_permissions.py @@ -41,11 +41,12 @@ def clean_s3bucket_policy_permissions(self, batch=False): bucket_name = issue.issue_id in_whitelist = self.config.s3policy.in_whitelist(account_id, bucket_name) - #in_fixlist = self.config.s3policy.in_fixnow(account_id, bucket_name) - in_quarantine = self.config.s3policy.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + # in_fixlist = self.config.s3policy.in_fixnow(account_id, bucket_name) + in_temp_whitelist = self.config.s3policy.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py index c64237aa..305d59c3 100644 --- a/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_s3bucket_unencrypted.py @@ -41,10 +41,11 @@ def cleans3bucketunencrypted(self, batch=False): in_whitelist = self.config.s3Encrypt.in_whitelist(account_id, bucket_name) in_fixlist = True - in_quarantine = self.config.s3Encrypt.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + in_temp_whitelist = self.config.s3Encrypt.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. " + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_security_groups.py b/hammer/reporting-remediation/remediation/clean_security_groups.py index 7a2d8f3e..a6afa9c0 100755 --- a/hammer/reporting-remediation/remediation/clean_security_groups.py +++ b/hammer/reporting-remediation/remediation/clean_security_groups.py @@ -47,11 +47,12 @@ def clean_security_groups(self, batch=False): name_in_whitelist = self.config.sg.in_whitelist(account_id, f"{group_vpc_id}:{group_name}") id_in_whitelist = self.config.sg.in_whitelist(account_id, group_id) - name_in_quarantine = self.config.sg.in_quarantine_list(account_id, f"{group_vpc_id}:{group_name}") - id_in_quarantine = self.config.sg.in_quarantine_list(account_id, group_id) - if name_in_quarantine or id_in_quarantine: + name_in_temp_whitelist = self.config.sg.in_temp_whitelist(account_id, f"{group_vpc_id}:{group_name}") + id_in_temp_whitelist = self.config.sg.in_temp_whitelist(account_id, group_id) + if 
name_in_temp_whitelist or id_in_temp_whitelist: logging.debug( - f"Skipping {group_name} / {group_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{group_name}' / '{group_id}' (in temporary whitelist items." + f" Will remediate this issue in future)") continue if name_in_whitelist or id_in_whitelist: diff --git a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py index 3dcb101f..3e0a3188 100644 --- a/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_sqs_policy_permissions.py @@ -41,10 +41,11 @@ def clean_sqs_policy_permissions(self): queue_region = issue.issue_details.region in_whitelist = self.config.sqspolicy.in_whitelist(account_id, queue_url) - in_quarantine = self.config.sqspolicy.in_quarantine_list(account_id, issue.issue_id) - if in_quarantine: + in_temp_whitelist = self.config.sqspolicy.in_temp_whitelist(account_id, issue.issue_id) + if in_temp_whitelist: logging.debug( - f"Skipping {issue.issue_id} (in quarantine list. Will remediate this issue in future)") + f"Skipping '{issue.issue_id}' (in temporary whitelist items. 
" + f"Will remediate this issue in future)") continue if in_whitelist: diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 1ff6eee7..0ddbdf6e 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -56,12 +56,12 @@ def create_tickets_cloud_trail_logging(self): region = issue.issue_id # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"CloudTrail logging issue with '{region}' " - f"is added to quarantine list. ") + f"is added to temporary whitelist. ") comment = (f"CloudTrail logging issue with '{region}' " - f"in '{account_name} / {account_id}' account is added to quarantine list") + f"in '{account_name} / {account_id}' account is added to temporary whitelist.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index 9709f41d..9c5ab201 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -43,12 +43,12 @@ def create_tickets_ebs_public_snapshots(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"EBS public snapshot '{snapshot_id}' is added to quarantine list. ") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"EBS public snapshot '{snapshot_id}' is added to temporary whitelist items. 
") comment = (f"EBS public snapshot '{snapshot_id}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index 55bea9c0..f79d8de8 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -90,12 +90,12 @@ def create_tickets_ebsvolumes(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"EBS unencrypted volume '{volume_id}' is added to quarantine list. ") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"EBS unencrypted volume '{volume_id}' is added to temporary whitelist items. 
") comment = (f"EBS unencrypted volume '{volume_id}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 4f284995..5b82c493 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -43,12 +43,12 @@ def create_tickets_ecs_external_images(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"ECS external image source '{task_definition_name}' is added to quarantine list. ") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"ECS external image source '{task_definition_name}' is added to temporary whitelist items. 
") comment = (f"ECS external image source '{task_definition_name}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index c9fd1195..6e8d1eaa 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -43,13 +43,13 @@ def create_tickets_ecs_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( - f"ECS logging issue '{task_definition_name}' is added to quarantine list. ") + f"ECS logging issue '{task_definition_name}' is added to temporary whitelist items. 
") comment = (f"ECS logging issue '{task_definition_name}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index e49350c7..6ba58847 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -43,13 +43,13 @@ def create_tickets_ecs_privileged(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( - f"ECS privileged access issue '{task_definition_name}' is added to quarantine list. 
") + f"ECS privileged access issue '{task_definition_name}' is added to temporary whitelist items.") comment = (f"ECS privileged access issue '{task_definition_name}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index 081cd9c5..d88ba6bd 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -43,13 +43,13 @@ def create_tickets_elasticsearch_domain_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( - f"Elasticsearch domain logging issue '{domain_name}' is added to quarantine list. 
") + f"Elasticsearch logging issue '{domain_name}' is added to temporary whitelist items.") comment = (f"Elasticsearch domain logging issue '{domain_name}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index ff94b241..38cf5e40 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -46,14 +46,14 @@ def create_tickets_elasticsearch_public_access(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( f"Elasticsearch publicly accessible domain issue '{domain_name}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"Elasticsearch publicly accessible domain issue '{domain_name}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 5fa89d5c..22108689 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -44,14 +44,14 @@ def create_tickets_elasticsearch_unencryption(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( f"Elasticsearch unencrypted domain issue '{domain_name}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"Elasticsearch unencrypted domain issue '{domain_name}' " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index 4c9dae9e..b7b517e3 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -38,12 +38,13 @@ def create_jira_ticket(self): username = issue.issue_details.username # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( - f"IAM Inactive access key issue '{key_id} / {username}' is added to quarantine list. 
") + f"IAM Inactive access key issue '{key_id} / {username}' is " + f"added to temporary whitelist items.") comment = (f"IAM Inactive access key issue '{key_id} / {username}' " - f"in '{account_name} / {account_id}' account is added to quarantine list") + f"in '{account_name} / {account_id}' account is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 52d60aa5..6d71d6f4 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -38,13 +38,13 @@ def create_jira_ticket(self): username = issue.issue_details.username # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug( f"IAM stale access key issue '{key_id} / {username}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"IAM stale access key issue '{key_id} / {username}' " - f"in '{account_name} / {account_id}' account is added to quarantine list") + f"in '{account_name} / {account_id}' account is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index 44c49041..d1bd39ba 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -42,12 +42,12 @@ def create_tickets_public_ami(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"AMI '{ami_id}' is added to quarantine list. ") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"AMI '{ami_id}' is added to temporary whitelist items.") comment = (f"AMI '{ami_id}' public access issue " f"in '{account_name} / {account_id}' account, {ami_region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index d2ee76d8..605b9129 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -42,12 +42,12 @@ def create_tickets_rds_public_snapshots(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"RDS public snapshot '{snapshot_id}' is added to 
quarantine list. ") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"RDS public snapshot '{snapshot_id}' is added to temporary whitelist items.") comment = (f"RDS public snapshot '{snapshot_id}' issue " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index d910dbb1..7ed189cf 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -43,12 +43,13 @@ def create_tickets_rds_unencrypted_instances(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"RDS unencrypted instance '{instance_name}' is added to quarantine list. 
") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"RDS unencrypted instance '{instance_name}' " + f"is added to temporary whitelist items.") comment = (f"RDS unencrypted instance '{instance_name}' issue " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index 063992c9..c7712514 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -42,12 +42,12 @@ def create_tickets_redshift_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: - logging.debug(f"Redshift cluster logging '{cluster_id}' is added to quarantine list. 
") + if issue.status in [IssueStatus.Tempwhitelist]: + logging.debug(f"Redshift cluster logging '{cluster_id}' is added to temporary whitelist items.") comment = (f"Redshift cluster logging '{cluster_id}' issue " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index 69ba67a4..7b3c1c95 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -42,13 +42,13 @@ def create_tickets_redshift_public_access(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"Redshift publicly accessible cluster issue '{cluster_id}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"Redshift publicly accessible cluster '{cluster_id}' issue " f"in '{account_name} / {account_id}' account, {region} " - f"region added to quarantine list") + f"region added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 02933145..c8918a60 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -42,13 +42,13 @@ def create_tickets_redshift_unencrypted_cluster(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"Redshift unencrypted cluster issue '{cluster_id}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"Redshift unencrypted cluster '{cluster_id}' issue " f"in '{account_name} / {account_id}' account, {region} " - f"region is added to quarantine list") + f"region is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index e3c83442..309bb42d 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -41,12 +41,12 @@ def create_tickets_s3_unencrypted_buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"S3 bucket unencrypted issue '{bucket_name}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"S3 bucket unencrypted '{bucket_name}' issue " - f"in '{account_name} / {account_id}' account is added to quarantine list") + f"in '{account_name} / {account_id}' account is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 8a8e7a01..441df7c1 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -48,12 +48,12 @@ def create_tickets_s3buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"S3 bucket public ACL issue '{bucket_name}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"S3 bucket public ACL '{bucket_name}' issue " - f"in '{account_name} / {account_id}' account is added to quarantine list") + f"in '{account_name} / {account_id}' account is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index 02ccfe9f..fb59b0d8 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -45,12 +45,12 @@ def create_tickets_s3buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"S3 bucket public policy issue '{bucket_name}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"S3 bucket public policy '{bucket_name}' issue " - f"in '{account_name} / {account_id}' account is added to quarantine list") + f"in '{account_name} / {account_id}' account is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index 5c646d7d..c8d3d267 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -293,13 +293,13 @@ def create_tickets_securitygroups(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"Insecure security group issue '{group_name} / {group_id}' " - f"is added to quarantine list. 
") + f"is added to temporary whitelist items.") comment = (f"Insecure security group '{group_name} / {group_id}' issue " f"in '{account_name} / {account_id}' account, {group_region} " - f"region is added to quarantine list") + f"region is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index 75ca5a5e..f311dc03 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -47,13 +47,13 @@ def create_tickets_sqs_policy(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Quarantine]: + if issue.status in [IssueStatus.Tempwhitelist]: logging.debug(f"SQS queue public policy issue '{queue_name}' " - f"is added to quarantine list. ") + f"is added to temporary whitelist items.") comment = (f"SQS queue public policy '{queue_name}' issue " f"in '{account_name} / {account_id}' account, {queue_region} " - f"region is added to quarantine list") + f"region is added to temporary whitelist items.") jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment From effe085c00dbd57989228513a43927a3b9f1ed8a Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 24 Sep 2019 19:21:08 +0530 Subject: [PATCH 180/193] Updated with review comments. Updated with review comments. 
--- hammer/library/ddb_issues.py | 13 +++++++++++++ .../reporting/create_cloudtrail_tickets.py | 3 ++- .../create_ebs_public_snapshot_issue_tickets.py | 3 ++- .../reporting/create_ebs_volume_issue_tickets.py | 3 ++- ...reate_ecs_external_image_source_issue_tickets.py | 6 ++++-- .../reporting/create_ecs_logging_issue_tickets.py | 3 ++- .../create_ecs_privileged_access_issue_tickets.py | 6 ++++-- ...te_elasticsearch_domain_logging_issue_tickets.py | 3 ++- ...ate_elasticsearch_public_access_issue_tickets.py | 3 ++- ...reate_elasticsearch_unencrypted_issue_tickets.py | 3 ++- .../reporting/create_iam_key_inactive_tickets.py | 3 ++- .../reporting/create_iam_key_rotation_tickets.py | 3 ++- .../reporting/create_public_ami_issue_tickets.py | 3 ++- .../create_rds_public_snapshot_issue_tickets.py | 3 ++- ...create_rds_unencrypted_instance_issue_tickets.py | 3 ++- .../create_redshift_logging_issue_tickets.py | 3 ++- .../create_redshift_public_access_issue_tickets.py | 3 ++- ...te_redshift_unencrypted_cluster_issue_tickets.py | 3 ++- .../create_s3_unencrypted_bucket_issue_tickets.py | 3 ++- .../reporting/create_s3bucket_acl_issue_tickets.py | 3 ++- .../create_s3bucket_policy_issue_tickets.py | 3 ++- .../reporting/create_security_groups_tickets.py | 3 ++- .../reporting/create_sqs_policy_issue_tickets.py | 3 ++- 23 files changed, 61 insertions(+), 24 deletions(-) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 55246846..11f6363c 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -478,3 +478,16 @@ def set_status_updated(cls, ddb_table, issue): """ issue.timestamps.updated = issue.timestamps.reported cls.put(ddb_table, issue) + + @classmethod + def set_status_temp_whitelisted(cls, ddb_table, issue): + """ + Put issue with closed status and updated closed timestamp + + :param ddb_table: boto3 DDB table resource + :param issue: Issue instance + + :return: nothing + """ + issue.timestamps.temp_whitelisted = 
datetime.now(timezone.utc).isoformat() + cls.put(ddb_table, issue) \ No newline at end of file diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 0ddbdf6e..48037537 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -56,7 +56,7 @@ def create_tickets_cloud_trail_logging(self): region = issue.issue_id # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"CloudTrail logging issue with '{region}' " f"is added to temporary whitelist. ") @@ -72,6 +72,7 @@ def create_tickets_cloud_trail_logging(self): f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", account_id=account_id ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} '{region}' CloudTrail logging issue") diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index 9c5ab201..4bb8e848 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -43,7 +43,7 @@ def create_tickets_ebs_public_snapshots(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"EBS public snapshot '{snapshot_id}' is added to temporary whitelist 
items. ") comment = (f"EBS public snapshot '{snapshot_id}' " @@ -61,6 +61,7 @@ def create_tickets_ebs_public_snapshots(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} EBS public snapshot '{snapshot_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index f79d8de8..2bd83333 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -90,7 +90,7 @@ def create_tickets_ebsvolumes(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"EBS unencrypted volume '{volume_id}' is added to temporary whitelist items. 
") comment = (f"EBS unencrypted volume '{volume_id}' " @@ -108,6 +108,7 @@ def create_tickets_ebsvolumes(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 5b82c493..73285d9e 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -43,8 +43,9 @@ def create_tickets_ecs_external_images(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: - logging.debug(f"ECS external image source '{task_definition_name}' is added to temporary whitelist items. ") + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + logging.debug(f"ECS external image source '{task_definition_name}' " + f"is added to temporary whitelist items. 
") comment = (f"ECS external image source '{task_definition_name}' " f"in '{account_name} / {account_id}' account, {region} " @@ -61,6 +62,7 @@ def create_tickets_ecs_external_images(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} ECS external image source '{task_definition_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 6e8d1eaa..81f00656 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -43,7 +43,7 @@ def create_tickets_ecs_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug( f"ECS logging issue '{task_definition_name}' is added to temporary whitelist items. 
") @@ -62,6 +62,7 @@ def create_tickets_ecs_logging(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} ECS logging enabled '{task_definition_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index 6ba58847..55835e6d 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -43,9 +43,10 @@ def create_tickets_ecs_privileged(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug( - f"ECS privileged access issue '{task_definition_name}' is added to temporary whitelist items.") + f"ECS privileged access issue '{task_definition_name}' " + f"is added to temporary whitelist items.") comment = (f"ECS privileged access issue '{task_definition_name}' " f"in '{account_name} / {account_id}' account, {region} " @@ -62,6 +63,7 @@ def create_tickets_ecs_privileged(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} ECS privileged access disabled " f"'{task_definition_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index d88ba6bd..0ad9d20b 100644 --- 
a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -43,7 +43,7 @@ def create_tickets_elasticsearch_domain_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug( f"Elasticsearch logging issue '{domain_name}' is added to temporary whitelist items.") @@ -62,6 +62,7 @@ def create_tickets_elasticsearch_domain_logging(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Elasticsearch domain logging " f"'{domain_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 38cf5e40..0e5acd5d 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -46,7 +46,7 @@ def create_tickets_elasticsearch_public_access(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug( f"Elasticsearch publicly accessible domain issue '{domain_name}' " f"is added to temporary whitelist items.") @@ -66,6 +66,7 @@ def create_tickets_elasticsearch_public_access(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in 
[IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Elasticsearch publicly accessible domain '" f"{domain_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 22108689..0c7007d7 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -44,7 +44,7 @@ def create_tickets_elasticsearch_unencryption(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug( f"Elasticsearch unencrypted domain issue '{domain_name}' " f"is added to temporary whitelist items.") @@ -64,6 +64,7 @@ def create_tickets_elasticsearch_unencryption(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Elasticsearch unencrypted domain " f"'{domain_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index b7b517e3..1ce53e0a 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -38,7 +38,7 @@ def create_jira_ticket(self): username = issue.issue_details.username # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and 
issue.timestamps.temp_whitelisted is None: logging.debug( f"IAM Inactive access key issue '{key_id} / {username}' is " f"added to temporary whitelist items.") @@ -55,6 +55,7 @@ def create_jira_ticket(self): f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", account_id=account_id ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} inactive access key '{key_id} / {username}' issue") diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 6d71d6f4..9d0e687a 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -38,7 +38,7 @@ def create_jira_ticket(self): username = issue.issue_details.username # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug( f"IAM stale access key issue '{key_id} / {username}' " f"is added to temporary whitelist items.") @@ -55,6 +55,7 @@ def create_jira_ticket(self): f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", account_id=account_id ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing stale access key {issue.status.value} '{key_id} / {username}' issue") diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index d1bd39ba..b1f94c76 100644 --- 
a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -42,7 +42,7 @@ def create_tickets_public_ami(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"AMI '{ami_id}' is added to temporary whitelist items.") comment = (f"AMI '{ami_id}' public access issue " @@ -60,6 +60,7 @@ def create_tickets_public_ami(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} AMI '{ami_id}' public access issue") diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index 605b9129..929f4d82 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -42,7 +42,7 @@ def create_tickets_rds_public_snapshots(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"RDS public snapshot '{snapshot_id}' is added to temporary whitelist items.") comment = (f"RDS public snapshot '{snapshot_id}' issue " @@ -60,6 +60,7 @@ def create_tickets_rds_public_snapshots(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} RDS 
public snapshot '{snapshot_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index 7ed189cf..d4d1a467 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -43,7 +43,7 @@ def create_tickets_rds_unencrypted_instances(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"RDS unencrypted instance '{instance_name}' " f"is added to temporary whitelist items.") @@ -62,6 +62,7 @@ def create_tickets_rds_unencrypted_instances(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} RDS unencrypted instance '{instance_name}' issue") diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index c7712514..6e7eb7f5 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -42,7 +42,7 @@ def create_tickets_redshift_logging(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"Redshift cluster logging '{cluster_id}' is added to temporary whitelist items.") comment = (f"Redshift cluster logging 
'{cluster_id}' issue " @@ -60,6 +60,7 @@ def create_tickets_redshift_logging(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Redshift logging '{cluster_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index 7b3c1c95..513f5c2c 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -42,7 +42,7 @@ def create_tickets_redshift_public_access(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"Redshift publicly accessible cluster issue '{cluster_id}' " f"is added to temporary whitelist items.") @@ -61,6 +61,7 @@ def create_tickets_redshift_public_access(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Redshift publicly accessible " f"cluster '{cluster_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index c8918a60..81c9b012 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -42,7 +42,7 @@ def 
create_tickets_redshift_unencrypted_cluster(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"Redshift unencrypted cluster issue '{cluster_id}' " f"is added to temporary whitelist items.") @@ -61,6 +61,7 @@ def create_tickets_redshift_unencrypted_cluster(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} Redshift unencrypted cluster '{cluster_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index 309bb42d..4bf72275 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -41,7 +41,7 @@ def create_tickets_s3_unencrypted_buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"S3 bucket unencrypted issue '{bucket_name}' " f"is added to temporary whitelist items.") @@ -59,6 +59,7 @@ def create_tickets_s3_unencrypted_buckets(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' unencrypted issue") diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py 
b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 441df7c1..4fd5a1e0 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -48,7 +48,7 @@ def create_tickets_s3buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"S3 bucket public ACL issue '{bucket_name}' " f"is added to temporary whitelist items.") @@ -66,6 +66,7 @@ def create_tickets_s3buckets(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' public ACL issue") diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index fb59b0d8..d01925b4 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -45,7 +45,7 @@ def create_tickets_s3buckets(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"S3 bucket public policy issue '{bucket_name}' " f"is added to temporary whitelist items.") @@ -63,6 +63,7 @@ def create_tickets_s3buckets(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing 
{issue.status.value} S3 bucket '{bucket_name}' public policy issue") diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index c8d3d267..c03d2df2 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -293,7 +293,7 @@ def create_tickets_securitygroups(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"Insecure security group issue '{group_name} / {group_id}' " f"is added to temporary whitelist items.") @@ -312,6 +312,7 @@ def create_tickets_securitygroups(self): account_id=account_id, bu=bu, product=product, ) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue") diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index f311dc03..77d4f700 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ -47,7 +47,7 @@ def create_tickets_sqs_policy(self): bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist]: + if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: logging.debug(f"SQS queue public policy issue '{queue_name}' " f"is added to temporary whitelist items.") @@ -66,6 +66,7 @@ def create_tickets_sqs_policy(self): account_id=account_id, bu=bu, product=product, 
) + IssueOperations.set_status_temp_whitelisted(ddb_table, issue) elif issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} SQS queue '{queue_name}' public policy issue") From a6b5154ee45ea884dec718b272ee1b33da7742a1 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Tue, 24 Sep 2019 23:06:14 +0530 Subject: [PATCH 181/193] Updated with review comments. Updated with review comments. --- hammer/library/ddb_issues.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index 11f6363c..fb744954 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -20,7 +20,7 @@ class IssueStatus(Enum): Resolved = "resolved" # set by reporting after closing ticket Closed = "closed" - # set by identification - issue still exists but was added to tempwhitelist_list for future remediation + # set by identification - issue still exists but was added to temporary whitelist_list for future remediation Tempwhitelist = "tempwhitelist" From 513305e050b450cdd49afa798341f1e04d2cd698 Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Wed, 25 Sep 2019 12:14:12 +0300 Subject: [PATCH 182/193] Config name changes to label names --- hammer/library/jiraoperations.py | 4 ++-- .../remediation/clean_elasticsearch_domain_logging.py | 2 +- .../remediation/clean_elasticsearch_policy_permissions.py | 2 +- .../remediation/clean_redshift_cluster_unencrypted.py | 2 +- .../remediation/clean_redshift_public_access.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hammer/library/jiraoperations.py b/hammer/library/jiraoperations.py index 7be5803a..c45aeac7 100755 --- a/hammer/library/jiraoperations.py +++ b/hammer/library/jiraoperations.py @@ -58,7 +58,7 @@ class JiraReporting(object): """ Base class for JIRA reporting """ def __init__(self, config, module=''): self.config = config - self.jira = 
JiraOperations(self.config, module=module) + self.jira = JiraOperations(self.config) self.jira_labels = JiraLabels(config, module) self.module_jira_labels = self.jira_labels.module_labels @@ -150,7 +150,7 @@ def add_label(self, ticket_id, label): class JiraOperations(object): """ Base class for interaction with JIRA """ - def __init__(self, config, module=''): + def __init__(self, config): # do not print excess warnings urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # JIRA configuration from config.json/DDB diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py index f1facfc8..00dc492e 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_domain_logging.py @@ -31,7 +31,7 @@ def clean_elasticsearch_domain_domain_logging_issues(self, batch=False): retention_period = self.config.esLogging.remediation_retention_period - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='esDomainLogging') slack = SlackNotification(self.config) for account_id, account_name in self.config.esLogging.remediation_accounts.items(): diff --git a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py index d92f4365..4c14f701 100644 --- a/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py +++ b/hammer/reporting-remediation/remediation/clean_elasticsearch_policy_permissions.py @@ -31,7 +31,7 @@ def clean_elasticsearch_domain_policy_permissions(self, batch=False): retention_period = self.config.esPublicAccess.remediation_retention_period - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='esPublicAccessDomain') slack = SlackNotification(self.config) for 
account_id, account_name in self.config.esPublicAccess.remediation_accounts.items(): diff --git a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py index 99df8cf6..6e5ac97b 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_cluster_unencrypted.py @@ -30,7 +30,7 @@ def cleanredshiftclusterunencryption(self, batch=False): retention_period = self.config.redshiftEncrypt.remediation_retention_period - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='redshiftUnencrypted') slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): diff --git a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py index a67f29be..496ebe28 100644 --- a/hammer/reporting-remediation/remediation/clean_redshift_public_access.py +++ b/hammer/reporting-remediation/remediation/clean_redshift_public_access.py @@ -30,7 +30,7 @@ def clean_redshift_public_access(self, batch=False): retention_period = self.config.redshift_public_access.remediation_retention_period - jira = JiraReporting(self.config) + jira = JiraReporting(self.config, module='redshiftPublicAccess') slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): From 29917a57602da57b9c099fd38bf62777ed0b843c Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Thu, 26 Sep 2019 19:40:04 +0530 Subject: [PATCH 183/193] Updated tempwhitelist issue changes. Updated tempwhitelist issue changes. 
--- ...mp_whitelist_issues_list.json => temp_whitelist_issues.json} | 0 hammer/library/config.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename deployment/configs/{temp_whitelist_issues_list.json => temp_whitelist_issues.json} (100%) diff --git a/deployment/configs/temp_whitelist_issues_list.json b/deployment/configs/temp_whitelist_issues.json similarity index 100% rename from deployment/configs/temp_whitelist_issues_list.json rename to deployment/configs/temp_whitelist_issues.json diff --git a/hammer/library/config.py b/hammer/library/config.py index c74c35d0..29c43c0e 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -22,7 +22,7 @@ def __init__(self, whitelistFile="whitelist.json", fixnowFile="fixnow.json", ticketOwnersFile="ticket_owners.json", - tempWhitelistFile="temp_whitelist_issues_list.json"): + tempWhitelistFile="temp_whitelist_issues.json"): """ :param configFile: local path to configuration file in json format :param configIniFile: local path to configuration file in ini format (is used in r&r EC2, build from EC2 userdata) From c53390b8edce125974845963d01c5d9264d441b5 Mon Sep 17 00:00:00 2001 From: "yevheniia.pasiechna@dowjones.com" Date: Mon, 30 Sep 2019 18:05:08 +0300 Subject: [PATCH 184/193] test changes --- hammer/identification/lambdas/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/hammer/identification/lambdas/requirements.txt b/hammer/identification/lambdas/requirements.txt index 7001ed12..663bd1f6 100755 --- a/hammer/identification/lambdas/requirements.txt +++ b/hammer/identification/lambdas/requirements.txt @@ -1,2 +1 @@ -boto3==1.9.42 requests \ No newline at end of file From 0ef49a11855b3ba89838156d3af87f2053abf7a1 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 4 Oct 2019 15:36:47 +0530 Subject: [PATCH 185/193] Updated with boto3 version for Elasticsearch issue. Updated with boto3 version for Elasticsearch issue. And deleted from all lambdas. 
--- .../requirements.txt | 2 ++ hammer/identification/lambdas/requirements.txt | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/requirements.txt diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/requirements.txt b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/requirements.txt new file mode 100644 index 00000000..7001ed12 --- /dev/null +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/requirements.txt @@ -0,0 +1,2 @@ +boto3==1.9.42 +requests \ No newline at end of file diff --git a/hammer/identification/lambdas/requirements.txt b/hammer/identification/lambdas/requirements.txt index 7001ed12..663bd1f6 100755 --- a/hammer/identification/lambdas/requirements.txt +++ b/hammer/identification/lambdas/requirements.txt @@ -1,2 +1 @@ -boto3==1.9.42 requests \ No newline at end of file From 706bcdcbf12ca11387b2e436ad445493a745467b Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 4 Oct 2019 17:28:59 +0530 Subject: [PATCH 186/193] Updated with AMI remediation issue. Updated with AMI remediation issue. --- hammer/library/aws/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hammer/library/aws/ec2.py b/hammer/library/aws/ec2.py index 76c3775d..8fd7882e 100755 --- a/hammer/library/aws/ec2.py +++ b/hammer/library/aws/ec2.py @@ -286,6 +286,7 @@ def __str__(self): def modify_image_attribute(self): EC2Operations.modify_image_attribute(self.account.client("ec2"), self.id) + return True class PublicAMIChecker(object): From aa57ed6ea24179461e1964e1b6fa13ecfadc6553 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 4 Oct 2019 18:17:29 +0530 Subject: [PATCH 187/193] Updated with PublicAMI remediation changes. Updated with PublicAMI remediation changes. 
--- hammer/library/aws/ec2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hammer/library/aws/ec2.py b/hammer/library/aws/ec2.py index 8fd7882e..af02f2d5 100755 --- a/hammer/library/aws/ec2.py +++ b/hammer/library/aws/ec2.py @@ -285,8 +285,7 @@ def __str__(self): f")") def modify_image_attribute(self): - EC2Operations.modify_image_attribute(self.account.client("ec2"), self.id) - return True + return EC2Operations.modify_image_attribute(self.account.client("ec2"), self.id) class PublicAMIChecker(object): From 6b31dc778940a6a87594a2679f069a2089fe64b0 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla <30701892+vigneswararaomacharla@users.noreply.github.com> Date: Mon, 7 Oct 2019 19:49:06 +0530 Subject: [PATCH 188/193] Updated PublicAMI Lambda function memory changes. Updated PublicAMI Lambda function memory changes. --- deployment/cf-templates/identification.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 82caa983..054dd88b 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -1223,7 +1223,7 @@ ]}, "InitiateLambdaHandler": "initiate_to_desc_public_ami_issues.lambda_handler", "EvaluateLambdaHandler": "describe_public_ami_issues.lambda_handler", - "EvaluateLambdaMemorySize": 256, + "EvaluateLambdaMemorySize": 512, "LambdaLogsForwarderArn": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }, "EventRuleDescription": "Hammer ScheduledRule to initiate public AMI access evaluations", "EventRuleName": {"Fn::Join" : ["", [{ "Ref": "ResourcesPrefix" }, "InitiateEvaluationAMIPublicAccess"] ] }, @@ -1602,4 +1602,4 @@ "Outputs": { "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} } -} \ No newline at end of file +} From db0047c7e7d539a1c3438d95f04917016d336474 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 11 Oct 2019 19:57:32 
+0530 Subject: [PATCH 189/193] Updated with review comments. Updated with review comments. --- .../reporting/create_cloudtrail_tickets.py | 5 ++++- .../create_ebs_public_snapshot_issue_tickets.py | 8 +++++--- ...create_ecs_external_image_source_issue_tickets.py | 4 +++- .../reporting/create_ecs_logging_issue_tickets.py | 4 +++- .../create_ecs_privileged_access_issue_tickets.py | 4 +++- ...ate_elasticsearch_domain_logging_issue_tickets.py | 7 +++++-- ...eate_elasticsearch_public_access_issue_tickets.py | 8 ++++++-- ...create_elasticsearch_unencrypted_issue_tickets.py | 4 +++- .../reporting/create_iam_key_inactive_tickets.py | 10 +++++++--- .../reporting/create_iam_key_rotation_tickets.py | 4 +++- .../reporting/create_public_ami_issue_tickets.py | 11 ++++++++--- .../create_rds_public_snapshot_issue_tickets.py | 11 ++++++++--- .../create_rds_unencrypted_instance_issue_tickets.py | 5 ++++- .../create_redshift_logging_issue_tickets.py | 4 +++- .../create_redshift_public_access_issue_tickets.py | 7 +++++-- ...ate_redshift_unencrypted_cluster_issue_tickets.py | 8 ++++++-- .../create_s3_unencrypted_bucket_issue_tickets.py | 10 +++++++--- .../reporting/create_s3bucket_acl_issue_tickets.py | 10 +++++++--- .../create_s3bucket_policy_issue_tickets.py | 10 +++++++--- .../reporting/create_security_groups_tickets.py | 8 ++++++-- .../reporting/create_sqs_policy_issue_tickets.py | 12 ++++++++---- 21 files changed, 111 insertions(+), 43 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py index 48037537..15a27f9f 100755 --- a/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py +++ b/hammer/reporting-remediation/reporting/create_cloudtrail_tickets.py @@ -54,9 +54,12 @@ def create_tickets_cloud_trail_logging(self): issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, CloudTrailIssue) for issue in issues: region = issue.issue_id + + 
in_temp_whitelist = self.config.cloudtrails.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) \ + and issue.timestamps.temp_whitelisted is None: logging.debug(f"CloudTrail logging issue with '{region}' " f"is added to temporary whitelist. ") diff --git a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py index 4bb8e848..bf459b13 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_public_snapshot_issue_tickets.py @@ -37,13 +37,14 @@ def create_tickets_ebs_public_snapshots(self): volume_id = issue.issue_details.volume_id region = issue.issue_details.region tags = issue.issue_details.tags + in_temp_whitelist = self.config.ebsSnapshot.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"EBS public snapshot '{snapshot_id}' is added to temporary whitelist items. 
") comment = (f"EBS public snapshot '{snapshot_id}' " @@ -120,8 +121,9 @@ def create_tickets_ebs_public_snapshots(self): f"*Volume ID*: {volume_id}\n" f"\n") - auto_remediation_date = (self.config.now + self.config.ebsSnapshot.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.ebsSnapshot.remediation and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.ebsSnapshot.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) diff --git a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py index 73285d9e..ad3d31aa 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_external_image_source_issue_tickets.py @@ -37,13 +37,15 @@ def create_tickets_ecs_external_images(self): region = issue.issue_details.region tags = issue.issue_details.tags container_image_details = issue.issue_details.container_image_details + + in_temp_whitelist = self.config.ecs_external_image_source.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"ECS external image source '{task_definition_name}' " f"is added to temporary whitelist items. 
") diff --git a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py index 81f00656..fda2a33c 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_logging_issue_tickets.py @@ -37,13 +37,15 @@ def create_tickets_ecs_logging(self): disabled_logging_container_names = issue.issue_details.disabled_logging_container_names region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.ecs_logging.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug( f"ECS logging issue '{task_definition_name}' is added to temporary whitelist items. 
") diff --git a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py index 55835e6d..d363fcff 100644 --- a/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ecs_privileged_access_issue_tickets.py @@ -37,13 +37,15 @@ def create_tickets_ecs_privileged(self): privileged_container_names = issue.issue_details.privileged_container_names region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.ecs_privileged_access.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug( f"ECS privileged access issue '{task_definition_name}' " f"is added to temporary whitelist items.") diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py index 0ad9d20b..c5c6a1cf 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_domain_logging_issue_tickets.py @@ -37,13 +37,15 @@ def create_tickets_elasticsearch_domain_logging(self): region = issue.issue_details.region tags = issue.issue_details.tags + in_temp_whitelist = self.config.esLogging.in_temp_whitelist(account_id, issue.issue_id) + # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu 
= issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug( f"Elasticsearch logging issue '{domain_name}' is added to temporary whitelist items.") @@ -108,7 +110,8 @@ def create_tickets_elasticsearch_domain_logging(self): issue_description += JiraOperations.build_tags_table(tags) - if self.config.esLogging.remediation: + if self.config.esLogging.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): auto_remediation_date = (self.config.now + self.config.esLogging.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py index 0e5acd5d..fb1c6990 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_public_access_issue_tickets.py @@ -40,13 +40,16 @@ def create_tickets_elasticsearch_public_access(self): region = issue.issue_details.region tags = issue.issue_details.tags policy = issue.issue_details.policy + + in_temp_whitelist = self.config.esPublicAccess.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) \ + and issue.timestamps.temp_whitelisted is None: logging.debug( 
f"Elasticsearch publicly accessible domain issue '{domain_name}' " f"is added to temporary whitelist items.") @@ -112,7 +115,8 @@ def create_tickets_elasticsearch_public_access(self): issue_description += JiraOperations.build_tags_table(tags) - if self.config.esPublicAccess.remediation: + if self.config.esPublicAccess.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): auto_remediation_date = (self.config.now + self.config.esPublicAccess.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}" \ f"{{color}}\n\n" diff --git a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py index 0c7007d7..b6c2b048 100644 --- a/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_elasticsearch_unencrypted_issue_tickets.py @@ -38,13 +38,15 @@ def create_tickets_elasticsearch_unencryption(self): tags = issue.issue_details.tags encrypted_at_rest = issue.issue_details.encrypted_at_rest encrypted_at_transit = issue.issue_details.encrypted_at_transit + + in_temp_whitelist = self.config.esEncrypt.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug( f"Elasticsearch unencrypted domain issue '{domain_name}' " f"is added to temporary whitelist items.") diff --git a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py 
b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py index 1ce53e0a..16adc82b 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_inactive_tickets.py @@ -36,9 +36,11 @@ def create_jira_ticket(self): for issue in issues: key_id = issue.issue_id username = issue.issue_details.username + + in_temp_whitelist = self.config.iamUserInactiveKeys.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug( f"IAM Inactive access key issue '{key_id} / {username}' is " f"added to temporary whitelist items.") @@ -100,8 +102,10 @@ def create_jira_ticket(self): f"*Key last used*: {last_used}\n" f"\n") - auto_remediation_date = (self.config.now + self.config.iamUserInactiveKeys.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.iamUserInactiveKeys.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.iamUserInactiveKeys.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += f"*Recommendation*: Deactivate specified inactive user access key. 
" diff --git a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py index 9d0e687a..52fd8ea5 100755 --- a/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py +++ b/hammer/reporting-remediation/reporting/create_iam_key_rotation_tickets.py @@ -36,9 +36,11 @@ def create_jira_ticket(self): for issue in issues: key_id = issue.issue_id username = issue.issue_details.username + + in_temp_whitelist = self.config.iamUserKeysRotation.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug( f"IAM stale access key issue '{key_id} / {username}' " f"is added to temporary whitelist items.") diff --git a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py index b1f94c76..e267ac9a 100644 --- a/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_public_ami_issue_tickets.py @@ -36,13 +36,16 @@ def create_tickets_public_ami(self): ami_id = issue.issue_id ami_region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.publicAMIs.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.issue_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) \ + and issue.timestamps.temp_whitelisted is 
None: logging.debug(f"AMI '{ami_id}' is added to temporary whitelist items.") comment = (f"AMI '{ami_id}' public access issue " @@ -131,8 +134,10 @@ def create_tickets_public_ami(self): f"*AMI Id*: {ami_id}\n" f"\n") - auto_remediation_date = (self.config.now + self.config.publicAMIs.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.publicAMIs.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.publicAMIs.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) diff --git a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py index 929f4d82..1f7f68e1 100755 --- a/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_public_snapshot_issue_tickets.py @@ -36,13 +36,16 @@ def create_tickets_rds_public_snapshots(self): snapshot_id = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.rdsSnapshot.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) \ + and issue.timestamps.temp_whitelisted is None: logging.debug(f"RDS public snapshot '{snapshot_id}' is added to temporary whitelist items.") comment = (f"RDS public snapshot '{snapshot_id}' issue " 
@@ -117,8 +120,10 @@ def create_tickets_rds_public_snapshots(self): f"*Region*: {region}\n" f"*RDS Snapshot ID*: {snapshot_id}\n") - auto_remediation_date = (self.config.now + self.config.rdsSnapshot.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.rdsSnapshot.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.rdsSnapshot.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) diff --git a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py index d4d1a467..cd41e7a8 100644 --- a/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_rds_unencrypted_instance_issue_tickets.py @@ -37,13 +37,16 @@ def create_tickets_rds_unencrypted_instances(self): instance_name = issue.issue_details.name region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.rdsEncrypt.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist])\ + and issue.timestamps.temp_whitelisted is None: logging.debug(f"RDS unencrypted instance '{instance_name}' " f"is added to temporary whitelist items.") diff --git a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py 
b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py index 6e7eb7f5..124f58e7 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_logging_issue_tickets.py @@ -36,13 +36,15 @@ def create_tickets_redshift_logging(self): cluster_id = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.redshift_logging.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"Redshift cluster logging '{cluster_id}' is added to temporary whitelist items.") comment = (f"Redshift cluster logging '{cluster_id}' issue " diff --git a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py index 513f5c2c..1e14262d 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_public_access_issue_tickets.py @@ -36,13 +36,15 @@ def create_tickets_redshift_public_access(self): cluster_id = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.redshift_public_access.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in 
[IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"Redshift publicly accessible cluster issue '{cluster_id}' " f"is added to temporary whitelist items.") @@ -108,7 +110,8 @@ def create_tickets_redshift_public_access(self): f"*Region*: {region}\n" f"*Redshift Cluster ID*: {cluster_id}\n") - if self.config.redshift_public_access.remediation: + if self.config.redshift_public_access.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): auto_remediation_date = (self.config.now + self.config.redshift_public_access.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" diff --git a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py index 81c9b012..5ff96a4e 100644 --- a/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_redshift_unencrypted_cluster_issue_tickets.py @@ -36,13 +36,16 @@ def create_tickets_redshift_unencrypted_cluster(self): cluster_id = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.redshiftEncrypt.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist])\ + and issue.timestamps.temp_whitelisted is None: logging.debug(f"Redshift unencrypted cluster issue 
'{cluster_id}' " f"is added to temporary whitelist items.") @@ -108,7 +111,8 @@ def create_tickets_redshift_unencrypted_cluster(self): issue_description += JiraOperations.build_tags_table(tags) - if self.config.redshiftEncrypt.remediation: + if self.config.redshiftEncrypt.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): auto_remediation_date = (self.config.now + self.config.redshiftEncrypt.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" diff --git a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py index 4bf72275..c9e314d8 100644 --- a/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3_unencrypted_bucket_issue_tickets.py @@ -35,13 +35,15 @@ def create_tickets_s3_unencrypted_buckets(self): for issue in issues: bucket_name = issue.issue_id tags = issue.issue_details.tags + + in_temp_whitelist = self.config.s3Encrypt.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.issue_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"S3 bucket unencrypted issue '{bucket_name}' " f"is added to temporary whitelist items.") @@ -132,8 +134,10 @@ def create_tickets_s3_unencrypted_buckets(self): f"*Bucket Owner*: {owner}\n" f"\n") - auto_remediation_date = (self.config.now + self.config.s3Encrypt.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: 
{auto_remediation_date}{{color}}\n\n" + if self.config.s3Encrypt.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.s3Encrypt.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py index 4fd5a1e0..15afb083 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_acl_issue_tickets.py @@ -42,13 +42,15 @@ def create_tickets_s3buckets(self): for issue in issues: bucket_name = issue.issue_id tags = issue.issue_details.tags + + in_temp_whitelist = self.config.s3acl.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.issue_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"S3 bucket public ACL issue '{bucket_name}' " f"is added to temporary whitelist items.") @@ -137,8 +139,10 @@ def create_tickets_s3buckets(self): f"*Bucket Owner*: {owner}\n" f"\n") - auto_remediation_date = (self.config.now + self.config.s3acl.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.s3acl.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + 
self.config.s3acl.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) diff --git a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py index d01925b4..cc03c58c 100755 --- a/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_s3bucket_policy_issue_tickets.py @@ -39,13 +39,15 @@ def create_tickets_s3buckets(self): bucket_name = issue.issue_id tags = issue.issue_details.tags policy = issue.issue_details.policy + + in_temp_whitelist = self.config.s3policy.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.issue_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (issue.status in [IssueStatus.Tempwhitelist] or in_temp_whitelist) and issue.timestamps.temp_whitelisted is None: logging.debug(f"S3 bucket public policy issue '{bucket_name}' " f"is added to temporary whitelist items.") @@ -141,8 +143,10 @@ def create_tickets_s3buckets(self): f"*Bucket Owner*: {owner}\n" f"\n") - auto_remediation_date = (self.config.now + self.config.s3policy.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.s3policy.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.s3policy.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += 
JiraOperations.build_tags_table(tags) diff --git a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py index c03d2df2..29d54605 100755 --- a/hammer/reporting-remediation/reporting/create_security_groups_tickets.py +++ b/hammer/reporting-remediation/reporting/create_security_groups_tickets.py @@ -287,13 +287,16 @@ def create_tickets_securitygroups(self): group_region = issue.issue_details.region group_vpc_id = issue.issue_details.vpc_id tags = issue.issue_details.tags + + in_temp_whitelist = self.config.sg.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) \ + and issue.timestamps.temp_whitelisted is None: logging.debug(f"Insecure security group issue '{group_name} / {group_id}' " f"is added to temporary whitelist items.") @@ -537,7 +540,8 @@ def create_tickets_securitygroups(self): f"{threat}" f"{account_details}") - if status == RestrictionStatus.OpenCompletely: + if (status == RestrictionStatus.OpenCompletely) \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): auto_remediation_date = (self.config.now + self.config.sg.issue_retention_date).date() issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" diff --git a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py index 77d4f700..3ba02043 100644 --- a/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_sqs_policy_issue_tickets.py @@ 
-41,13 +41,16 @@ def create_tickets_sqs_policy(self): queue_region = issue.issue_details.region tags = issue.issue_details.tags policy = issue.issue_details.policy + + in_temp_whitelist = self.config.sqspolicy.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.issue_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) \ + and issue.timestamps.temp_whitelisted is None: logging.debug(f"SQS queue public policy issue '{queue_name}' " f"is added to temporary whitelist items.") @@ -145,9 +148,10 @@ def create_tickets_sqs_policy(self): f"*SQS queue name*: {queue_name}\n" f"*SQS queue region*: {queue_region}\n" f"\n") - - auto_remediation_date = (self.config.now + self.config.sqspolicy.issue_retention_date).date() - issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" + if self.config.sqspolicy.remediation \ + and not (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]): + auto_remediation_date = (self.config.now + self.config.sqspolicy.issue_retention_date).date() + issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += JiraOperations.build_tags_table(tags) From adf8fdd68a2e0c981020f4e4c068fe5f622e47b3 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Fri, 11 Oct 2019 20:03:46 +0530 Subject: [PATCH 190/193] Updated with review comments. Updated with review comments. 
--- .../reporting/create_ebs_volume_issue_tickets.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py index 2bd83333..3bad6185 100755 --- a/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py +++ b/hammer/reporting-remediation/reporting/create_ebs_volume_issue_tickets.py @@ -84,13 +84,15 @@ def create_tickets_ebsvolumes(self): volume_id = issue.issue_id region = issue.issue_details.region tags = issue.issue_details.tags + + in_temp_whitelist = self.config.ebsVolume.in_temp_whitelist(account_id, issue.issue_id) # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product - if issue.status in [IssueStatus.Tempwhitelist] and issue.timestamps.temp_whitelisted is None: + if (in_temp_whitelist or issue.status in [IssueStatus.Tempwhitelist]) and issue.timestamps.temp_whitelisted is None: logging.debug(f"EBS unencrypted volume '{volume_id}' is added to temporary whitelist items. ") comment = (f"EBS unencrypted volume '{volume_id}' " @@ -193,7 +195,8 @@ def create_tickets_ebsvolumes(self): issue_description += "*Recommendation*: Encrypt EBS volume. " if self.config.whitelisting_procedure_url: - issue_description += (f"For any other exceptions, please follow the [whitelisting procedure|{self.config.whitelisting_procedure_url}] " + issue_description += (f"For any other exceptions, please follow the " + f"[whitelisting procedure|{self.config.whitelisting_procedure_url}] " f"and provide a strong business reasoning. ") issue_summary = (f"EBS unencrypted volume '{volume_id}' " From c493a7ff14f693d7473fba1fbd3af10e81827812 Mon Sep 17 00:00:00 2001 From: vigneswararaomacharla Date: Wed, 23 Oct 2019 19:08:47 +0530 Subject: [PATCH 191/193] Updated with schedulers. 
Updated with schedulers. --- deployment/cf-templates/identification.json | 36 ++++++++++----------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index 054dd88b..eaddc546 100755 --- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -729,7 +729,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "35 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": {"Ref": "SourceIdentificationSG"}, @@ -769,7 +769,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "15 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationCloudTrails" }, @@ -929,7 +929,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "0 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationIAMUserInactiveKeys" }, @@ -969,7 +969,7 @@ 
{ "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "20 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSVolumes" }, @@ -1009,7 +1009,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "25 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationEBSSnapshots" }, @@ -1089,7 +1089,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationSQSPublicPolicy" }, @@ -1129,7 +1129,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "10 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, 
"LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationS3Encryption" }, @@ -1169,7 +1169,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationRDSEncryption" }, @@ -1209,7 +1209,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "45 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationAMIPublicAccess" }, @@ -1249,7 +1249,7 @@ {"Ref": "ResourcesPrefix"}, {"Ref": "IdentificationIAMRole"} ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftPublicAccess" }, @@ -1289,7 +1289,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + 
"IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "30 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftClusterEncryption" }, @@ -1330,7 +1330,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "50 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationRedshiftLogging" }, @@ -1370,7 +1370,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "50 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSPrivilegedAccess" }, @@ -1410,7 +1410,7 @@ {"Ref": "ResourcesPrefix"}, {"Ref": "IdentificationIAMRole"} ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["",["cron(","40 ",{"Ref": "IdentificationCheckRateExpression"},")"]]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["",["cron(","50 ",{"Ref": "IdentificationCheckRateExpression"},")"]]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": {"Ref": "SourceIdentificationECSLogging"}, @@ -1450,7 +1450,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } 
] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "50 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationECSExternalImageSource" }, @@ -1490,7 +1490,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "50 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchEncryption" }, @@ -1530,7 +1530,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "50 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": "LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchLogging" }, @@ -1570,7 +1570,7 @@ { "Ref": "ResourcesPrefix" }, { "Ref": "IdentificationIAMRole" } ] ]}, - "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "40 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, + "IdentificationCheckRateExpression": {"Fn::Join": ["", [ "cron(", "50 ", { "Ref": "IdentificationCheckRateExpression" }, ")" ] ]}, "LambdaSubnets": {"Ref": "LambdaSubnets"}, "LambdaSecurityGroups": {"Ref": 
"LambdaSecurityGroups"}, "IdentificationLambdaSource": { "Ref": "SourceIdentificationElasticSearchPublicAccess" }, From fa489eb8ed5b8d2bf9c79d303615aa09c942bac7 Mon Sep 17 00:00:00 2001 From: Yevheniia Pasiechna Date: Thu, 13 Feb 2020 13:49:16 +0200 Subject: [PATCH 192/193] Fixed scan API request with specified tags parameter --- hammer/library/ddb_issues.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index fb744954..a1cbd6ea 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -135,6 +135,8 @@ def from_dict(item, issue_class=None): def contains_tags(self, tags): if not tags: return True + if not self.issue_details.tags: + return False for k in tags: if k not in self.issue_details.tags: return False From a98371c1b20f614a28c3a65b14858928283f506d Mon Sep 17 00:00:00 2001 From: Alexey Chuprikov Date: Mon, 1 Jun 2020 13:17:20 +0300 Subject: [PATCH 193/193] Fix ius-release installation The URL of package has been changed. This fails cloud-init script. --- deployment/cf-templates/reporting-remediation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/cf-templates/reporting-remediation.json b/deployment/cf-templates/reporting-remediation.json index d3bb9aed..268096d6 100755 --- a/deployment/cf-templates/reporting-remediation.json +++ b/deployment/cf-templates/reporting-remediation.json @@ -170,7 +170,7 @@ "yum -y install openssl-devel\n", "# remove for following install to not fail\n", "yum -y remove ius-release\n", - "yum -y install https://centos7.iuscommunity.org/ius-release.rpm --nogpgcheck\n", + "yum -y install https://repo.ius.io/ius-release-el7.rpm --nogpgcheck\n", "yum -y install python36u python36u-pip python36u-devel\n", "pip3.6 install awscli\n", "rm -rf /hammer-correlation-engine\n",