Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Upgrade Python code to Python 3.12, refactor the codebase, and make improvements #8

Open
wants to merge 8 commits into
base: master
Choose a base branch
from
75 changes: 49 additions & 26 deletions backup_function.tf
Original file line number Diff line number Diff line change
Expand Up @@ -20,15 +20,18 @@ EOF

resource "aws_iam_role_policy" "ebs_backup_policy" {
name = "ebs_backup_policy"
role = "${aws_iam_role.ebs_backup_role.id}"
role = aws_iam_role.ebs_backup_role.id

policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["logs:*"],
"Action": [
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
Expand All @@ -52,76 +55,96 @@ resource "aws_iam_role_policy" "ebs_backup_policy" {
EOF
}

# Explicitly managed log group for the backup Lambda.
# Creating it in Terraform (instead of letting Lambda auto-create it) lets us
# control retention; 14 days keeps CloudWatch Logs costs bounded.
resource "aws_cloudwatch_log_group" "schedule_ebs_snapshot_backups" {
name = "/aws/lambda/schedule_ebs_snapshot_backups"
retention_in_days = 14
}

# Package the backup Lambda source into a deployment zip.
# The source file uses underscores so the module name is importable by the
# Python handler ("schedule_ebs_snapshot_backups.lambda_handler").
data "archive_file" "schedule_ebs_snapshot_backups_zip" {
  type        = "zip"
  source_file = "${path.module}/schedule_ebs_snapshot_backups.py"
  output_path = "${path.module}/schedule_ebs_snapshot_backups.zip"
}

# Lambda that snapshots EBS volumes attached to EC2 instances tagged for backup.
resource "aws_lambda_function" "schedule_ebs_snapshot_backups" {
  filename      = "${path.module}/schedule_ebs_snapshot_backups.zip"
  function_name = "schedule_ebs_snapshot_backups"
  description   = "Automatically backs up instances tagged with backup: true"
  role          = aws_iam_role.ebs_backup_role.arn
  timeout       = 60
  # Handler module name must match the zipped source file's basename.
  handler          = "schedule_ebs_snapshot_backups.lambda_handler"
  runtime          = "python3.12"
  # Redeploy the function whenever the zipped source changes.
  source_code_hash = data.archive_file.schedule_ebs_snapshot_backups_zip.output_base64sha256

  # Route logs to the Terraform-managed log group so retention is enforced.
  logging_config {
    log_format = "Text"
    log_group  = aws_cloudwatch_log_group.schedule_ebs_snapshot_backups.name
  }
}

# Explicitly managed log group for the janitor Lambda; 14-day retention
# mirrors the backup function's log group above.
resource "aws_cloudwatch_log_group" "ebs_snapshot_janitor" {
name = "/aws/lambda/ebs_snapshot_janitor"
retention_in_days = 14
}

# Package the janitor Lambda source into a deployment zip.
# Underscored filename keeps the Python module importable by the handler.
data "archive_file" "ebs_snapshot_janitor_zip" {
  type        = "zip"
  source_file = "${path.module}/ebs_snapshot_janitor.py"
  output_path = "${path.module}/ebs_snapshot_janitor.zip"
}

# Lambda that deletes EBS snapshots whose "DeleteOn" tag matches today's date.
resource "aws_lambda_function" "ebs_snapshot_janitor" {
  filename      = "${path.module}/ebs_snapshot_janitor.zip"
  function_name = "ebs_snapshot_janitor"
  description   = "Cleans up old EBS backups"
  role          = aws_iam_role.ebs_backup_role.arn
  timeout       = 60
  # Handler module name must match the zipped source file's basename.
  handler          = "ebs_snapshot_janitor.lambda_handler"
  runtime          = "python3.12"
  # Redeploy the function whenever the zipped source changes.
  source_code_hash = data.archive_file.ebs_snapshot_janitor_zip.output_base64sha256

  # Route logs to the Terraform-managed log group so retention is enforced.
  logging_config {
    log_format = "Text"
    log_group  = aws_cloudwatch_log_group.ebs_snapshot_janitor.name
  }
}

# EventBridge rule that fires the backup Lambda on a configurable schedule.
resource "aws_cloudwatch_event_rule" "schedule_ebs_snapshot_backups" {
  name                = "schedule_ebs_snapshot_backups"
  description         = "Schedule for ebs snapshot backups"
  schedule_expression = var.ebs_snapshot_backups_schedule
}

# EventBridge rule that fires the janitor Lambda on a configurable schedule.
resource "aws_cloudwatch_event_rule" "schedule_ebs_snapshot_janitor" {
  name                = "schedule_ebs_snapshot_janitor"
  description         = "Schedule for ebs snapshot janitor"
  schedule_expression = var.ebs_snapshot_janitor_schedule
}

# Wire the backup schedule rule to the backup Lambda.
resource "aws_cloudwatch_event_target" "schedule_ebs_snapshot_backups" {
  rule      = aws_cloudwatch_event_rule.schedule_ebs_snapshot_backups.name
  target_id = "schedule_ebs_snapshot_backups"
  arn       = aws_lambda_function.schedule_ebs_snapshot_backups.arn
}

# Wire the janitor schedule rule to the janitor Lambda.
resource "aws_cloudwatch_event_target" "schedule_ebs_snapshot_janitor" {
  rule      = aws_cloudwatch_event_rule.schedule_ebs_snapshot_janitor.name
  target_id = "ebs_snapshot_janitor"
  arn       = aws_lambda_function.ebs_snapshot_janitor.arn
}

# Allow EventBridge (scoped to the backup rule's ARN) to invoke the backup Lambda.
resource "aws_lambda_permission" "allow_cloudwatch_to_call_backup" {
  statement_id  = "AllowExecutionFromCloudWatch_schedule_ebs_snapshot_backups"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.schedule_ebs_snapshot_backups.function_name
  principal     = "events.amazonaws.com"
  source_arn    = aws_cloudwatch_event_rule.schedule_ebs_snapshot_backups.arn
}

# Allow EventBridge (scoped to the janitor rule's ARN) to invoke the janitor Lambda.
resource "aws_lambda_permission" "allow_cloudwatch_to_call_janitor" {
  statement_id  = "AllowExecutionFromCloudWatch_ebs_snapshot_janitor"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ebs_snapshot_janitor.function_name
  principal     = "events.amazonaws.com"
  source_arn    = aws_cloudwatch_event_rule.schedule_ebs_snapshot_janitor.arn
}
38 changes: 19 additions & 19 deletions ebs-snapshot-janitor.py → ebs_snapshot_janitor.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,21 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
# pylint: disable=missing-module-docstring
import re
import datetime
import boto3

ec = boto3.client('ec2')
iam = boto3.client('iam')
def lambda_handler(event, context): # pylint: disable=unused-argument
"""
This function looks at all the snapshots of AWS EBS volumes that have a
"DeleteOn" tag containing the current day formatted as YYYY-MM-DD.
This function should be run at least daily.
"""
account_ids = []

"""
This function looks at *all* snapshots that have a "DeleteOn" tag containing
the current day formatted as YYYY-MM-DD. This function should be run at least
daily.
"""
ec2_client = boto3.client('ec2')
iam_client = boto3.client('iam')

def lambda_handler(event, context):
account_ids = list()
try:
"""
You can replace this try/except by filling in `account_ids` yourself.
Expand All @@ -34,20 +35,19 @@ def lambda_handler(event, context):
> iam = boto3.client('iam')
> print iam.get_user()['User']['Arn'].split(':')[4]
"""
iam.get_user()
except Exception as e:
iam_client.get_user()
except Exception as exception: # pylint: disable=broad-except
# use the exception message to get the account ID the function executes under
account_ids.append(re.search(r'(arn:aws:sts::)([0-9]+)', str(e)).groups()[1])

account_ids.append(re.search(r'(arn:aws:sts::)([0-9]+)', str(exception)).groups()[1])

delete_on = datetime.date.today().strftime('%Y-%m-%d')
filters = [
{'Name': 'tag-key', 'Values': ['DeleteOn']},
{'Name': 'tag-value', 'Values': [delete_on]},
]
snapshot_response = ec.describe_snapshots(OwnerIds=account_ids, Filters=filters)

snapshot_response = ec2_client.describe_snapshots(OwnerIds=account_ids, Filters=filters)

for snap in snapshot_response['Snapshots']:
print "Deleting snapshot %s" % snap['SnapshotId']
ec.delete_snapshot(SnapshotId=snap['SnapshotId'])
for snapshot in snapshot_response['Snapshots']:
snapshot_id = snapshot['SnapshotId']
print(f"Deleting snapshot {snapshot_id}")
ec2_client.delete_snapshot(SnapshotId=snapshot_id)
Original file line number Diff line number Diff line change
Expand Up @@ -11,28 +11,32 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
# pylint: disable=missing-module-docstring
import collections
import datetime
import boto3

ec = boto3.client('ec2')
def lambda_handler(event, context): # pylint: disable=unused-argument
"""
This function creates snapshots of AWS EBS volumes attached to AWS EC2 instances
that have a "Backup" tag containing either 'true', 'yes', or '1'.
This function should be run at least daily.
"""
ec2_client = boto3.client('ec2')

def lambda_handler(event, context):
reservations = ec.describe_instances(
reservations = ec2_client.describe_instances(
Filters=[
{'Name': 'tag:Backup', 'Values': ['true', 'yes', '1']},
]
).get(
'Reservations', []
)

instances = sum(
[
[i for i in r['Instances']]
for r in reservations
], [])
instances = [
instance for reservation in reservations for instance in reservation['Instances']
]

print "Found %d instances that need backing up" % len(instances)
print(f"Found {str(len(instances))} instances that need backing up")

to_tag = collections.defaultdict(list)

Expand All @@ -48,28 +52,24 @@ def lambda_handler(event, context):
if dev.get('Ebs', None) is None:
continue
vol_id = dev['Ebs']['VolumeId']
print "Found EBS volume %s on instance %s" % (
vol_id, instance['InstanceId'])
print(f"Found EBS volume {vol_id} on instance {instance['InstanceId']}")

snap = ec.create_snapshot(
snap = ec2_client.create_snapshot(
VolumeId=vol_id,
)

to_tag[retention_days].append(snap['SnapshotId'])

print "Retaining snapshot %s of volume %s from instance %s for %d days" % (
snap['SnapshotId'],
vol_id,
instance['InstanceId'],
retention_days,
print(
f"Retaining snapshot {snap['SnapshotId']} of volume {vol_id} "
f"from instance {instance['InstanceId']} for {str(retention_days)} days"
)


for retention_days in to_tag.keys():
for retention_days in to_tag.keys(): # pylint: disable=consider-using-dict-items
delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
delete_fmt = delete_date.strftime('%Y-%m-%d')
print "Will delete %d snapshots on %s" % (len(to_tag[retention_days]), delete_fmt)
ec.create_tags(
print(f"Will delete {str(len(to_tag[retention_days]))} snapshots on {delete_fmt}")
ec2_client.create_tags(
Resources=to_tag[retention_days],
Tags=[
{'Key': 'DeleteOn', 'Value': delete_fmt},
Expand Down