Update destroy role, fix bugs

This commit is contained in:
Arsenii Petrovich 2019-07-04 15:08:36 +03:00
parent f2d7456cd1
commit 973c02f7b1
12 changed files with 98 additions and 54 deletions

View File

@ -6,3 +6,4 @@ deprecation_warnings = False
host_key_checking = false host_key_checking = false
log_path = log.txt log_path = log.txt
hash_behaviour = merge hash_behaviour = merge
display_skipped_hosts = false

View File

@ -1,8 +1,7 @@
- name: Destroy infrastructure - name: Destroy infrastructure
hosts: all hosts: all
serial: 1
roles: roles:
- { role: destroy, when: "confirmation|bool == True" } - { role: destroy, when: "confirmation|bool == True and inventory_hostname == groups[group_names[0]][0]" }
vars_prompt: vars_prompt:
- name: "confirmation" - name: "confirmation"
prompt: "Are you sure you want to destroy all the infra?" prompt: "Are you sure you want to destroy all the infra?"

View File

@ -1,78 +1,115 @@
- name: Ansible delete file glob
find:
paths: /tmp/
file_type: directory
patterns: "files-{{ group_names[0] }}"
register: files_to_delete
- name: Ansible remove file glob
file:
path: "{{ item.path }}"
state: absent
with_items: "{{ files_to_delete.files }}"
- name: Copy files
copy:
src: "roles/main_infra/files/"
dest: "/tmp/files-{{ group_names[0] }}/"
- name: Local or remote backend selector (remote) - name: Local or remote backend selector (remote)
template: template:
src: roles/main_infra/templates/remote-backend-selector.tf.j2 src: roles/main_infra/templates/remote-backend-selector.tf.j2
dest: roles/main_infra/files/remote-backend-selector.tf dest: "/tmp/files-{{ group_names[0] }}/remote-backend-selector.tf"
when: when:
- backend|bool == true - backend|bool == true
- name: Local or remote backend selector (local) - name: Local or remote backend selector (local)
file: file:
state: absent state: absent
dest: roles/main_infra/files/remote-backend-selector.tf dest: "/tmp/files-{{ group_names[0] }}/"
when: when:
- not backend | default ('false') | bool - not backend | default ('false') | bool
- name: Generating variables file - name: Generating variables file
template: template:
src: roles/main_infra/templates/terraform.tfvars.j2 src: roles/main_infra/templates/terraform.tfvars.j2
dest: roles/main_infra/files/terraform.tfvars dest: "/tmp/files-{{ group_names[0] }}/terraform.tfvars"
- name: Generating backend file - name: Generating backend file
template: template:
src: roles/main_infra/templates/backend.tfvars.j2 src: roles/main_infra/templates/backend.tfvars.j2
dest: roles/main_infra/files/backend.tfvars dest: "/tmp/files-{{ group_names[0] }}/backend.tfvars"
when: backend | bool when: backend | bool
- name: Generate Terraform files # This is due to the TF0.11-12 bug which does not allow resources to be completely destroyed if interpolation syntax is used in outputs.tf at edge cases
template:
src: "{{ item.key }}"
dest: "{{ item.value }}"
with_dict: { roles/main_infra/templates/hosts.tf.j2: roles/main_infra/files/hosts.tf, roles/main_infra/templates/routing.tf.j2: roles/main_infra/files/routing.tf, roles/main_infra/templates/provider.tf.j2: roles/main_infra/files/provider.tf }
# This is due to the TF0.11 bug which does not allow resources to be completely destroyed if interpolation syntax is used in outputs.tf at edge cases
- name: Check if outputs.tf exists - name: Check if outputs.tf exists
stat: path=roles/main_infra/files/outputs.tf stat:
path: "/tmp/files-{{ group_names[0] }}/outputs.tf"
register: outputs_stat register: outputs_stat
- name: Temporarily remove outputs.tf file - name: Temporarily remove outputs.tf file
command: mv roles/main_infra/files/outputs.tf roles/main_infra/files/outputs.tf.backup command: "mv /tmp/files-{{ group_names[0] }}/outputs.tf /tmp/files-{{ group_names[0] }}/outputs.tf.backup"
when: outputs_stat.stat.exists when: outputs_stat.stat.exists
- name: Check if .terraform folder exists - name: Check if .terraform folder exists
stat: stat:
path: "roles/main_infra/files/.terraform/" path: "/tmp/files-{{ group_names[0] }}/.terraform/"
register: stat_result register: stat_result
- name: Remove .terraform folder - name: Remove .terraform folder
file: file:
path: roles/main_infra/files/.terraform/ path: "/tmp/files-{{ group_names[0] }}/.terraform/"
state: absent state: absent
when: stat_result.stat.exists when: stat_result.stat.exists
- name: Terraform destroy main infra - name: Terraform plan to destroy main infra
shell: "echo yes | {{ terraform_location }} {{ item }}" shell: "echo yes | {{ terraform_location }} {{ item }}"
args: args:
chdir: "roles/main_infra/files" chdir: "/tmp/files-{{ group_names[0] }}/"
with_items: with_items:
- "init {{ '-backend-config=backend.tfvars' if backend|bool == true else '' }}" - "init {{ '-backend-config=backend.tfvars' if backend|bool else '' }}"
- destroy - plan -destroy -out terraform.tfplan
- show -no-color terraform.tfplan
register: tf_plan
- name: Terraform show destroy plan
debug:
var: tf_plan.results[2].stdout_lines
- name: User prompt
pause:
prompt: "Are you absolutely sure you want to execute the destruction plan shown above? [False]"
register: user_answer
until: user_answer.user_input | lower in conditional
retries: 10000
delay: 1
vars:
conditional: ['yes','no','true','false']
when: inventory_hostname == groups['all'][0]
- name: Terraform destroy
shell: "{{ terraform_location }} destroy -auto-approve"
args:
chdir: "/tmp/files-{{ group_names[0] }}"
when: hostvars[groups['all'][0]].user_answer.user_input | bool
- name: Delete vars from parameter store - name: Delete vars from parameter store
include: parameter_store.yml include: parameter_store.yml
- name: Check if outputs.tf.backup exists - name: Check if outputs.tf.backup exists
stat: path=roles/main_infra/files/outputs.tf.backup stat:
path: "/tmp/files-{{ group_names[0] }}/outputs.tf.backup"
register: outputs_backup_stat register: outputs_backup_stat
- name: Get back outputs.tf file - name: Get back outputs.tf file
command: mv roles/main_infra/files/outputs.tf.backup roles/main_infra/files/outputs.tf command: "mv /tmp/files-{{ group_names[0] }}/outputs.tf.backup /tmp/files-{{ group_names[0] }}/outputs.tf"
when: outputs_backup_stat.stat.exists when: outputs_backup_stat.stat.exists
- name: User prompt - name: User prompt
pause: pause:
prompt: "Do you want to delete S3 bucket with state file and DynamoDB attached to it also? [Yes/No] Default: No" prompt: "Do you want to delete S3 bucket with state file and DynamoDB attached to it also? [Yes/No] Default: No"
register: user_answer register: user_answer
until: user_answer.user_input | lower in conditional until: user_answer.user_input | lower in conditional
retries: 10000 retries: 10000
delay: 1 delay: 1
vars: vars:

View File

@ -17,7 +17,7 @@ resource "aws_codedeploy_deployment_group" "explorer" {
app_name = aws_codedeploy_app.explorer.name app_name = aws_codedeploy_app.explorer.name
deployment_group_name = "${var.prefix}-explorer-dg${count.index}" deployment_group_name = "${var.prefix}-explorer-dg${count.index}"
service_role_arn = aws_iam_role.deployer.arn service_role_arn = aws_iam_role.deployer.arn
autoscaling_groups = ["${aws_launch_configuration.explorer.name}-asg-${element(var.chains, count.index)}"] autoscaling_groups = [aws_autoscaling_group.explorer[count.index].name]
deployment_style { deployment_style {
deployment_option = "WITH_TRAFFIC_CONTROL" deployment_option = "WITH_TRAFFIC_CONTROL"

View File

@ -1,7 +1,9 @@
# Internal DNS Zone # Internal DNS Zone
resource "aws_route53_zone" "main" { resource "aws_route53_zone" "main" {
name = "${var.prefix}.${var.dns_zone_name}" name = "${var.prefix}.${var.dns_zone_name}"
vpc_id = aws_vpc.vpc.id vpc {
vpc_id = aws_vpc.vpc.id
}
tags = { tags = {
prefix = var.prefix prefix = var.prefix

View File

@ -1,5 +1,6 @@
data "aws_ami" "explorer" { data "aws_ami" "explorer" {
most_recent = true most_recent = true
owners = ["amazon"]
filter { filter {
name = "name" name = "name"
@ -10,11 +11,6 @@ data "aws_ami" "explorer" {
name = "virtualization-type" name = "virtualization-type"
values = ["hvm"] values = ["hvm"]
} }
filter {
name = "owner-alias"
values = ["amazon"]
}
} }
resource "aws_launch_configuration" "explorer" { resource "aws_launch_configuration" "explorer" {
@ -40,13 +36,13 @@ resource "aws_launch_configuration" "explorer" {
} }
resource "aws_placement_group" "explorer" { resource "aws_placement_group" "explorer" {
count = "var.use_placement_group[var.chains[count.index]] ? 1 : 0" count = length(matchkeys(keys(var.use_placement_group),values(var.use_placement_group),["True"]))
name = "${var.prefix}-${var.chains[count.index]}-explorer-pg" name = "${var.prefix}-${var.chains[count.index]}-explorer-pg"
strategy = "cluster" strategy = "cluster"
} }
resource "aws_autoscaling_group" "explorer" { resource "aws_autoscaling_group" "explorer" {
count = "length(var.chains)" count = length(var.chains)
name = "${var.prefix}-${var.chains[count.index]}-asg" name = "${var.prefix}-${var.chains[count.index]}-asg"
max_size = "4" max_size = "4"
min_size = "1" min_size = "1"
@ -55,7 +51,7 @@ resource "aws_autoscaling_group" "explorer" {
vpc_zone_identifier = [aws_subnet.default.id] vpc_zone_identifier = [aws_subnet.default.id]
availability_zones = data.aws_availability_zones.available.names availability_zones = data.aws_availability_zones.available.names
target_group_arns = [aws_lb_target_group.explorer[0].arn] target_group_arns = [aws_lb_target_group.explorer[0].arn]
placement_group = "${var.prefix}-${var.chains[count.index]}-explorer-pg : null" placement_group = var.use_placement_group[var.chains[count.index]] == "True" ? "${var.prefix}-${var.chains[count.index]}-explorer-pg" : null
# Health checks are performed by CodeDeploy hooks # Health checks are performed by CodeDeploy hooks
health_check_type = "EC2" health_check_type = "EC2"
@ -102,7 +98,7 @@ resource "aws_autoscaling_group" "explorer" {
# TODO: These autoscaling policies are not currently wired up to any triggers # TODO: These autoscaling policies are not currently wired up to any triggers
resource "aws_autoscaling_policy" "explorer-up" { resource "aws_autoscaling_policy" "explorer-up" {
count = "length(var.chains)" count = length(var.chains)
name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-up" name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-up"
autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name
adjustment_type = "ChangeInCapacity" adjustment_type = "ChangeInCapacity"
@ -111,6 +107,7 @@ resource "aws_autoscaling_policy" "explorer-up" {
} }
resource "aws_autoscaling_policy" "explorer-down" { resource "aws_autoscaling_policy" "explorer-down" {
count = length(var.chains)
name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-down" name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-down"
autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name
adjustment_type = "ChangeInCapacity" adjustment_type = "ChangeInCapacity"

View File

@ -59,7 +59,7 @@ resource "aws_lb_target_group" "explorer" {
} }
resource "aws_alb_listener" "alb_listener" { resource "aws_alb_listener" "alb_listener" {
count = "length(var.chains)" count = length(var.chains)
load_balancer_arn = aws_lb.explorer[count.index].arn load_balancer_arn = aws_lb.explorer[count.index].arn
port = var.use_ssl[element(var.chains, count.index)] ? "443" : "80" port = var.use_ssl[element(var.chains, count.index)] ? "443" : "80"
protocol = var.use_ssl[element(var.chains, count.index)] ? "HTTPS" : "HTTP" protocol = var.use_ssl[element(var.chains, count.index)] ? "HTTPS" : "HTTP"

View File

@ -16,7 +16,7 @@ resource "aws_subnet" "default" {
## ALB subnet ## ALB subnet
resource "aws_subnet" "alb" { resource "aws_subnet" "alb" {
vpc_id = aws_vpc.vpc.id vpc_id = aws_vpc.vpc.id
cidr_block = var.public_subnet_cidr #cidr_block = var.public_subnet_cidr
cidr_block = cidrsubnet(var.db_subnet_cidr, 5, 1) cidr_block = cidrsubnet(var.db_subnet_cidr, 5, 1)
availability_zone = data.aws_availability_zones.available.names[1] availability_zone = data.aws_availability_zones.available.names[1]
map_public_ip_on_launch = true map_public_ip_on_launch = true

View File

@ -1,9 +1,17 @@
variable "aws_profile" { variable "aws_profile" {
default = "null" default = null
} }
variable "aws_region" { variable "aws_region" {
default = "null" default = null
}
variable "aws_access_key" {
default = null
}
variable "aws_secret_key" {
default = null
} }
variable "prefix" { variable "prefix" {

View File

@ -58,9 +58,9 @@
args: args:
chdir: "/tmp/files-{{ group_names[0] }}" chdir: "/tmp/files-{{ group_names[0] }}"
with_items: with_items:
- "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}" - "init{{ ' -backend-config=backend.tfvars' if backend|bool else '' }}"
- plan -out terraform.tfplan - plan -out terraform.tfplan
- show terraform.tfplan -json -no-color - show -no-color terraform.tfplan
- name: Show Terraform plan - name: Show Terraform plan
debug: debug:
@ -86,13 +86,10 @@
args: args:
chdir: "/tmp/files-{{ group_names[0] }}" chdir: "/tmp/files-{{ group_names[0] }}"
when: hostvars[groups['all'][0]].user_answer.user_input | bool when: hostvars[groups['all'][0]].user_answer.user_input | bool
ignore_errors: True retries: 1
delay: 3
- name: Ensure Terraform resources has been provisioned register: result
shell: "echo yes | {{ terraform_location }} apply" until: result.rc == 0
args:
chdir: "/tmp/files-{{ group_names[0] }}"
when: hostvars[groups['all'][0]].user_answer.user_input | bool
- name: Terraform output info into variable - name: Terraform output info into variable
shell: "{{ terraform_location }} output -json" shell: "{{ terraform_location }} output -json"
@ -103,7 +100,7 @@
- name: Output info from Terraform - name: Output info from Terraform
debug: debug:
var: output.stdout_lines var: (output.stdout|from_json).instructions.value
when: hostvars[groups['all'][0]].user_answer.user_input | bool when: hostvars[groups['all'][0]].user_answer.user_input | bool
- name: Ansible delete file glob - name: Ansible delete file glob

View File

@ -1,6 +1,6 @@
terraform { terraform {
backend "s3" { backend "s3" {
{% if aws_access_key is undefined %} {% if aws_access_key is undefined or aws_access_key == '' %}
profile = "{{ aws_profile|default("default") }}" profile = "{{ aws_profile|default("default") }}"
{% else %} {% else %}
access_key = "{{ aws_access_key }}" access_key = "{{ aws_access_key }}"

View File

@ -1,7 +1,10 @@
aws_profile = "{{ aws_profile|default("default") if aws_access_key is not defined or aws_access_key='' else 'null' }}" {% if aws_access_key is undefined or aws_access_key == '' %}
aws_access_key = "{{ aws_access_key | default("null") }}" aws_profile = "{{ aws_profile|default('default') }}"
aws_secret_key = "{{ aws_secret_key | default("null") }}" {% else %}
aws_region = "{{ aws_region | default("us-east-1") }}" aws_access_key = "{{ aws_access_key | default('null') }}"
aws_secret_key = "{{ aws_secret_key | default('null') }}"
{% endif %}
aws_region = "{{ aws_region | default('us-east-1') }}"
prefix = "{{ group_names[0] }}" prefix = "{{ group_names[0] }}"
key_name = "{{ ec2_ssh_key_name }}" key_name = "{{ ec2_ssh_key_name }}"