Update destroy role, fix bugs

Arsenii Petrovich 2019-07-04 15:08:36 +03:00
parent f2d7456cd1
commit 973c02f7b1
12 changed files with 98 additions and 54 deletions

View File

@@ -6,3 +6,4 @@ deprecation_warnings = False
 host_key_checking = false
 log_path = log.txt
 hash_behaviour = merge
+display_skipped_hosts = false

View File

@@ -1,8 +1,7 @@
 - name: Destroy infrastructure
   hosts: all
-  serial: 1
   roles:
-    - { role: destroy, when: "confirmation|bool == True" }
+    - { role: destroy, when: "confirmation|bool == True and inventory_hostname == groups[group_names[0]][0]" }
   vars_prompt:
     - name: "confirmation"
       prompt: "Are you sure you want to destroy all the infra?"

View File

@@ -1,78 +1,115 @@
+- name: Ansible delete file glob
+  find:
+    paths: /tmp/
+    file_type: directory
+    patterns: "files-{{ group_names[0] }}"
+  register: files_to_delete
+- name: Ansible remove file glob
+  file:
+    path: "{{ item.path }}"
+    state: absent
+  with_items: "{{ files_to_delete.files }}"
+- name: Copy files
+  copy:
+    src: "roles/main_infra/files/"
+    dest: "/tmp/files-{{ group_names[0] }}/"
 - name: Local or remote backend selector (remote)
   template:
     src: roles/main_infra/templates/remote-backend-selector.tf.j2
-    dest: roles/main_infra/files/remote-backend-selector.tf
+    dest: "/tmp/files-{{ group_names[0] }}/remote-backend-selector.tf"
   when:
     - backend|bool == true
 - name: Local or remote backend selector (local)
   file:
     state: absent
-    dest: roles/main_infra/files/remote-backend-selector.tf
+    dest: "/tmp/files-{{ group_names[0] }}/remote-backend-selector.tf"
   when:
     - not backend | default ('false') | bool
 - name: Generating variables file
   template:
     src: roles/main_infra/templates/terraform.tfvars.j2
-    dest: roles/main_infra/files/terraform.tfvars
+    dest: "/tmp/files-{{ group_names[0] }}/terraform.tfvars"
 - name: Generating backend file
   template:
     src: roles/main_infra/templates/backend.tfvars.j2
-    dest: roles/main_infra/files/backend.tfvars
+    dest: "/tmp/files-{{ group_names[0] }}/backend.tfvars"
   when: backend | bool
 - name: Generate Terraform files
   template:
     src: "{{ item.key }}"
     dest: "{{ item.value }}"
   with_dict: { roles/main_infra/templates/hosts.tf.j2: roles/main_infra/files/hosts.tf, roles/main_infra/templates/routing.tf.j2: roles/main_infra/files/routing.tf, roles/main_infra/templates/provider.tf.j2: roles/main_infra/files/provider.tf }
-# This is due to the TF0.11 bug which do not allow to completely destroy resources if interpolation syntax is used in outputs.tf at edge cases
+# This is due to a TF 0.11-0.12 bug which does not allow completely destroying resources if interpolation syntax is used in outputs.tf in edge cases
 - name: Check if outputs.tf exists
-  stat: path=roles/main_infra/files/outputs.tf
+  stat:
+    path: "/tmp/files-{{ group_names[0] }}/outputs.tf"
   register: outputs_stat
 - name: Temporarily remove outputs.tf file
-  command: mv roles/main_infra/files/outputs.tf roles/main_infra/files/outputs.tf.backup
+  command: "mv /tmp/files-{{ group_names[0] }}/outputs.tf /tmp/files-{{ group_names[0] }}/outputs.tf.backup"
   when: outputs_stat.stat.exists
 - name: Check if .terraform folder exists
   stat:
-    path: "roles/main_infra/files/.terraform/"
+    path: "/tmp/files-{{ group_names[0] }}/.terraform/"
   register: stat_result
 - name: Remove .terraform folder
   file:
-    path: roles/main_infra/files/.terraform/
+    path: "/tmp/files-{{ group_names[0] }}/.terraform/"
     state: absent
   when: stat_result.stat.exists
-- name: Terraform destroy main infra
+- name: Terraform plan to destroy main infra
   shell: "echo yes | {{ terraform_location }} {{ item }}"
   args:
-    chdir: "roles/main_infra/files"
+    chdir: "/tmp/files-{{ group_names[0] }}/"
   with_items:
-    - "init {{ '-backend-config=backend.tfvars' if backend|bool == true else '' }}"
-    - destroy
+    - "init {{ '-backend-config=backend.tfvars' if backend|bool else '' }}"
+    - plan -destroy -out terraform.tfplan
+    - show -no-color terraform.tfplan
+  register: tf_plan
+- name: Terraform show destroy plan
+  debug:
+    var: tf_plan.results[2].stdout_lines
+- name: User prompt
+  pause:
+    prompt: "Are you absolutely sure you want to execute the destruction plan shown above? [False]"
+  register: user_answer
+  until: user_answer.user_input | lower in conditional
+  retries: 10000
+  delay: 1
+  vars:
+    conditional: ['yes','no','true','false']
+  when: inventory_hostname == groups['all'][0]
+- name: Terraform destroy
+  shell: "{{ terraform_location }} destroy -auto-approve"
+  args:
+    chdir: "/tmp/files-{{ group_names[0] }}"
+  when: hostvars[groups['all'][0]].user_answer.user_input | bool
 - name: Delete vars from parameter store
   include: parameter_store.yml
 - name: Check if outputs.tf.backup exists
-  stat: path=roles/main_infra/files/outputs.tf.backup
+  stat:
+    path: "/tmp/files-{{ group_names[0] }}/outputs.tf.backup"
   register: outputs_backup_stat
 - name: Get back outputs.tf file
-  command: mv roles/main_infra/files/outputs.tf.backup roles/main_infra/files/outputs.tf
+  command: "mv /tmp/files-{{ group_names[0] }}/outputs.tf.backup /tmp/files-{{ group_names[0] }}/outputs.tf"
   when: outputs_backup_stat.stat.exists
 - name: User prompt
   pause:
     prompt: "Do you want to delete S3 bucket with state file and DynamoDB attached to it also? [Yes/No] Default: No"
   register: user_answer
   until: user_answer.user_input | lower in conditional
   retries: 10000
   delay: 1
   vars:
     conditional: ['yes','no','true','false']
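Two things in this task file are easy to miss. First, the outputs.tf shuffle is the workaround for the Terraform bug noted in the comment above: moving the file aside keeps its interpolations out of the destroy graph, and it is restored once the destroy has finished. Second, the confirmation pause runs only on the first host (when: inventory_hostname == groups['all'][0]) while every host reads the recorded answer through hostvars, so a single interactive prompt gates the whole multi-host destroy. A minimal sketch of that prompt-once, read-everywhere pattern, with illustrative task names not taken from the repo:

- name: Ask once, on the first host only
  pause:
    prompt: "Proceed with destroy? [yes/no]"
  register: user_answer
  when: inventory_hostname == groups['all'][0]
- name: Every host acts on the first host's answer
  debug:
    msg: "Proceeding on {{ inventory_hostname }}"
  when: hostvars[groups['all'][0]].user_answer.user_input | bool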

View File

@@ -17,7 +17,7 @@ resource "aws_codedeploy_deployment_group" "explorer" {
   app_name              = aws_codedeploy_app.explorer.name
   deployment_group_name = "${var.prefix}-explorer-dg${count.index}"
   service_role_arn      = aws_iam_role.deployer.arn
-  autoscaling_groups    = ["${aws_launch_configuration.explorer.name}-asg-${element(var.chains, count.index)}"]
+  autoscaling_groups    = [aws_autoscaling_group.explorer[count.index].name]
   deployment_style {
     deployment_option = "WITH_TRAFFIC_CONTROL"

View File

@@ -1,7 +1,9 @@
 # Internal DNS Zone
 resource "aws_route53_zone" "main" {
   name   = "${var.prefix}.${var.dns_zone_name}"
-  vpc_id = aws_vpc.vpc.id
+  vpc {
+    vpc_id = aws_vpc.vpc.id
+  }
   tags = {
     prefix = var.prefix

View File

@@ -1,5 +1,6 @@
 data "aws_ami" "explorer" {
   most_recent = true
+  owners      = ["amazon"]
   filter {
     name   = "name"
@@ -10,11 +11,6 @@ data "aws_ami" "explorer" {
     name   = "virtualization-type"
     values = ["hvm"]
   }
-  filter {
-    name   = "owner-alias"
-    values = ["amazon"]
-  }
 }
resource "aws_launch_configuration" "explorer" {
@ -40,13 +36,13 @@ resource "aws_launch_configuration" "explorer" {
}
resource "aws_placement_group" "explorer" {
count = "var.use_placement_group[var.chains[count.index]] ? 1 : 0"
count = length(matchkeys(keys(var.use_placement_group),values(var.use_placement_group),["True"]))
name = "${var.prefix}-${var.chains[count.index]}-explorer-pg"
strategy = "cluster"
}
resource "aws_autoscaling_group" "explorer" {
count = "length(var.chains)"
count = length(var.chains)
name = "${var.prefix}-${var.chains[count.index]}-asg"
max_size = "4"
min_size = "1"
@ -55,7 +51,7 @@ resource "aws_autoscaling_group" "explorer" {
vpc_zone_identifier = [aws_subnet.default.id]
availability_zones = data.aws_availability_zones.available.names
target_group_arns = [aws_lb_target_group.explorer[0].arn]
placement_group = "${var.prefix}-${var.chains[count.index]}-explorer-pg : null"
placement_group = var.use_placement_group[var.chains[count.index]] == "True" ? "${var.prefix}-${var.chains[count.index]}-explorer-pg" : null
# Health checks are performed by CodeDeploy hooks
health_check_type = "EC2"
@@ -102,7 +98,7 @@ resource "aws_autoscaling_group" "explorer" {
 # TODO: These autoscaling policies are not currently wired up to any triggers
 resource "aws_autoscaling_policy" "explorer-up" {
-  count = "length(var.chains)"
+  count = length(var.chains)
   name                   = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-up"
   autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name
   adjustment_type        = "ChangeInCapacity"
@@ -111,6 +107,7 @@ resource "aws_autoscaling_policy" "explorer-up" {
 }
 resource "aws_autoscaling_policy" "explorer-down" {
+  count                  = length(var.chains)
   name                   = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-down"
   autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name
   adjustment_type        = "ChangeInCapacity"

View File

@@ -59,7 +59,7 @@ resource "aws_lb_target_group" "explorer" {
 }
 resource "aws_alb_listener" "alb_listener" {
-  count             = "length(var.chains)"
+  count             = length(var.chains)
   load_balancer_arn = aws_lb.explorer[count.index].arn
   port              = var.use_ssl[element(var.chains, count.index)] ? "443" : "80"
   protocol          = var.use_ssl[element(var.chains, count.index)] ? "HTTPS" : "HTTP"

View File

@@ -16,7 +16,7 @@ resource "aws_subnet" "default" {
 ## ALB subnet
 resource "aws_subnet" "alb" {
   vpc_id                  = aws_vpc.vpc.id
-  cidr_block              = var.public_subnet_cidr
+  #cidr_block             = var.public_subnet_cidr
+  cidr_block              = cidrsubnet(var.db_subnet_cidr, 5, 1)
   availability_zone       = data.aws_availability_zones.available.names[1]
   map_public_ip_on_launch = true

View File

@@ -1,9 +1,17 @@
 variable "aws_profile" {
-  default = "null"
+  default = null
 }
 variable "aws_region" {
-  default = "null"
+  default = null
 }
+variable "aws_access_key" {
+  default = null
+}
+variable "aws_secret_key" {
+  default = null
+}
 variable "prefix" {

View File

@@ -58,9 +58,9 @@
   args:
     chdir: "/tmp/files-{{ group_names[0] }}"
   with_items:
-    - "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}"
+    - "init{{ ' -backend-config=backend.tfvars' if backend|bool else '' }}"
     - plan -out terraform.tfplan
-    - show terraform.tfplan -json -no-color
+    - show -no-color terraform.tfplan
 - name: Show Terraform plan
   debug:
@@ -86,13 +86,10 @@
   args:
     chdir: "/tmp/files-{{ group_names[0] }}"
   when: hostvars[groups['all'][0]].user_answer.user_input | bool
-  ignore_errors: True
 - name: Ensure Terraform resources has been provisioned
   shell: "echo yes | {{ terraform_location }} apply"
   args:
     chdir: "/tmp/files-{{ group_names[0] }}"
   when: hostvars[groups['all'][0]].user_answer.user_input | bool
-  retries: 1
-  delay: 3
   register: result
   until: result.rc == 0
 - name: Terraform output info into variable
   shell: "{{ terraform_location }} output -json"
@@ -103,7 +100,7 @@
 - name: Output info from Terraform
   debug:
-    var: output.stdout_lines
+    var: (output.stdout|from_json).instructions.value
   when: hostvars[groups['all'][0]].user_answer.user_input | bool
 - name: Ansible delete file glob
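The debug change above is subtle: terraform output -json emits a JSON map shaped like {"instructions": {"value": ..., "type": ...}}, so filtering the registered stdout through from_json and indexing .instructions.value prints just that one output value instead of raw JSON lines. A standalone sketch of the pattern, assuming for brevity that terraform is on the PATH:

- name: Capture Terraform outputs as JSON
  shell: "terraform output -json"
  register: output
- name: Show only the instructions output
  debug:
    var: (output.stdout | from_json).instructions.value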

View File

@@ -1,6 +1,6 @@
 terraform {
   backend "s3" {
-{% if aws_access_key is undefined %}
+{% if aws_access_key is undefined or aws_access_key == '' %}
     profile = "{{ aws_profile|default("default") }}"
 {% else %}
     access_key = "{{ aws_access_key }}"

View File

@@ -1,7 +1,10 @@
-aws_profile = "{{ aws_profile|default("default") if aws_access_key is not defined or aws_access_key='' else 'null' }}"
-aws_access_key = "{{ aws_access_key | default("null") }}"
-aws_secret_key = "{{ aws_secret_key | default("null") }}"
-aws_region = "{{ aws_region | default("us-east-1") }}"
+{% if aws_access_key is undefined or aws_access_key == '' %}
+aws_profile = "{{ aws_profile|default('default') }}"
+{% else %}
+aws_access_key = "{{ aws_access_key | default('null') }}"
+aws_secret_key = "{{ aws_secret_key | default('null') }}"
+{% endif %}
+aws_region = "{{ aws_region | default('us-east-1') }}"
 prefix = "{{ group_names[0] }}"
 key_name = "{{ ec2_ssh_key_name }}"