diff --git a/README.md b/README.md index 2c4ae71..35003c7 100644 --- a/README.md +++ b/README.md @@ -186,6 +186,11 @@ Also note, that changing `backend` variable will force Terraform to forget about You can easily manipulate your deployment from any machine with sufficient prerequisites. If `upload_config_to_s3` variable is set to true, the deployer will automatically upload your `all.yml` file to the s3 bucket, so you can easily download it to any other machine. Simply download this file to your `group_vars` folder and your new deployer will pick up the current deployment instead of creating a new one. + +## Attaching an existing RDS instance to the current deployment + +In some cases you may not want to create a new database, but rather add an existing one for use within the deployment. In order to do that, configure all the proper values at `group_vars/all.yml`, including your DB ID and name, and execute the `ansible-playbook attach_existing_rds.yml` command. This will add the current DB instance to the TF-managed resource group. After that, run `ansible-playbook deploy.yml` as usual. 
+ ## Common Errors and Questions ### S3: 403 error during provisioning diff --git a/attach_existing_rds.yml b/attach_existing_rds.yml new file mode 100644 index 0000000..e5049df --- /dev/null +++ b/attach_existing_rds.yml @@ -0,0 +1,11 @@ +- name: Attach existing RDS instance + hosts: localhost + roles: + - { role: check } + - { role: s3, when: "backend|bool == true" } + - { role: dynamodb, when: "backend|bool == true" } + - { role: attach_existing_rds } + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_REGION: "{{ region }}" diff --git a/roles/attach_existing_rds/defaults/main.yml b/roles/attach_existing_rds/defaults/main.yml new file mode 120000 index 0000000..54ac7fb --- /dev/null +++ b/roles/attach_existing_rds/defaults/main.yml @@ -0,0 +1 @@ +../../main_infra/defaults/main.yml \ No newline at end of file diff --git a/roles/attach_existing_rds/tasks/main.yml b/roles/attach_existing_rds/tasks/main.yml new file mode 100644 index 0000000..46ac491 --- /dev/null +++ b/roles/attach_existing_rds/tasks/main.yml @@ -0,0 +1,33 @@ +- name: Local or remote backend selector (remote) + template: + src: roles/main_infra/templates/remote-backend-selector.tf.j2 + dest: roles/main_infra/files/remote-backend-selector.tf + when: + - backend|bool == true + +- name: Local or remote backend selector (local) + file: + state: absent + dest: roles/main_infra/files/remote-backend-selector.tf + when: + - backend | default ('false') | bool != true + +- name: Generating variables file + template: + src: roles/main_infra/templates/terraform.tfvars.j2 + dest: roles/main_infra/files/terraform.tfvars + +- name: Generating backend file + template: + src: roles/main_infra/templates/backend.tfvars.j2 + dest: roles/main_infra/files/backend.tfvars + when: backend|bool == true + +# Workaround: call the terraform binary via shell because the Ansible terraform module returns an unexpected error here. 
+- name: Terraform provisioning + shell: "echo yes | {{ terraform_location }} {{ item }}" + args: + chdir: "roles/main_infra/files" + with_items: + - "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}" + - "import aws_db_instance.default {{ prefix }}-{{ db_id }}" diff --git a/roles/destroy/tasks/main.yml b/roles/destroy/tasks/main.yml index c15df13..09b506f 100644 --- a/roles/destroy/tasks/main.yml +++ b/roles/destroy/tasks/main.yml @@ -48,12 +48,19 @@ command: mv roles/main_infra/files/outputs.tf.backup roles/main_infra/files/outputs.tf when: outputs_backup_stat.stat.exists +- name: User prompt + pause: + prompt: "Do you want to delete S3 bucket with state file and DynamoDB attached to it also? [Yes/No] Default: No" + register: user_answer + - name: Destroy S3 bucket s3_bucket: name: "{{ bucket }}" state: absent force: yes + when: user_answer.user_input|bool == True - dynamodb_table: name: "{{ dynamodb_table }}" state: absent + when: user_answer.user_input|bool == True diff --git a/roles/main_infra/files/rds.tf b/roles/main_infra/files/rds.tf index d153635..c70e2d9 100644 --- a/roles/main_infra/files/rds.tf +++ b/roles/main_infra/files/rds.tf @@ -1,4 +1,5 @@ resource "aws_db_instance" "default" { + name = "${var.prefix}-${var.db_name}" # NOTE(review): RDS Postgres DBName permits only letters, digits and underscores — confirm this hyphenated value is accepted identifier = "${var.prefix}-${var.db_id}" engine = "postgres" engine_version = "10.5" diff --git a/roles/main_infra/tasks/main.yml b/roles/main_infra/tasks/main.yml index 7181564..ace86b9 100644 --- a/roles/main_infra/tasks/main.yml +++ b/roles/main_infra/tasks/main.yml @@ -24,21 +24,38 @@ when: backend|bool == true #Workaround since terraform module return unexpected error. 
-- name: Terraform provisioning +- name: Terraform plan construct shell: "echo yes | {{ terraform_location }} {{ item }}" + register: tf_plan args: chdir: "roles/main_infra/files" with_items: - "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}" - plan - - apply + +- name: Show Terraform plan + debug: + msg: "{{ tf_plan.results | map(attribute='stdout_lines') | list }}" + +- name: User prompt + pause: + prompt: "Are you absolutely sure you want to execute the deployment plan shown above? [Yes/No] Default: No" + register: user_answer + +- name: Terraform provisioning + shell: "echo yes | {{ terraform_location }} apply" + args: + chdir: "roles/main_infra/files" + when: user_answer.user_input|bool == True - name: Terraform output info into variable shell: "{{ terraform_location }} output" register: output args: chdir: "roles/main_infra/files" + when: user_answer.user_input|bool == True - name: Output info from Terraform debug: var: output.stdout_lines + when: user_answer.user_input|bool == True