Merge branch 'upd6' into HEAD

This commit is contained in:
a@a.ru 2019-05-16 22:20:01 +03:00
commit eb72725ef7
13 changed files with 138 additions and 73 deletions

2
.gitignore vendored
View File

@ -1,3 +1,5 @@
log.txt
# Terraform State
*.terraform*
*terraform.tfstate.d*

View File

@ -70,7 +70,8 @@ The single point of configuration in this script is a `group_vars/all.yml` file.
- `aws_access_key` and `aws_secret_key` is a credentials pair that provides access to AWS for the deployer;
- `backend` variable defines whether deployer should keep state files remote or locally. Set `backend` variable to `true` if you want to save state file to the remote S3 bucket;
- `upload_config_to_s3` - set to `true` if you want to upload config`all.yml` file to the S3 bucket automatically during deployment. Will not work if `backend` is set to false;
- `upload_config_to_s3` - set to `true` if you want to upload config `all.yml` file to the S3 bucket automatically after the deployment. Will not work if `backend` is set to false;
- `upload_debug_info_to_s3` - set to `true` if you want to upload full log output to the S3 bucket automatically after the deployment. Will not work if `backend` is set to false. *IMPORTANT*: Locally logs are stored at `log.txt` which is not cleaned automatically. Please, do not forget to clean it manually or using the `clean.yml` playbook;
- `bucket` represents a globally unique name of the bucket where your configs and state will be stored. It will be created automatically during the deployment;
- `prefix` - is a unique tag to use for provisioned resources (5 alphanumeric chars or less);
- `chains` - maps chains to the URLs of HTTP RPC endpoints, an ordinary blockchain node can be used;
@ -209,7 +210,7 @@ Despite the fact that Terraform cache is automatically cleared bef
## Migrating deployer to another machine
You can easily manipulate your deployment from any machine with sufficient prerequisites. If `upload_config_to_s3` variable is set to true, the deployer will automatically upload your `all.yml` file to the s3 bucket, so you can easily download it to any other machine. Simply download this file to your `group_vars` folder and your new deployer will pick up the current deployment instead of creating a new one.
You can easily manipulate your deployment from any machine with sufficient prerequisites. If `upload_config_to_s3` variable is set to true, the deployer will automatically upload your `all.yml` file to the s3 bucket, so you can easily download it to any other machine. Simply download this file to your `group_vars` folder and your new deployer will pick up the current deployment instead of creating a new one.
## Attaching the existing RDS instance to the current deployment
@ -219,9 +220,13 @@ In some cases you may want not to create a new database, but to add the existing
**Note 1**: while executing `ansible-playbook attach_existing_rds.yml` the S3 and DynamoDB will be automatically created (if `backend` variable is set to `true`) to store Terraform state files.
**Note 2**: the actual name of your resource must include the prefix that you will use in this deployment.
Example:
Real resource: tf-poa
`prefix` variable: tf
`chain_db_id` variable: poa
**Note 3**: make sure MultiAZ is disabled on your database.

View File

@ -4,3 +4,4 @@ pipelining = True
inventory = hosts
deprecation_warnings = False
host_key_checking=false
log_path=log.txt

View File

@ -1,11 +1,25 @@
- name: Attach existing RDS instance
hosts: localhost
roles:
- { role: check }
- { role: s3, when: "backend|bool == true" }
- { role: dynamodb, when: "backend|bool == true" }
- { role: attach_existing_rds }
tasks:
- block:
- include_role:
name: check
- include_role:
name: "{{ item }}"
with_items:
- s3
- dynamodb
when: backend|bool == true
- include_role:
name: attach_existing_rds
always:
- include_role:
name: s3_config
when: backend|bool == true and upload_config_to_s3|bool == true
- include_role:
name: s3_debug
when: backend|bool == true and upload_debug_info_to_s3|bool == true
vars_prompt:
- name: "confirmation"
prompt: "Are you sure you want to attach the existing RDS? If backend variable is set to True, this action includes creating the S3 and DynamoDB table for storing Terraform state files."
prompt: "Are you sure you want to attach the existing RDS? If backend variable is set to True, this action also includes creating the S3 and DynamoDB table for storing Terraform state files."
default: False

View File

@ -11,3 +11,4 @@
- roles/main_infra/files/main.tfvars
- roles/main_infra/files/backend.tfvars
- roles/main_infra/files/terraform.tfplan
- log.txt

View File

@ -1,7 +1,21 @@
- name: Prepare infrastructure
hosts: localhost
roles:
- { role: check }
- { role: s3, when: "backend|bool == true" }
- { role: dynamodb, when: "backend|bool == true" }
- { role: main_infra }
tasks:
- block:
- include_role:
name: check
- include_role:
name: "{{ item }}"
with_items:
- s3
- dynamodb
when: backend|bool == true
- include_role:
name: main_infra
always:
- include_role:
name: s3_config
when: backend|bool == true and upload_config_to_s3|bool == true
- include_role:
name: s3_debug
when: backend|bool == true and upload_debug_info_to_s3|bool == true

View File

@ -1,15 +1,21 @@
- name: Save config file
hosts: localhost
roles:
- { role: s3, when: "backend|bool == true" }
- name: Deploy BlockScout
hosts: localhost
tasks:
- name: Use role in loop
include_role:
name: main_software
loop: "{{ chain_custom_environment.keys() }}"
loop_control:
loop_var: chain
index_var: index
- block:
- name: Use role in loop
include_role:
name: main_software
loop: "{{ chain_custom_environment.keys() }}"
loop_control:
loop_var: chain
index_var: index
always:
- include_role:
name: s3
when: backend|bool == true and (upload_debug_info_to_s3|bool == true or upload_config_to_s3|bool ==true)
- include_role:
name: s3_config
when: backend|bool == true and upload_config_to_s3|bool == true
- include_role:
name: s3_debug
when: backend|bool == true and upload_debug_info_to_s3|bool == true

View File

@ -11,8 +11,9 @@ aws_region: "us-east-1"
## If set to true backend will be uploaded and stored at S3 bucket, so you can easily manage your deployment from any machine. It is highly recommended not to change this variable
backend: true
## If this is set to true along with backend variable, this config file will be saved to s3 bucket. Please, make sure to name it as all.yml. Otherwise, no upload will be performed
## If this is set to true along with backend variable, this config file/the log output will be saved to s3 bucket. Please, make sure to name the config file "all.yml". Otherwise, no upload will be performed
upload_config_to_s3: true
upload_debug_info_to_s3: true
## The bucket and dynamodb_table variables will be used only when backend variable is set to true
## Name of the bucket where TF state files will be stored

View File

@ -46,50 +46,3 @@
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
- name: Check if config file exists
stat:
path: "{{ playbook_dir }}/group_vars/all.yml"
register: stat_result
when: upload_config_to_s3|bool == True
- name: Copy temporary file to be uploaded
command: "cp {{ playbook_dir }}/group_vars/all.yml {{ playbook_dir }}/group_vars/all.yml.temp"
when: upload_config_to_s3|bool == True
- name: Remove insecure AWS variables
replace:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
regexp: 'aws_.*'
replace: '<There was an aws-related insecure variable to keep at S3. Removed>'
when: upload_config_to_s3|bool == True
- name: Remove other insecure variables
replace:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
regexp: 'secret_.*'
replace: '<There was an insecure variable to keep at S3. Removed>'
when: upload_config_to_s3|bool == True
- name: Upload config to S3 bucket
aws_s3:
bucket: "{{ prefix }}-{{ bucket }}"
object: all.yml
src: "{{ playbook_dir }}/group_vars/all.yml.temp"
mode: put
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
region: "{{ region }}"
vars:
access_key: "{{ aws_access_key|default(omit) }}"
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
when: upload_config_to_s3|bool == True and stat_result.stat.exists == True
- name: Remove temp file
file:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
state: absent
when: upload_config_to_s3|bool == True

View File

@ -0,0 +1 @@
aws_profile: "default"

View File

@ -0,0 +1,45 @@
- name: Check if config file exists
stat:
path: "{{ playbook_dir }}/group_vars/all.yml"
register: stat_result
- name: Copy temporary file to be uploaded
command: "cp {{ playbook_dir }}/group_vars/all.yml {{ playbook_dir }}/group_vars/all.yml.temp"
when: stat_result.stat.exists == True
- name: Remove insecure AWS variables
replace:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
regexp: 'aws_.*'
replace: '<There was an aws-related insecure variable to keep at S3. Removed>'
when: stat_result.stat.exists == True
- name: Remove other insecure variables
replace:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
regexp: 'secret_.*'
replace: '<There was an insecure variable to keep at S3. Removed>'
when: stat_result.stat.exists == True
- name: Upload config to S3 bucket
aws_s3:
bucket: "{{ prefix }}-{{ bucket }}"
object: all.yml
src: "{{ playbook_dir }}/group_vars/all.yml.temp"
mode: put
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
region: "{{ region }}"
vars:
access_key: "{{ aws_access_key|default(omit) }}"
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
when: stat_result.stat.exists == True
- name: Remove temp file
file:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
state: absent
when: stat_result.stat.exists == True

View File

@ -0,0 +1 @@
aws_profile: "default"

View File

@ -0,0 +1,21 @@
- name: Check log file exists
stat:
path: "{{ playbook_dir }}/log.txt"
register: stat_result
- name: Upload logs to s3
aws_s3:
bucket: "{{ prefix }}-{{ bucket }}"
object: log.txt
src: "{{ playbook_dir }}/log.txt"
mode: put
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
region: "{{ region }}"
vars:
access_key: "{{ aws_access_key|default(omit) }}"
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
when: stat_result.stat.exists == true