From f31883411b53ded7737a283500c380710970b8d7 Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Mon, 20 May 2019 23:38:14 +0300 Subject: [PATCH 01/24] add possibiltiy to deploy all BlockScouts at once --- .gitignore | 6 + deploy_software.yml | 8 +- group_vars/all.yml.example | 49 ------ group_vars/blockscout.yml.example | 26 --- group_vars/infrastructure.yml.example | 50 +----- host_vars/all.yml.example | 47 ++++++ host_vars/blockscout.yml.example | 5 + host_vars/infrastructure.yml.example | 22 +++ hosts | 1 - hosts.example | 11 ++ roles/main_infra/files/libexec/init.sh | 15 ++ roles/main_infra/tasks/main.yml | 29 ++-- roles/main_infra/tasks/parameter_store.yml | 2 +- roles/main_infra/templates/backend.tfvars.j2 | 4 +- roles/main_infra/templates/hosts.tf.j2 | 6 +- roles/main_infra/templates/routing.tf.j2 | 2 +- .../main_infra/templates/terraform.tfvars.j2 | 80 +++++----- roles/main_software/tasks/main.yml | 149 ++++++++++-------- roles/s3/tasks/main.yml | 8 +- roles/s3_config/tasks/config.yml | 38 +++++ roles/s3_config/tasks/main.yml | 53 +------ roles/s3_debug/tasks/main.yml | 2 +- 22 files changed, 305 insertions(+), 308 deletions(-) delete mode 100644 group_vars/blockscout.yml.example create mode 100644 host_vars/all.yml.example create mode 100644 host_vars/blockscout.yml.example create mode 100644 host_vars/infrastructure.yml.example delete mode 100644 hosts create mode 100644 hosts.example create mode 100644 roles/s3_config/tasks/config.yml diff --git a/.gitignore b/.gitignore index b59e1d0..29fb423 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,10 @@ roles/main_infra/files/provider.tf /PREFIX group_vars/* +host_vars/* +!host_vars/all.yml.example +!host_vars/blockscout.yml.example +!host_vars/infrastructure.yml.example !group_vars/all.yml.example !group_vars/blockscout.yml.example !group_vars/infrastructure.yml.example @@ -29,3 +33,5 @@ group_vars/* .*.swp blockscout-*/ + +hosts diff --git a/deploy_software.yml b/deploy_software.yml index 21c38ce..d3f5213 
100644 --- a/deploy_software.yml +++ b/deploy_software.yml @@ -1,14 +1,10 @@ - name: Deploy BlockScout - hosts: localhost + hosts: all tasks: - block: - - name: Use role in loop + - name: Deploy include_role: name: main_software - loop: "{{ chain_custom_environment.keys() }}" - loop_control: - loop_var: chain - index_var: index always: - include_role: name: s3 diff --git a/group_vars/all.yml.example b/group_vars/all.yml.example index b6b29bc..6b22d2f 100644 --- a/group_vars/all.yml.example +++ b/group_vars/all.yml.example @@ -18,52 +18,3 @@ upload_debug_info_to_s3: true ## The bucket and dynamodb_table variables will be used only when backend variable is set to true ## Name of the bucket where TF state files will be stored bucket: "poa-terraform-state" - -## All resources will be prefixed with this one -prefix: "poa" - -## This dictionary represents a set of environment variables required for each chain. Variables that commented out are optional. -chain_custom_environment: - core: - NETWORK: "(POA)" # Name of the organization/community that hosts the chain - SUBNETWORK: "Core Network" # Actual name of the particular network - NETWORK_ICON: "_network_icon.html" # Either _test_network_icon.html or _network_icon.html, depending on the type of the network (prod/test). - LOGO: "/images/blockscout_logo.svg" # Chain logo - ETHEREUM_JSONRPC_VARIANT: "parity" # Chain client installed at ETHEREUM_JSONRPC_HTTP_URL - ETHEREUM_JSONRPC_HTTP_URL: "http://localhost:8545" # Network RPC endpoint - ETHEREUM_JSONRPC_TRACE_URL: "http://localhost:8545" # Network RPC endpoint in trace mode. Can be the same as the previous variable - ETHEREUM_JSONRPC_WS_URL: "ws://localhost:8546" # Network RPC endpoint in websocket mode - NETWORK_PATH: "/poa/core" # relative URL path, for example: blockscout.com/$NETWORK_PATH - SECRET_KEY_BASE: "TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==" # Secret key for production assets protection. 
Use `mix phx.gen.secret` or `openssl rand -base64 64 | tr -d '\n'` to generate - PORT: 4000 # Port the application runs on - COIN: "POA" # Coin name at the Coinmarketcap, used to display current exchange rate - POOL_SIZE: 20 # Defines the number of database connections allowed - ECTO_USE_SSL: "false" # Specifies whether or not to use SSL on Ecto queries - ALB_SSL_POLICY: "ELBSecurityPolicy-2016-08" #SSL policy for Load Balancer. Required if ECTO_USE_SSL is set to true - ALB_CERTIFICATE_ARN: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24" #ARN of the certificate to attach to the LB. Required if ECTO_USE_SSL is set to - true - HEART_BEAT_TIMEOUT: 30 # Heartbeat is an Erlang monitoring service that will restart BlockScout if it becomes unresponsive. This variables configures the timeout before Blockscout will be restarted. - HEART_COMMAND: "sudo systemctl restart explorer.service" # This variable represents a command that is used to restart the service - BLOCKSCOUT_VERSION: "v1.3.11-beta" # Added to the footer to signify the current BlockScout version - RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.9-beta" # The link to Blockscout release notes in the footer. - ELIXIR_VERSION: "v1.8.1" # Elixir version to install on the node before Blockscout deploy - BLOCK_TRANSFORMER: "base" # Transformer for blocks: base or clique. - GRAPHIQL_TRANSACTION: "0xbc426b4792c48d8ca31ec9786e403866e14e7f3e4d39c7f2852e518fae529ab4" # Random tx hash on the network, used as default for graphiql tx. - TXS_COUNT_CACHE_PERIOD: 7200 # Interval in seconds to restart the task, which calculates the total txs count. 
- ADDRESS_WITH_BALANCES_UPDATE_INTERVAL: 1800 #Interval in seconds to restart the task, which calculates addresses with balances - LINK_TO_OTHER_EXPLORERS: "false" # If true, links to other explorers are added in the footer - USE_PLACEMENT_GROUP: "false" # If true, BlockScout instance will be created in the placement group - #The following variables are optional - #FIRST_BLOCK: 0 # The block number, where indexing begins from. - #COINMARKETCAP_PAGES: 10 # Sets the number of pages at Coinmarketcap to search coin at. Defaults to 10 - #METADATA_CONTRACT: # Address of metadata smart contract. Used by POA Network to obtain Validators information to display in the UI - #VALIDATORS_CONTRACT: #Address of the EMission Fund smart contract - #SUPPLY_MODULE: "false" # Used by the xDai Chain to calculate the total supply of the chain - #SOURCE_MODULE: "false" # Used to calculate the total supply - #DATABASE_URL: # Database URL. Usually generated automatically, but this variable can be used to modify the URL of the databases during the updates. - #CHECK_ORIGIN: "false" # Used to check the origin of requests when the origin header is present - #DATADOG_HOST: # Host configuration variable for Datadog integration - #DATADOG_PORT: # Port configuration variable for Datadog integration - #SPANDEX_BATCH_SIZE: # Spandex and Datadog configuration setting. - #SPANDEX_SYNC_THRESHOLD: # Spandex and Datadog configuration setting. 
- #BLOCK_COUNT_CACHE_TTL: #Time to live of block count cache in milliseconds diff --git a/group_vars/blockscout.yml.example b/group_vars/blockscout.yml.example deleted file mode 100644 index f31ec5d..0000000 --- a/group_vars/blockscout.yml.example +++ /dev/null @@ -1,26 +0,0 @@ -# BlockScout related variables - -## Exact path to the TF binary on your local machine -terraform_location: "/usr/local/bin/terraform" - -## An address of BlockScout repo to download -blockscout_repo: https://github.com/poanetwork/blockscout - -## A branch at `blockscout_repo` with ready-to-deploy version of BlockScout -chain_branch: - core: "production-core" - sokol: "production-sokol" - -## Usually you don't want to merge branches, so it is commented out by default -#chain_merge_commit: -# core: "2cdead1" -# sokol: "2cdead1" - -## If you want you can download and configure repo on your own. It should has the following name - blockscout-{{ chain_name }} and exist inside root playbook folder. Use the following variable to prevent playbooks from overriding -skip_fetch: false - -## Login data for the test database. 
Please, use postgres database with the version specified at BlockScout repo prerequisites -ps_host: localhost -ps_user: myuser -ps_password: mypass -ps_db: mydb diff --git a/group_vars/infrastructure.yml.example b/group_vars/infrastructure.yml.example index 6532971..c728b99 100644 --- a/group_vars/infrastructure.yml.example +++ b/group_vars/infrastructure.yml.example @@ -1,4 +1,4 @@ -# Infrastructure related variables +# Infrastructure related group variables ## Name of the DynamoDB table where current lease of TF state file will be stored dynamodb_table: "poa-terraform-lock" @@ -7,9 +7,6 @@ dynamodb_table: "poa-terraform-lock" ec2_ssh_key_name: "sokol-test" ec2_ssh_key_content: "" -## EC2 Instance will have the following size: -instance_type: "m5.large" - ## VPC containing Blockscout resources will be created as following: vpc_cidr: "10.0.0.0/16" public_subnet_cidr: "10.0.0.0/24" @@ -23,48 +20,3 @@ dns_zone_name: "poa.internal" ## Size of the EC2 instance EBS root volume root_block_size: 120 - -# DB related variables - -## This value represents the name of the DB that will be created/attached. Must be unique. Will be prefixed with `prefix` variable. -chain_db_id: - core: "core" - sokol: "sokol" - -## Each network should have it's own DB. This variable maps chain to DB name. Should not be messed with db_id variable, which represents the RDS instance ID. -chain_db_name: - core: "core" - sokol: "sokol" - -## The following variables describes the DB configurations for each network including usernames, password, instance class, etc. -chain_db_username: - core: "core" - sokol: "sokol" - -chain_db_password: - core: "fkowfjpoi309021" - sokol: "kopsdOPpa9213K" - -chain_db_instance_class: - core: "db.m4.xlarge" - sokol: "db.m4.large" - -## Size of storage in GiB. -chain_db_storage: - core: "200" - sokol: "100" - -## Type of disk to be used for the DB. 
-chain_db_storage_type: - core: "io1" - sokol: "gp2" - -## This should be set only if chain_db_storage is set to io1 -#chain_db_iops: -# core: "1000" -# sokol: "1500" - -## Blockscout uses Postgres as the DB engine. This variable describes the Postgres version used in each particular chain. -chain_db_version: - core: "10.5" - sokol: "10.6" diff --git a/host_vars/all.yml.example b/host_vars/all.yml.example new file mode 100644 index 0000000..e83cb6c --- /dev/null +++ b/host_vars/all.yml.example @@ -0,0 +1,47 @@ +ansible_host: localhost # An address of machine where BlockScout staging will be built +ansible_connection: local # Comment out if your ansible_host is not localhost + +chain: poa # Can be not unique. Represents chain name. + +env_vars: + #NETWORK: "(POA)" # Name of the organization/community that hosts the chain + #SUBNETWORK: "Core Network" # Actual name of the particular network + #NETWORK_ICON: "_network_icon.html" # Either _test_network_icon.html or _network_icon.html, depending on the type of the network (prod/test). + #LOGO: "/images/blockscout_logo.svg" # Chain logo + #ETHEREUM_JSONRPC_VARIANT: "parity" # Chain client installed at ETHEREUM_JSONRPC_HTTP_URL + #ETHEREUM_JSONRPC_HTTP_URL: "http://localhost:8545" # Network RPC endpoint + #ETHEREUM_JSONRPC_TRACE_URL: "http://localhost:8545" # Network RPC endpoint in trace mode. Can be the same as the previous variable + #ETHEREUM_JSONRPC_WS_URL: "ws://localhost:8546" # Network RPC endpoint in websocket mode + #NETWORK_PATH: "/poa/core" # relative URL path, for example: blockscout.com/$NETWORK_PATH + #SECRET_KEY_BASE: "TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==" # Secret key for production assets protection. 
Use `mix phx.gen.secret` or `openssl rand -base64 64 | tr -d '\n'` to generate + #PORT: 4000 # Port the application runs on + #COIN: "POA" # Coin name at the Coinmarketcap, used to display current exchange rate + #POOL_SIZE: 20 # Defines the number of database connections allowed + #ECTO_USE_SSL: "false" # Specifies whether or not to use SSL on Ecto queries + #ALB_SSL_POLICY: "ELBSecurityPolicy-2016-08" #SSL policy for Load Balancer. Required if ECTO_USE_SSL is set to true + #ALB_CERTIFICATE_ARN: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24" #ARN of the certificate to attach to the LB. Required if ECTO_USE_SSL is set to true + #HEART_BEAT_TIMEOUT: 30 # Heartbeat is an Erlang monitoring service that will restart BlockScout if it becomes unresponsive. This variables configures the timeout before Blockscout will be restarted. + #HEART_COMMAND: "sudo systemctl restart explorer.service" # This variable represents a command that is used to restart the service + BLOCKSCOUT_VERSION: "v1.3.13-beta" # Added to the footer to signify the current BlockScout version + RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta" # The link to Blockscout release notes in the footer. + #ELIXIR_VERSION: "v1.8.1" # Elixir version to install on the node before Blockscout deploy + #BLOCK_TRANSFORMER: "base" # Transformer for blocks: base or clique. + #GRAPHIQL_TRANSACTION: "0xbc426b4792c48d8ca31ec9786e403866e14e7f3e4d39c7f2852e518fae529ab4" # Random tx hash on the network, used as default for graphiql tx. + #TXS_COUNT_CACHE_PERIOD: 7200 # Interval in seconds to restart the task, which calculates the total txs count. 
+ #ADDRESS_WITH_BALANCES_UPDATE_INTERVAL: 1800 #Interval in seconds to restart the task, which calculates addresses with balances + #LINK_TO_OTHER_EXPLORERS: "false" # If true, links to other explorers are added in the footer + #USE_PLACEMENT_GROUP: "false" # If true, BlockScout instance will be created in the placement group + ##The following variables are optional + #FIRST_BLOCK: 0 # The block number, where indexing begins from. + #COINMARKETCAP_PAGES: 10 # Sets the number of pages at Coinmarketcap to search coin at. Defaults to 10 + #METADATA_CONTRACT: # Address of metadata smart contract. Used by POA Network to obtain Validators information to display in the UI + #VALIDATORS_CONTRACT: #Address of the EMission Fund smart contract + #SUPPLY_MODULE: "false" # Used by the xDai Chain to calculate the total supply of the chain + #SOURCE_MODULE: "false" # Used to calculate the total supply + #DATABASE_URL: # Database URL. Usually generated automatically, but this variable can be used to modify the URL of the databases during the updates. + #CHECK_ORIGIN: "false" # Used to check the origin of requests when the origin header is present + #DATADOG_HOST: # Host configuration variable for Datadog integration + #DATADOG_PORT: # Port configuration variable for Datadog integration + #SPANDEX_BATCH_SIZE: # Spandex and Datadog configuration setting. + #SPANDEX_SYNC_THRESHOLD: # Spandex and Datadog configuration setting. 
+ #BLOCK_COUNT_CACHE_TTL: #Time to live of block count cache in milliseconds diff --git a/host_vars/blockscout.yml.example b/host_vars/blockscout.yml.example new file mode 100644 index 0000000..92441f3 --- /dev/null +++ b/host_vars/blockscout.yml.example @@ -0,0 +1,5 @@ +skip_fetch: true + +blockscout_repo: https://github.com/poanetwork/blockscout +branch: "production-core" +#merge_commit: "2cdead1" diff --git a/host_vars/infrastructure.yml.example b/host_vars/infrastructure.yml.example new file mode 100644 index 0000000..2a56cbd --- /dev/null +++ b/host_vars/infrastructure.yml.example @@ -0,0 +1,22 @@ +terraform_location: "/usr/local/bin/terraform" + +db_id: "core" # This value represents the name of the DB that will be created/attached. Must be unique. Will be prefixed with `prefix` variable. +db_name: "core" # Each network should have it's own DB. This variable maps chain to DB name. Should not be messed with db_id variable, which represents the RDS instance ID. + +## The following variables describes the DB configurations for each network including usernames, password, instance class, etc. +db_username: "core" +db_password: "fkowfjpoi309021" +db_instance_class: "db.t3.medium" +db_storage: "100" # in GiB +db_storage_type: "gp2" # see https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html for details +#db_iops: "1000" # This should be set only if chain_db_storage is set to `io1` +db_version: "10.6" #Blockscout uses Postgres as the DB engine. This variable describes the Postgres version used in each particular chain. 
+ +instance_type: "m5.large" # EC2 BlockScout Instance will have this type +use_placement_group: false # Choose whether or not to group BlockScout instances into group + +# Please, specify the credentials for the test Postgres installation +ps_host: localhost +ps_user: myuser +ps_password: mypass +ps_db: mydb diff --git a/hosts b/hosts deleted file mode 100644 index 2302eda..0000000 --- a/hosts +++ /dev/null @@ -1 +0,0 @@ -localhost ansible_connection=local diff --git a/hosts.example b/hosts.example new file mode 100644 index 0000000..9588f6e --- /dev/null +++ b/hosts.example @@ -0,0 +1,11 @@ +# Each group and host name must be unique + +[poa] +sokol +core + +[eth] +kovan +main +ropst +rink diff --git a/roles/main_infra/files/libexec/init.sh b/roles/main_infra/files/libexec/init.sh index de4ef7b..d485c5b 100755 --- a/roles/main_infra/files/libexec/init.sh +++ b/roles/main_infra/files/libexec/init.sh @@ -103,6 +103,21 @@ log "Setting up application environment.." mkdir -p /opt/app chown -R ec2-user /opt/app +log "Creating logrotate config" + +cat <<EOF > /etc/logrotate.d/blockscout + +/var/log/messages* { + rotate 5 + size 1G + compress + missingok + delaycompress + copytruncate +} + +EOF + log "Creating explorer systemd service.."
cat < /lib/systemd/system/explorer.service diff --git a/roles/main_infra/tasks/main.yml b/roles/main_infra/tasks/main.yml index e4d9526..a31002a 100644 --- a/roles/main_infra/tasks/main.yml +++ b/roles/main_infra/tasks/main.yml @@ -3,27 +3,27 @@ src: remote-backend-selector.tf.j2 dest: roles/main_infra/files/remote-backend-selector.tf when: - - backend|bool == true + - backend|bool - name: Local or remote backend selector (local) file: state: absent dest: roles/main_infra/files/remote-backend-selector.tf when: - - backend | default ('false') | bool != true + - backend | default('false') | bool - name: Generating variables file template: src: terraform.tfvars.j2 dest: roles/main_infra/files/terraform.tfvars vars: - db_iops: "{{ chain_db_iops | default({}) }}" + db_iops: "{{ db_iops | default({}) }}" - name: Generating backend file template: src: backend.tfvars.j2 dest: roles/main_infra/files/backend.tfvars - when: backend|bool == true + when: backend | default('false') | bool - name: Check if .terraform folder exists stat: @@ -34,13 +34,13 @@ file: path: roles/main_infra/files/.terraform/ state: absent - when: stat_result.stat.exists == True + when: stat_result.stat.exists - name: Generate Terraform files template: src: "{{ item.key }}" dest: "{{ item.value }}" - with_dict: {hosts.tf.j2: roles/main_infra/files/hosts.tf,routing.tf.j2: roles/main_infra/files/routing.tf,provider.tf.j2: roles/main_infra/files/provider.tf} + with_dict: { hosts.tf.j2: roles/main_infra/files/hosts.tf, routing.tf.j2: roles/main_infra/files/routing.tf, provider.tf.j2: roles/main_infra/files/provider.tf } #Workaround since terraform module return unexpected error. - name: Terraform plan construct @@ -61,36 +61,35 @@ pause: prompt: "Are you absolutely sure you want to execute the deployment plan shown above? 
[False]" register: user_answer + until: user_answer.user_input | lower in ["false", "no", "true", "yes", ""] + retries: 10000 + delay: 1 - name: Insert vars into parameter store include: parameter_store.yml - loop: "{{ chain_custom_environment.keys() }}" - loop_control: - loop_var: chain - index_var: index - when: user_answer.user_input|bool == True + when: user_answer.user_input | bool - name: Terraform provisioning shell: "echo yes | {{ terraform_location }} apply terraform.tfplan" args: chdir: "roles/main_infra/files" - when: user_answer.user_input|bool == True + when: user_answer.user_input | bool ignore_errors: True - name: Ensure Terraform resources has been provisioned shell: "echo yes | {{ terraform_location }} apply" args: chdir: "roles/main_infra/files" - when: user_answer.user_input|bool == True + when: user_answer.user_input | bool - name: Terraform output info into variable shell: "{{ terraform_location }} output -json" register: output args: chdir: "roles/main_infra/files" - when: user_answer.user_input|bool == True + when: user_answer.user_input | bool - name: Output info from Terraform debug: var: output.stdout_lines - when: user_answer.user_input|bool == True + when: user_answer.user_input | bool diff --git a/roles/main_infra/tasks/parameter_store.yml b/roles/main_infra/tasks/parameter_store.yml index 03edc30..bbf13b7 100644 --- a/roles/main_infra/tasks/parameter_store.yml +++ b/roles/main_infra/tasks/parameter_store.yml @@ -1,7 +1,7 @@ - name: Prepare variables for Parameter Store set_fact: chain_ps_env: "{{ chain_ps_env | combine ({item.key|lower : item.value}) }}" - with_dict: "{{ chain_custom_environment[chain] }}" + with_dict: "{{ hostvars[inventory_hostname]['env_vars'] }}" vars: chain_ps_env: {} diff --git a/roles/main_infra/templates/backend.tfvars.j2 b/roles/main_infra/templates/backend.tfvars.j2 index c086578..af83daa 100644 ---
a/roles/main_infra/templates/backend.tfvars.j2 +++ b/roles/main_infra/templates/backend.tfvars.j2 @@ -1,3 +1,3 @@ -bucket = "{{ prefix }}-{{ bucket }}" -dynamodb_table = "{{ prefix }}-{{ dynamodb_table }}" +bucket = "{{ group_names[0] }}-{{ bucket }}" +dynamodb_table = "{{ group_names[0] }}-{{ dynamodb_table }}" key = "terraform.tfstate" diff --git a/roles/main_infra/templates/hosts.tf.j2 b/roles/main_infra/templates/hosts.tf.j2 index 43ff904..27f8d77 100644 --- a/roles/main_infra/templates/hosts.tf.j2 +++ b/roles/main_infra/templates/hosts.tf.j2 @@ -39,7 +39,7 @@ resource "aws_launch_configuration" "explorer" { } } -{% for key, value in chain_custom_environment.iteritems() %} +{% for key, value in env_vars.iteritems() %} {% if value['USE_PLACEMENT_GROUP']|default('true') == "true" %} resource "aws_placement_group" "explorer-{{key}}" { name = "${var.prefix}-{{key}}-explorer-pg" @@ -48,13 +48,13 @@ resource "aws_placement_group" "explorer-{{key}}" { {% endif %} {% endfor %} -{% for key, value in chain_custom_environment.iteritems() %} +{% for key, value in env_vars.iteritems() %} resource "aws_autoscaling_group" "explorer-{{key}}" { name = "${aws_launch_configuration.explorer.name}-asg-{{key}}" max_size = "4" min_size = "1" desired_capacity = "1" -{% if value['USE_PLACEMENT_GROUP']|default('true') == "true" %} placement_group = "${var.prefix}-{{key}}-explorer-pg" +{% if use_placement_group | default('false') == "true" %} placement_group = "${var.prefix}-{{key}}-explorer-pg" {% endif %} launch_configuration = "${aws_launch_configuration.explorer.name}" vpc_zone_identifier = ["${aws_subnet.default.id}"] diff --git a/roles/main_infra/templates/routing.tf.j2 b/roles/main_infra/templates/routing.tf.j2 index 7593d1c..9dd190f 100644 --- a/roles/main_infra/templates/routing.tf.j2 +++ b/roles/main_infra/templates/routing.tf.j2 @@ -58,7 +58,7 @@ resource "aws_lb_target_group" "explorer" { } } -{% for key, value in chain_custom_environment.iteritems() %} +{% for key, value in 
env_vars.iteritems() %} resource "aws_alb_listener" "alb_listener{{loop.index-1}}" { load_balancer_arn = "${aws_lb.explorer.*.arn[{{loop.index-1}}]}" port = "${lookup(var.use_ssl,element(var.chains,{{loop.index-1}})) ? "443" : "80" }" diff --git a/roles/main_infra/templates/terraform.tfvars.j2 b/roles/main_infra/templates/terraform.tfvars.j2 index c18a569..0ca8988 100644 --- a/roles/main_infra/templates/terraform.tfvars.j2 +++ b/roles/main_infra/templates/terraform.tfvars.j2 @@ -1,4 +1,4 @@ -prefix = "{{ prefix }}" +prefix = "{{ group_names[0] }}" key_name = "{{ ec2_ssh_key_name }}" key_content = "{{ ec2_ssh_key_content }}" vpc_cidr = "{{ vpc_cidr }}" @@ -9,92 +9,92 @@ instance_type = "{{ instance_type }}" root_block_size = "{{ root_block_size }}" pool_size = { -{% for key, value in chain_custom_environment.iteritems() %} -{{ key }}="{{ value['POOL_SIZE']|default('30') }}"{% if not loop.last %},{% endif %} +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['POOL_SIZE'] | default('30') }}"{% if not loop.last %},{% endif %} {% endfor %} } secret_key_base = { -{% for key, value in chain_custom_environment.iteritems() %} -{{ key }}="{{ value['SECRET_KEY_BASE']|default('TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==') }}"{% if not loop.last %},{% endif %} +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['SECRET_KEY_BASE']|default('TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==') }}"{% if not loop.last %},{% endif %} {% endfor %} } use_ssl = { -{% for key, value in chain_custom_environment.iteritems() %} -{{ key }}="{{ value['ECTO_USE_SSL']|default('false') }}"{% if not loop.last %},{% endif %} +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['ECTO_USE_SSL']|default('false') }}"{% if not loop.last %},{% endif %} {% endfor %} } 
alb_ssl_policy = { -{% for key, value in chain_custom_environment.iteritems() %} -{{ key }}="{{ value['ALB_SSL_POLICY']|default('') }}"{% if not loop.last %},{% endif %} +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['ALB_SSL_POLICY']|default('') }}"{% if not loop.last %},{% endif %} {% endfor %} } alb_certificate_arn = { -{% for key, value in chain_custom_environment.iteritems() %} -{{ key }}="{{ value['ALB_CERTIFICATE_ARN']|default('') }}"{% if not loop.last %},{% endif %} +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['ALB_CERTIFICATE_ARN']|default('') }}"{% if not loop.last %},{% endif %} {% endfor %} } chains = [ -{% for key,value in chain_custom_environment.iteritems() %} -"{{ key }}"{% if not loop.last %},{% endif %} +{% for host in groups[group_names[0]] %} +"{{ hostvars[host]['chain'] }}"{% if not loop.last %},{% endif %} {% endfor %} ] -chain_db_id = { -{% for key, value in chain_db_id.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_id = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_id'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_name = { -{% for key, value in chain_db_name.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_name = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_name'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_username = { -{% for key, value in chain_db_username.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_username = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_username'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_password = { -{% for key, value in chain_db_password.iteritems() %} -{{ key }} = "{{ value }}"{% if not 
loop.last %},{% endif %} +db_password = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_password'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_instance_class = { -{% for key, value in chain_db_instance_class.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_instance_class = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_instance_class'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_storage = { -{% for key, value in chain_db_storage.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_storage = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_storage'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_storage_type = { -{% for key, value in chain_db_storage_type.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_storage_type = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_storage_type'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_iops = { -{% for key, value in db_iops.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_iops = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_iops'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -chain_db_version = { -{% for key, value in chain_db_version.iteritems() %} -{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %} +db_version = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_version'] }}"{% if not loop.last %},{% endif %} {% endfor %} } diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 78bf729..5e8b762 100644 --- a/roles/main_software/tasks/main.yml +++ 
b/roles/main_software/tasks/main.yml @@ -1,52 +1,51 @@ - name: Clone BlockScout git: repo: "{{ blockscout_repo }}" - dest: "blockscout-{{ chain }}" - version: "{{ chain_branch[chain] }}" + dest: "blockscout-{{ group_names[0] }}-{{ chain }}" + version: "{{ branch }}" force: true when: skip_fetch | bool != true - name: Git clean command: "git clean -fdx" args: - chdir: "blockscout-{{ chain }}" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" when: skip_fetch | bool != true - name: Merge branches - command: "git merge {{ chain_merge_commit[chain] }}" + command: "git merge {{ merge_commit_item }}" args: - chdir: "blockscout-{{ chain }}" - when: skip_fetch | bool != true and chain_merge_commit_item != 'false' + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" + when: merge_commit_item and not skip_fetch | bool vars: - chain_mc: "{{ chain_merge_commit | default({}) }}" - chain_merge_commit_item: "{{ chain_mc[chain] | default('false') }}" + merge_commit_item: "{{ merge_commit | default(false) }}" - name: Copy web config files copy: - src: "blockscout-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example" - dest: "blockscout-{{ chain }}/apps/block_scout_web/config/dev.secret.exs" + src: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example" + dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs" -- name: Template explorer config files +- name: Template explorer config files template: src: dev.secret.exs.j2 - dest: "blockscout-{{ chain }}/apps/explorer/config/dev.secret.exs" - when: ps_db is defined + dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" + when: ps_user is defined - name: Copy default explorer config files copy: - src: "blockscout-{{ chain }}/apps/explorer/config/dev.secret.exs.example" - dest: "blockscout-{{ chain }}/apps/explorer/config/dev.secret.exs" - when: ps_db is undefined or ps_db == "" + src: "blockscout-{{ 
group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs.example" + dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" + when: ps_user is undefined or ps_user == "" - name: Remove static assets from previous deployment, if any file: - path: "blockscout-{{ chain }}/apps/block_scout_web/priv/static" + path: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static" state: absent - name: Compile BlockScout command: "mix do {{ item }}" args: - chdir: "blockscout-{{ chain }}" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" with_items: - deps.get - local.rebar --force @@ -59,117 +58,132 @@ - name: Install Node modules at apps/block_scout_web/assets command: npm install args: - chdir: "blockscout-{{ chain }}/apps/block_scout_web/assets" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" - name: Execute webpack.js at apps/block_scout_web/assets/node_modules/webpack/bin command: node_modules/webpack/bin/webpack.js --mode production args: - chdir: "blockscout-{{ chain }}/apps/block_scout_web/assets" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" - name: Instal Node modules at apps/explorer command: npm install args: - chdir: "blockscout-{{ chain }}/apps/explorer" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer" - name: Install SSL certificates command: mix phx.gen.cert blockscout blockscout.local args: - chdir: "blockscout-{{ chain }}/apps/block_scout_web" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web" - name: Fetch environment variables (via access key) set_fact: - chain_env: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=aws_region|default('us-east-1'), shortnames=true, bypath=true, recursive=true ) }}" + env_compiled: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=aws_region|default('us-east-1'),
shortnames=true, bypath=true, recursive=true ) }}" vars: - path: "/{{ prefix }}/{{ chain }}" + path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is defined - name: Fetch environment variables (via profile) set_fact: - chain_env: "{{ lookup('aws_ssm', path, aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}" + env_compiled: "{{ lookup('aws_ssm', path, aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}" vars: - path: "/{{ prefix }}/{{ chain }}" + path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is undefined - name: Make config variables lowercase set_fact: - chain_lower_env: "{{ chain_lower_env | combine ({item.key|lower : item.value}) }}" - with_dict: "{{ chain_custom_environment_chain }}" - when: chain_custom_environment_chain|length > 0 + lower_env: "{{ lower_env | combine ({item.key|lower : item.value}) }}" + with_dict: "{{ custom_environment_chain }}" + when: custom_environment_chain|length > 0 vars: - chain_lower_env: {} - chain_custom_environment_chain: "{{ chain_cec[chain] | default({}) if chain_cec[chain]>0 else {} }}" - chain_cec: "{{ chain_custom_environment | default ({}) }}" + lower_env: {} + custom_environment_chain: "{{ env_vars | default({}) if env_vars>0 else {} }}" - name: Override env variables set_fact: - chain_env: "{{ chain_env | combine(chain_lower_env) }}" - when: chain_lower_env is defined + env_compiled: "{{ env_compilated | combine(lower_env) }}" + when: lower_env is defined - name: Uppercase chain set_fact: - chain_upper_env: "{{ chain_upper_env | combine ({item.key|upper : item.value}) }}" - with_dict: "{{ chain_env }}" + upper_env: "{{ upper_env | combine ({item.key|upper : item.value}) }}" + with_dict: "{{ env_compiled }}" vars: - chain_upper_env: {} + upper_env: {} - name: Start server block: + - set_fact: + server_port: "{{ 65535|random(seed=inventory_hostname,start=1024) }}" + + - set_fact: + server_env: "{{ upper_env | 
combine({'NETWORK_PATH':'/','PORT':server_port,'MIX_ENV':'prod'}) }}" + - name: Start server command: "mix phx.server" - environment: "{{ chain_upper_env | combine({'NETWORK_PATH':'/'}) }}" - ignore_errors: true + environment: "{{ server_env }}" args: - chdir: "blockscout-{{ chain }}" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" async: 10000 poll: 0 - + + - debug: + msg: "Please, open your browser at following addresses:" + run_once: true + + - debug: + msg: "{{ ansible_host }}:{{ server_port }}" + - name: User prompt pause: - prompt: "Please, open your browser and open 4000 port at the machine were Ansible is currently run. BlockScout should appear. Ensure that there is no visual artifacts and then press Enter to continue. Press Ctrl+C and then A if you face any issues to cancel the deployment." - rescue: - - name: 'Stop execution' - fail: - msg: "Execution aborted." + prompt: "BlockScout should appear. Ensure that there is no visual artifacts and then press Enter to continue. Press Ctrl+C and then A if you face any issues to cancel the deployment. Note: Localhost stands for the machine were Ansible is currently run." + run_once: true + register: prompt always: - name: kill server command: "pkill -f {{ item }}" with_items: - beam.smp - - node - - erlang - failed_when: false - when: + - webpack.js + failed_when: false + +- name: Check for execution interrupt + fail: + msg: "Execution aborted" + when: prompt is failed - name: Build static assets command: mix phx.digest args: - chdir: "blockscout-{{ chain }}" + chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" - name: User prompt pause: prompt: "Would you like to remove staging dependencies? 
[Yes/No] Default: Yes" register: user_answer + until: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no" and user_answer.user_input|lower != "true" and user_answer.user_input|lower != "yes" + retries: 10000 + delay: 1 - name: Remove dev dependencies file: state: absent path: "{{ item }}" with_items: - - "blockscout-{{ chain }}/_build/" - - "blockscout-{{ chain }}/deps/" - - "blockscout-{{ chain }}/apps/block_scout_web/assets/node_modules/" - - "blockscout-{{ chain }}/apps/explorer/node_modules/" - - "blockscout-{{ chain }}/logs/dev/" - when: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no" + - "blockscout-{{ group_names[0] }}-{{ chain }}/_build/" + - "blockscout-{{ group_names[0] }}-{{ chain }}/deps/" + - "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets/node_modules/" + - "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/node_modules/" + - "blockscout-{{ group_names[0] }}-{{ chain }}/logs/dev/" + when: user_answer.user_input | lower | bool - name: Fix bug with favicon replace: regexp: '\"favicon\.ico\"\:\"favicon-[a-z0-9]+?\.ico\"' replace: '"images/favicon.ico":"favicon.ico"' - path: "blockscout-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json" + path: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json" - name: Upload Blockscout to S3 - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ prefix }}-explorer --s3-location s3://{{ prefix }}-explorer-codedeploy-releases/blockscout-{{ chain }}.zip --source=blockscout-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push 
--application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" register: push_output - name: Upload output @@ -180,10 +194,13 @@ pause: prompt: "Do you want to update the Parameter Store variables? [Yes/No] Default: Yes" register: user_answer + until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes" + retries: 10000 + delay: 1 - name: Update chain variables aws_ssm_parameter_store: - name: "/{{ prefix }}/{{ chain }}/{{ item.key }}" + name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" value: "{{ item.value }}" profile: "{{ profile }}" aws_access_key: "{{ access_key }}" @@ -194,15 +211,17 @@ secret_key: "{{ aws_secret_key|default(omit) }}" profile: "{{ aws_profile|default(omit) }}" region: "{{ aws_region|default(omit) }}" - with_dict: "{{ chain_lower_env }}" - - when: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no" + with_dict: "{{ lower_env }}" + when: user_answer.user_input | lower | bool - name: User prompt pause: prompt: "Do you want to deploy BlockScout? 
[Yes/No] Default: Yes" register: user_answer + until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes" + retries: 10000 + delay: 1 - name: Deploy Blockscout - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ prefix }}-explorer-dg{{ index }} --deployment-config-name CodeDeployDefault.OneAtATime --description '{{ chain_upper_env['BLOCKSCOUT_VERSION'] }}' {{ '--profile='~aws_profile if aws_profile is defined else '' }}" - when: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no" + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime --description '{{ env_compiled['BLOCKSCOUT_VERSION'] }}' {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + when: user_answer.user_input | lower | bool diff --git a/roles/s3/tasks/main.yml b/roles/s3/tasks/main.yml index e91b7d3..aba59a4 100644 --- a/roles/s3/tasks/main.yml +++ b/roles/s3/tasks/main.yml @@ -1,6 +1,6 @@ - name: Create S3 bucket aws_s3: - bucket: "{{ prefix }}-{{ bucket }}" + bucket: "{{ group_names[0] }}-{{ bucket }}" mode: create permission: private profile: "{{ profile }}" @@ -15,11 +15,11 @@ - name: Apply tags and versioning to create S3 bucket s3_bucket: - name: "{{ prefix }}-{{ bucket }}" + name: "{{ group_names[0] }}-{{ bucket }}" versioning: yes tags: origin: terraform - prefix: "{{ prefix }}" + prefix: "{{ inventory_hostname }}" profile: "{{ profile }}" aws_access_key: "{{ 
access_key }}" aws_secret_key: "{{ secret_key }}" @@ -32,7 +32,7 @@ - name: Add lifecycle management policy to created S3 bucket s3_lifecycle: - name: "{{ prefix }}-{{ bucket }}" + name: "{{ group_names[0] }}-{{ bucket }}" rule_id: "expire" noncurrent_version_expiration_days: 90 status: enabled diff --git a/roles/s3_config/tasks/config.yml b/roles/s3_config/tasks/config.yml new file mode 100644 index 0000000..c542cb9 --- /dev/null +++ b/roles/s3_config/tasks/config.yml @@ -0,0 +1,38 @@ +- name: Check if config file exists + stat: + path: "{{ playbook_dir }}/{{ file }}" + register: stat_result + +- name: Copy temporary file to be uploaded + command: "cp {{ playbook_dir }}/{{ file }} {{ playbook_dir }}/{{ file }}.temp" + when: stat_result.stat.exists + +- name: Remove insecure AWS variables + replace: + path: "{{ playbook_dir }}/{{ file }}.temp" + regexp: 'aws_.*' + replace: '' + when: stat_result.stat.exists + +- name: Upload config to S3 bucket + aws_s3: + bucket: "{{ group_names[0] }}-{{ bucket }}" + object: all.yml + src: "{{ playbook_dir }}/{{ file }}.temp" + mode: put + profile: "{{ profile }}" + aws_access_key: "{{ access_key }}" + aws_secret_key: "{{ secret_key }}" + region: "{{ region }}" + vars: + access_key: "{{ aws_access_key|default(omit) }}" + secret_key: "{{ aws_secret_key|default(omit) }}" + profile: "{{ aws_profile|default(omit) }}" + region: "{{ aws_region|default(omit) }}" + when: stat_result.stat.exists + +- name: Remove temp file + file: + path: "{{ playbook_dir }}/{{ file }}.temp" + state: absent + when: stat_result.stat.exists diff --git a/roles/s3_config/tasks/main.yml b/roles/s3_config/tasks/main.yml index dd598a9..14a0602 100644 --- a/roles/s3_config/tasks/main.yml +++ b/roles/s3_config/tasks/main.yml @@ -1,45 +1,8 @@ -- name: Check if config file exists - stat: - path: "{{ playbook_dir }}/group_vars/all.yml" - register: stat_result - -- name: Copy temporary file to be uploaded - command: "cp {{ playbook_dir }}/group_vars/all.yml {{ 
playbook_dir }}/group_vars/all.yml.temp" - when: stat_result.stat.exists == True - -- name: Remove insecure AWS variables - replace: - path: "{{ playbook_dir }}/group_vars/all.yml.temp" - regexp: 'aws_.*' - replace: '' - when: stat_result.stat.exists == True - -- name: Remove other insecure variables - replace: - path: "{{ playbook_dir }}/group_vars/all.yml.temp" - regexp: 'secret_.*' - replace: '' - when: stat_result.stat.exists == True - -- name: Upload config to S3 bucket - aws_s3: - bucket: "{{ prefix }}-{{ bucket }}" - object: all.yml - src: "{{ playbook_dir }}/group_vars/all.yml.temp" - mode: put - profile: "{{ profile }}" - aws_access_key: "{{ access_key }}" - aws_secret_key: "{{ secret_key }}" - region: "{{ region }}" - vars: - access_key: "{{ aws_access_key|default(omit) }}" - secret_key: "{{ aws_secret_key|default(omit) }}" - profile: "{{ aws_profile|default(omit) }}" - region: "{{ aws_region|default(omit) }}" - when: stat_result.stat.exists == True - -- name: Remove temp file - file: - path: "{{ playbook_dir }}/group_vars/all.yml.temp" - state: absent - when: stat_result.stat.exists == True +- name: "Loop over config files" + include: subtasks.yml file={{item}} + with_items: + - "group_vars/all.yml" + - "group_vars/{{ group_names[0] }}" + - "group_vars/{{ group_names[0] }}.yml" + - "host_vars/{{ inventory_hostname }}.yml" + - "host_vars/{{ inventory_hostname }}" diff --git a/roles/s3_debug/tasks/main.yml b/roles/s3_debug/tasks/main.yml index 084af1d..4f617d3 100644 --- a/roles/s3_debug/tasks/main.yml +++ b/roles/s3_debug/tasks/main.yml @@ -5,7 +5,7 @@ - name: Upload logs to s3 aws_s3: - bucket: "{{ prefix }}-{{ bucket }}" + bucket: "{{ group_names[0] }}-{{ bucket }}" object: log.txt src: "{{ playbook_dir }}/log.txt" mode: put From b89f8309789427163d0335e9a032be4e03a33adc Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Tue, 4 Jun 2019 18:58:36 +0300 Subject: [PATCH 02/24] Update README --- README.md | 148 ++++++++++++++++----------- 
group_vars/blockscout.yml.example | 6 ++ host_vars/blockscout.yml.example | 1 + host_vars/infrastructure.yml.example | 8 +- 4 files changed, 95 insertions(+), 68 deletions(-) create mode 100644 group_vars/blockscout.yml.example diff --git a/README.md b/README.md index 1ec9889..fda6ff4 100644 --- a/README.md +++ b/README.md @@ -14,11 +14,12 @@ Also you may want to refer to the `lambda` folder which contains a set of script Playbooks relies on Terraform under the hood, which is the stateful infrastructure-as-a-code software tool. It allows to keep a hand on your infrastructure - modify and recreate single and multiple resources depending on your needs. +This version of playbooks supports the multi-hosts deployment, which means that test BlockScout instances can be built on remote machines. In that case, you will need to have the Ansible, installed on jumpbox (controller) and all the prerequisites, that are described below, installed on runners. + ## Prerequisites for deploying infrastructure | Dependency name | Installation method | | -------------------------------------- | ------------------------------------------------------------ | -| Ansible >= 2.6 | [Installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) | | Terraform >=0.11.11 | [Installation guide](https://learn.hashicorp.com/terraform/getting-started/install.html) | | Python >=2.6.0 | `apt install python` | | Python-pip | `apt install python-pip` | @@ -28,7 +29,6 @@ Playbooks relies on Terraform under the hood, which is the stateful infrastructu | Dependency name | Installation method | | -------------------------------------- | ------------------------------------------------------------ | -| Ansible >= 2.7.3 | [Installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) | | Terraform >=0.11.11 | [Installation guide](https://learn.hashicorp.com/terraform/getting-started/install.html) | | Python >=2.6.0 | `apt 
install python` | | Python-pip | `apt install python-pip` | @@ -63,24 +63,36 @@ Each configured chain will receive its own ASG (autoscaling group) and deploymen The deployment process goes in two stages. First, Ansible creates S3 bucket and DynamoDB table that are required for Terraform state management. It is needed to ensure that Terraforms state is stored in a centralized location, so that multiple people can use Terraform on the same infra without stepping on each others toes. Terraform prevents this from happening by holding locks (via DynamoDB) against the state data (stored in S3). # Configuration +There are three groups of variables required to build BlockScout. Furst is required to create infrastructure, second is required to build BlockScout instances and the third is the one that is required both for infra and BS itself. +For your convenience we have divided variable templates into three files accordingly - `infrastructure.yml.example`, `blockscout.yml.example` and `all.yml.example` . Also we have divided those files to place them in `group_vars` and in `host_vars` folder, so you will not have to repeat some of the variables for each host/group. -The single point of configuration in this script is a `group_vars/all.yml` file. First, copy it from `group_vars/all.yml.example` template by executing `cp group_vars/all.yml.example group_vars/all.yml` command and then modify it via any text editor you want (vim example - `vim group_vars/all.yml`). The subsections describe the variable you may want to adjust. 
+In order to deploy BlockScout, you will have to setup the following set of files for each BlockScout instance: + +``` +/ +| - group_vars +| | - group.yml (combination of [blockscout+infrastructure+all].yml.example) +| | - all.yml (optional) +| - host_vars +| | - host.yml (combination of [blockscout+infrastructure+all].yml.example) +| - hosts +``` ## Common variables -- `aws_access_key` and `aws_secret_key` is a credentials pair that provides access to AWS for the deployer; +- `ansible_host` - is an address where BlockScout will be built. If this variable is set to localhost, also set `ansible_connection` to `local` for better performance. +- `chain` variable set the name of the network (Kovan, Core, xDAI, etc.). Will be used as part of the infrastructure resource names. +- `env_vars` represents a set of environment variables used by BlockScout. You can see the description of this variables at [POA Forum](https://forum.poa.network/t/faq-blockscout-environment-variables/1814). +- `aws_access_key` and `aws_secret_key` is a credentials pair that provides access to AWS for the deployer; You can use the `aws_profile` instead. In that case, AWS CLI profile will be used. Also, if none of the access key and profile provided, the `default` AWS profile will be used. The `aws_region` should be left at `us-east-1` as some of the other regions fail for different reasons; - `backend` variable defines whether deployer should keep state files remote or locally. Set `backend` variable to `true` if you want to save state file to the remote S3 bucket; - `upload_config_to_s3` - set to `true` if you want to upload config `all.yml` file to the S3 bucket automatically after the deployment. Will not work if `backend` is set to false; - `upload_debug_info_to_s3` - set to `true` if you want to upload full log output to the S3 bucket automatically after the deployment. Will not work if `backend` is set to false. 
*IMPORTANT*: Locally logs are stored at `log.txt` which is not cleaned automatically. Please, do not forget to clean it manually or using the `clean.yml` playbook; - `bucket` represents a globally unique name of the bucket where your configs and state will be stored. It will be created automatically during the deployment; -- `prefix` - is a unique tag to use for provisioned resources (5 alphanumeric chars or less); -- `chains` - maps chains to the URLs of HTTP RPC endpoints, an ordinary blockchain node can be used; -- The `region` should be left at `us-east-1` as some of the other regions fail for different reasons; *Note*: a chain name shouldn't be more than 5 characters. Otherwise, it causing the error, because the aws load balancer name should not be greater than 32 characters. ## Infrastructure related variables - +- `terraform_location` is an address of the Terraform binary on the builder; - `dynamodb_table` represents the name of table that will be used for Terraform state lock management; - If `ec2_ssh_key_content` variable is not empty, Terraform will try to create EC2 SSH key with the `ec2_ssh_key_name` name. Otherwise, the existing key with `ec2_ssh_key_name` name will be used; - `instance_type` defines a size of the Blockscout instance that will be launched during the deployment process; @@ -90,41 +102,19 @@ The single point of configuration in this script is a `group_vars/all.yml` file. `db_subnet_cidr`: "10.0.1.0/16" Real networks: 10.0.1.0/24 and 10.0.2.0/24 - An internal DNS zone with`dns_zone_name` name will be created to take care of BlockScout internal communications; -- The name of a IAM key pair to use for EC2 instances, if you provide a name which - already exists it will be used, otherwise it will be generated for you; - -* If `use_ssl` is set to `false`, SSL will be forced on Blockscout. To configure SSL, use `alb_ssl_policy` and `alb_certificate_arn` variables; - - The `root_block_size` is the amount of storage on your EC2 instance. 
This value can be adjusted by how frequently logs are rotated. Logs are located in `/opt/app/logs` of your EC2 instance; -- The `pool_size` defines the number of connections allowed by the RDS instance; -- `secret_key_base` is a random password used for BlockScout internally. It is highly recommended to gernerate your own `secret_key_base` before the deployment. For instance, you can do it via `openssl rand -base64 64 | tr -d '\n'` command; -- `new_relic_app_name` and `new_relic_license_key` should usually stay empty unless you want and know how to configure New Relic integration; -- `elixir_version` - is an Elixir version used in BlockScout release; -- `chain_trace_endpoint` - maps chains to the URLs of HTTP RPC endpoints, which represents a node where state pruning is disabled (archive node) and tracing is enabled. If you don't have a trace endpoint, you can simply copy values from `chains` variable; -- `chain_ws_endpoint` - maps chains to the URLs of HTTP RPCs that supports websockets. This is required to get the real-time updates. Can be the same as `chains` if websocket is enabled there (but make sure to use`ws(s)` instead of `htpp(s)` protocol); -- `chain_jsonrpc_variant` - a client used to connect to the network. Can be `parity`, `geth`, etc; -- `chain_logo` - maps chains to the it logos. Place your own logo at `apps/block_scout_web/assets/static` and specify a relative path at `chain_logo` variable; -- `chain_coin` - a name of the coin used in each particular chain; -- `chain_network` - usually, a name of the organization keeping group of networks, but can represent a name of any logical network grouping you want; -- `chain_subnetwork` - a name of the network to be shown at BlockScout; -- `chain_network_path` - a relative URL path which will be used as an endpoint for defined chain. 
For example, if we will have our BlockScout at `blockscout.com` domain and place `core` network at `/poa/core`, then the resulting endpoint will be `blockscout.com/poa/core` for this network. -- `chain_network_icon` - maps the chain name to the network navigation icon at apps/block_scout_web/lib/block_scout_web/templates/icons without .eex extension -- `chain_graphiql_transaction` - is a variable that maps chain to a random transaction hash on that chain. This hash will be used to provide a sample query in the GraphIQL Playground. -- `chain_block_transformer` - will be `clique` for clique networks like Rinkeby and Goerli, and `base` for the rest; -- `chain_heart_beat_timeout`, `chain_heart_command` - configs for the integrated heartbeat. First describes a timeout after the command described at the second variable will be executed; -- Each of the `chain_db_*` variables configures the database for each chain. Each chain will have the separate RDS instance. -- `chain_blockscout_version` - is a text at the footer of BlockScout instance. Usually represents the current BlockScout version. + +- Each of the `db_*` variables configures the database for each chain. Each chain will have the separate RDS instance; +- `instance_type` represent the size of the EC2 instance to be deployed in production; +- `use_placement_group` determines whether or not to launch BlockScout in a placement group. ## Blockscout related variables - `blockscout_repo` - a direct link to the Blockscout repo; -- `chain_branch` - maps branch at `blockscout_repo` to each chain; -- Specify the `chain_merge_commit` variable if you want to merge any of the specified `chains` with the commit in the other branch. Usually may be used to update production branches with the releases from master branch; +- `branch` - maps branch at `blockscout_repo` to each chain; +- Specify the `merge_commit` variable if you want to merge any of the specified `chains` with the commit in the other branch. 
Usually may be used to update production branches with the releases from master branch; - `skip_fetch` - if this variable is set to `true` , BlockScout repo will not be cloned and the process will start from building the dependencies. Use this variable to prevent playbooks from overriding manual changes in cloned repo; - `ps_*` variables represents a connection details to the test Postgres database. This one will not be installed automatically, so make sure `ps_*` credentials are valid before starting the deployment; -- `chain_custom_environment` - is a map of variables that should be overrided when deploying the new version of Blockscout. Can be omitted. - -*Note*: `chain_custom_environment` variables will not be propagated to the Parameter Store at production servers and need to be set there manually. ## Database Storage Required @@ -142,37 +132,41 @@ The configuration variable `db_storage` can be used to define the amount of stor # Deploying the Infrastructure 1. Ensure all the [infrastructure prerequisites](#Prerequisites-for-deploying-infrastructure) are installed and has the right version number; - 2. Create the AWS access key and secret access key for user with [sufficient permissions](#AWS); +3. Create `hosts` file from `hosts.example` (`mv hosts.example hosts`) and adjust to your needs. Each host should represent each BlockScout instance you want to deploy. Note, that each host name should belong exactly to one group. Also, as per Ansible requirements, hosts and groups names should be unique. -3. Merge `infrastructure` and `all` config template files into single config file: -```bash -cat group_vars/infrastructure.yml.example group_vars/all.yml.example > group_vars/all.yml +The simplest `hosts` file with one BlockScout instance will look like: + +```ini +[group] +host ``` -4. 
Set the variables at `group_vars/all.yml` config template file as described at the [corresponding part of instruction](#Configuration); +Where `[group]` is a group name, which will be interpreted as a `prefix` for all created resources and `host` is a name of BlockScout instance. -5. Run `ansible-playbook deploy_infra.yml`; +4. For each host merge `infrastructure.yml.example` and `all.yml.example` config template files in `host_vars` folder into single config file with the same name as in `hosts` file: - - During the deployment the ["diffs didn't match"](#error-applying-plan-diffs-didnt-match) error may occur, it will be ignored automatically. If Ansible play recap shows 0 failed plays, then the deployment was successful despite the error. +```bash +cat host_vars/infrastructure.yml.example host_vars/all.yml.example > host_vars/host.yml +``` +5. For each group merge `infrastructure.yml.example` and `all.yml.example` config template files in `group_vars` folder into single config file with the same name as group name in `hosts` file: - - Optionally, you may want to check the variables the were uploaded to the [Parameter Store](https://console.aws.amazon.com/systems-manager/parameters) at AWS Console. +```bash +cat group_vars/infrastructure.yml.example group_vars/all.yml.example > group_vars/group.yml +``` + +6. Adjust the variables at `group_vars` and `host_vars`. Note - you can move variables between host and group vars depending on if variable should be applied to the host or to the entire group. The list of the variables you can find at the [corresponding part of instruction](#Configuration); +Also, if you need to **distribute variables accross all the hosts/groups**, you can add these variables to the `group_vars/all.yml` file. Note about variable precedence => [Official Ansible Docs](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable). + +7. 
Run `ansible-playbook deploy_infra.yml`; + +- During the deployment the ["diffs didn't match"](#error-applying-plan-diffs-didnt-match) error may occur, it will be ignored automatically. If Ansible play recap shows 0 failed plays, then the deployment was successful despite the error. +- Optionally, you may want to check the variables the were uploaded to the [Parameter Store](https://console.aws.amazon.com/systems-manager/parameters) at AWS Console. # Deploying BlockScout -1. Ensure all the [BlockScout prerequisites](#Prerequisites-for-deploying-blockscout) are installed and has the right version number; -2. Merge `blockscout` and `all` config template files into single config file: - -```bash -cat group_vars/blockscout.yml.example group_vars/all.yml.example > group_vars/all.yml -``` -**Note!** All three configuration files are compatible to each other, so you can simply `cat group_vars/blockscout.yml.example >> group_vars/all.yml` if you already do have the `all.yml` file after the deploying of infrastructure. - -3. Set the variables at `group_vars/all.yml` config template file as described at the [corresponding part of instruction](#Configuration); - **Note!** Use `chain_custom_environment` to update the variables in each deployment. Map each deployed chain with variables as they should appear at the Parameter Store. Check the example at `group_vars/blockscout.yml.example` config file. `chain_*` variables will be ignored during BlockScout software deployment. - -4. This step is for mac OS users. Please skip it, if this is not your case. +0. (optional) This step is for mac OS users. Please skip it, if this is not your case. To avoid the error ``` @@ -188,11 +182,39 @@ error and crashing of Python follow the next steps: (source: https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr); -5. Run `ansible-playbook deploy_software.yml`; -6. 
When the prompt appears, check that server is running and there is no visual artifacts. The server will be launched at port 4000 at the same machine where you run the Ansible playbooks. If you face any errors you can either fix it or cancel the deployment by pressing **Ctrl+C** and then pressing **A** when additionally prompted. -7. When server is ready to be deployed simply press enter and deployer will upload Blockscout to the appropriate S3. -8. Two other prompts will appear to ensure your will on updating the Parameter Store variables and deploying the BlockScout through the CodeDeploy. Both **yes** and **true** will be interpreted as the confirmation. -9. Monitor and manage your deployment at [CodeDeploy](https://console.aws.amazon.com/codesuite/codedeploy/applications) service page at AWS Console. +1. Ensure all the [BlockScout prerequisites](#Prerequisites-for-deploying-blockscout) are installed and has the right version number; +2. Create the AWS access key and secret access key for user with [sufficient permissions](#AWS); +3. Create `hosts` file from `hosts.example` (`mv hosts.example hosts`) and adjust to your needs. Each host should represent each BlockScout instance you want to deploy. Note, that each host name should belong exactly to one group. Also, as per Ansible requirements, hosts and groups names should be unique. + +The simplest `hosts` file with one BlockScout instance will look like: + +```ini +[group] +host +``` + +Where `[group]` is a group name, which will be interpreted as a `prefix` for all created resources and `host` is a name of BlockScout instance. + +4. 
For each host merge `blockscout.yml.example` and `all.yml.example` config template files in `host_vars` folder into single config file with the same name as in `hosts` file: + +```bash +cat host_vars/blockscout.yml.example host_vars/all.yml.example > host_vars/host.yml +``` +If you have already merged `infrastructure.yml.example` and `all.yml` while deploying the BlockScout infrastructure, you can simply add the `blockscout.yml.example` to the merged file: `cat host_vars/blockscout.yml.example >> host_vars/host.yml` +5. For each group merge `blockscout.yml.example` and `all.yml.example` config template files in `group_vars` folder into single config file with the same name as group name in `hosts` file: + +```bash +cat group_vars/blockscout.yml.example group_vars/all.yml.example > group_vars/group.yml +``` +If you have already merged `infrastructure.yml.example` and `all.yml` while deploying the BlockScout infrastructure, you can simply add the `blockscout.yml.example` to the merged file: `cat group_vars/blockscout.yml.example >> group_vars/host.yml` +6. Adjust the variables at `group_vars` and `host_vars`. Note - you can move variables between host and group vars depending on if variable should be applied to the host or to the entire group. The list of the variables you can find at the [corresponding part of instruction](#Configuration); +Also, if you need to **distribute variables accross all the hosts/groups**, you can add these variables to the `group_vars/all.yml` file. Note about variable precedence => [Official Ansible Docs](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable). + +7. Run `ansible-playbook deploy_software.yml`; +8. When the prompt appears, check that server is running and there is no visual artifacts. The server will be launched at port 4000 at the same machine where you run the Ansible playbooks. 
If you face any errors you can either fix it or cancel the deployment by pressing **Ctrl+C** and then pressing **A** when additionally prompted. +9. When server is ready to be deployed simply press enter and deployer will upload Blockscout to the appropriate S3. +10. Two other prompts will appear to ensure your will on updating the Parameter Store variables and deploying the BlockScout through the CodeDeploy. Both **yes** and **true** will be interpreted as the confirmation. +11. Monitor and manage your deployment at [CodeDeploy](https://console.aws.amazon.com/codesuite/codedeploy/applications) service page at AWS Console. # Destroying Provisioned Infrastructure @@ -261,3 +283,7 @@ This is due to a bug in Terraform, however the fix is to just rerun `ansible-pla ### Server doesn't start during deployment Even if server is configured correctly, sometimes it may not bind the appropriate 4000 port due to unknown reason. If so, simply go to the appropriate nested blockscout folder, kill and rerun server. For example, you can use the following command: `pkill beam.smp && pkill node && sleep 10 && mix phx.server`. 
+ +``` + +``` \ No newline at end of file diff --git a/group_vars/blockscout.yml.example b/group_vars/blockscout.yml.example new file mode 100644 index 0000000..f1174ac --- /dev/null +++ b/group_vars/blockscout.yml.example @@ -0,0 +1,6 @@ +blockscout_repo: https://github.com/poanetwork/blockscout + +# Please, specify the credentials for the test Postgres installation +ps_host: localhost +ps_user: myuser +ps_password: mypass \ No newline at end of file diff --git a/host_vars/blockscout.yml.example b/host_vars/blockscout.yml.example index 92441f3..167b6bb 100644 --- a/host_vars/blockscout.yml.example +++ b/host_vars/blockscout.yml.example @@ -3,3 +3,4 @@ skip_fetch: true blockscout_repo: https://github.com/poanetwork/blockscout branch: "production-core" #merge_commit: "2cdead1" +ps_db: mydb # The name of the test DB to store data in; \ No newline at end of file diff --git a/host_vars/infrastructure.yml.example b/host_vars/infrastructure.yml.example index 2a56cbd..743cb47 100644 --- a/host_vars/infrastructure.yml.example +++ b/host_vars/infrastructure.yml.example @@ -13,10 +13,4 @@ db_storage_type: "gp2" # see https://docs.aws.amazon.com/AmazonRDS/latest/UserGu db_version: "10.6" #Blockscout uses Postgres as the DB engine. This variable describes the Postgres version used in each particular chain. 
instance_type: "m5.large" # EC2 BlockScout Instance will have this type -use_placement_group: false # Choose wheter or not to group BlockScout instances into group - -# Please, specify the credentials for the test Postgres installation -ps_host: localhost -ps_user: myuser -ps_password: mypass -ps_db: mydb +use_placement_group: false # Choose wheter or not to group BlockScout instances into group \ No newline at end of file From 20537a78a8cb1610e4a991ddba34eee104d73d1f Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Tue, 4 Jun 2019 19:00:26 +0300 Subject: [PATCH 03/24] Add more infro to README --- README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index fda6ff4..0a00f28 100644 --- a/README.md +++ b/README.md @@ -66,16 +66,16 @@ The deployment process goes in two stages. First, Ansible creates S3 bucket and There are three groups of variables required to build BlockScout. Furst is required to create infrastructure, second is required to build BlockScout instances and the third is the one that is required both for infra and BS itself. For your convenience we have divided variable templates into three files accordingly - `infrastructure.yml.example`, `blockscout.yml.example` and `all.yml.example` . Also we have divided those files to place them in `group_vars` and in `host_vars` folder, so you will not have to repeat some of the variables for each host/group. 
-In order to deploy BlockScout, you will have to setup the following set of files for each BlockScout instance: +In order to deploy BlockScout, you will have to setup the following set of files for each instance: ``` / | - group_vars | | - group.yml (combination of [blockscout+infrastructure+all].yml.example) -| | - all.yml (optional) +| | - all.yml (optional, one for all instances) | - host_vars | | - host.yml (combination of [blockscout+infrastructure+all].yml.example) -| - hosts +| - hosts (one for all instances) ``` ## Common variables @@ -283,7 +283,3 @@ This is due to a bug in Terraform, however the fix is to just rerun `ansible-pla ### Server doesn't start during deployment Even if server is configured correctly, sometimes it may not bind the appropriate 4000 port due to unknown reason. If so, simply go to the appropriate nested blockscout folder, kill and rerun server. For example, you can use the following command: `pkill beam.smp && pkill node && sleep 10 && mix phx.server`. - -``` - -``` \ No newline at end of file From 7f7786ae9cf859e94c287936f8c2ea328dc41003 Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Wed, 5 Jun 2019 16:04:33 +0300 Subject: [PATCH 04/24] Fix a number of variables --- .gitignore | 2 + attach_existing_rds.yml | 2 +- clean.yml | 18 ++- deploy_infra.yml | 5 +- deploy_software.yml | 6 +- destroy.yml | 3 +- host_vars/all.yml.example | 4 +- roles/attach_existing_rds/tasks/main.yml | 2 - roles/check/tasks/main.yml | 24 ++- roles/destroy/tasks/main.yml | 24 +-- roles/destroy/tasks/parameter_store.yml | 6 +- roles/dynamodb/tasks/main.yml | 4 +- roles/main_infra/defaults/main.yml | 1 + roles/main_infra/tasks/main.yml | 85 ++++++---- roles/main_infra/tasks/parameter_store.yml | 2 +- roles/main_infra/templates/hosts.tf.j2 | 32 ++-- roles/main_infra/templates/routing.tf.j2 | 4 +- .../main_infra/templates/terraform.tfvars.j2 | 20 +-- roles/main_software/tasks/main.yml | 149 ++++++++++-------- roles/s3_config/tasks/main.yml | 2 +- 20 files changed, 
221 insertions(+), 174 deletions(-) diff --git a/.gitignore b/.gitignore index 29fb423..4cf995b 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ log.txt # Terraform State *.terraform* +*.tfstate *terraform.tfstate.d* *tfplan* roles/main_infra/files/backend.tfvars @@ -33,5 +34,6 @@ host_vars/* .*.swp blockscout-*/ +roles/main_infra/files-* hosts diff --git a/attach_existing_rds.yml b/attach_existing_rds.yml index 2675396..050d48e 100644 --- a/attach_existing_rds.yml +++ b/attach_existing_rds.yml @@ -9,7 +9,7 @@ with_items: - s3 - dynamodb - when: backend|bool == true + when: backend | bool - include_role: name: attach_existing_rds always: diff --git a/clean.yml b/clean.yml index c1e664a..e1e19d7 100644 --- a/clean.yml +++ b/clean.yml @@ -1,14 +1,16 @@ - name: Clean TF cache - hosts: localhost + hosts: localhost,all tasks: - name: Clean TF cache file: state: absent path: "{{ item }}" - with_items: - - roles/main_infra/files/.terraform - - roles/main_infra/files/terraform.tfstate.d - - roles/main_infra/files/main.tfvars - - roles/main_infra/files/backend.tfvars - - roles/main_infra/files/terraform.tfplan - - log.txt + with_fileglob: + - "roles/main_infra/files/.terraform" + - "roles/main_infra/files/terraform.tfstate.d" + - "roles/main_infra/files/main.tfvars" + - "roles/main_infra/files/backend.tfvars" + - "roles/main_infra/files/terraform.tfplan" + - "log.txt" + - "blockscout-*" + - "/tmp/files-*" diff --git a/deploy_infra.yml b/deploy_infra.yml index 5d64e04..0d2a644 100644 --- a/deploy_infra.yml +++ b/deploy_infra.yml @@ -1,5 +1,5 @@ - name: Prepare infrastructure - hosts: localhost + hosts: all tasks: - block: - include_role: @@ -9,9 +9,10 @@ with_items: - s3 - dynamodb - when: backend|bool == true + when: backend | bool - include_role: name: main_infra + when: inventory_hostname == groups[group_names[0]][0] always: - include_role: name: s3_config diff --git a/deploy_software.yml b/deploy_software.yml index d3f5213..965041a 100644 --- 
a/deploy_software.yml +++ b/deploy_software.yml @@ -8,10 +8,10 @@ always: - include_role: name: s3 - when: backend|bool == true and (upload_debug_info_to_s3|bool == true or upload_config_to_s3|bool ==true) + when: backend|bool and (upload_debug_info_to_s3|bool or upload_config_to_s3|bool) - include_role: name: s3_config - when: backend|bool == true and upload_config_to_s3|bool == true + when: backend|bool and upload_config_to_s3|bool - include_role: name: s3_debug - when: backend|bool == true and upload_debug_info_to_s3|bool == true + when: backend|bool and upload_debug_info_to_s3|bool diff --git a/destroy.yml b/destroy.yml index e39b036..e4eea51 100644 --- a/destroy.yml +++ b/destroy.yml @@ -1,5 +1,6 @@ - name: Destroy infrastructure - hosts: localhost + hosts: all + serial: 1 roles: - { role: destroy, when: "confirmation|bool == True" } vars_prompt: diff --git a/host_vars/all.yml.example b/host_vars/all.yml.example index e83cb6c..3177452 100644 --- a/host_vars/all.yml.example +++ b/host_vars/all.yml.example @@ -22,8 +22,8 @@ env_vars: #ALB_CERTIFICATE_ARN: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24" #ARN of the certificate to attach to the LB. Required if ECTO_USE_SSL is set to true #HEART_BEAT_TIMEOUT: 30 # Heartbeat is an Erlang monitoring service that will restart BlockScout if it becomes unresponsive. This variables configures the timeout before Blockscout will be restarted. #HEART_COMMAND: "sudo systemctl restart explorer.service" # This variable represents a command that is used to restart the service - BLOCKSCOUT_VERSION: "v1.3.13-beta" # Added to the footer to signify the current BlockScout version - RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta" # The link to Blockscout release notes in the footer. 
+ #BLOCKSCOUT_VERSION: "v1.3.13-beta" # Added to the footer to signify the current BlockScout version + #RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta" # The link to Blockscout release notes in the footer. #ELIXIR_VERSION: "v1.8.1" # Elixir version to install on the node before Blockscout deploy #BLOCK_TRANSFORMER: "base" # Transformer for blocks: base or clique. #GRAPHIQL_TRANSACTION: "0xbc426b4792c48d8ca31ec9786e403866e14e7f3e4d39c7f2852e518fae529ab4" # Random tx hash on the network, used as default for graphiql tx. diff --git a/roles/attach_existing_rds/tasks/main.yml b/roles/attach_existing_rds/tasks/main.yml index 3b7a9df..9a080d2 100644 --- a/roles/attach_existing_rds/tasks/main.yml +++ b/roles/attach_existing_rds/tasks/main.yml @@ -16,8 +16,6 @@ template: src: roles/main_infra/templates/terraform.tfvars.j2 dest: roles/main_infra/files/terraform.tfvars - vars: - db_iops: "{{ chain_db_iops | default({}) }}" - name: Generating backend file template: diff --git a/roles/check/tasks/main.yml b/roles/check/tasks/main.yml index 2792753..cee6df3 100644 --- a/roles/check/tasks/main.yml +++ b/roles/check/tasks/main.yml @@ -1,33 +1,27 @@ - name: Check prefix fail: - msg: "The prefix '{{ prefix }}' is invalid. It must consist only of the lowercase characters a-z and digits 0-9, and must be between 3 and 5 characters long." - when: prefix|length < 3 or prefix|length > 5 or prefix is not match("^[a-z0-9]+$") + msg: "The prefix '{{ group_names[0] }}' is invalid. It must consist only of the lowercase characters a-z and digits 0-9, and must be between 3 and 5 characters long." + when: group_names[0] | length < 3 or group_names[0] | length > 5 or group_names[0] is not match("^[a-z0-9]+$") - name: Check chain names fail: - msg: "The prefix '{{ item }}' is invalid. It must consist only of the lowercase characters a-z and digits 0-9, and must not more than 5 characters long." 
- when: item.key|length > 5 or item.key is not match("^[a-z0-9]+$") - with_dict: "{{ chain_custom_environment }}" + msg: "The chain '{{ item }}' is invalid. It must consist only of the lowercase characters a-z and digits 0-9, and must not more than 5 characters long." + when: (item.key | length > 5 or item.key is not match("^[a-z0-9]+$")) and item.key != "all" and item.key != "ungrouped" + with_dict: "{{ groups }}" - name: Check if terraform is installed - command: which terraform + command: "{{ terraform_location }} --version" register: terraform_status changed_when: false -- name: Terraform check result - fail: - msg: "Terraform is not installed" - when: terraform_status.stdout == "" - - name: Check if python is installed - command: which python - register: python_status + command: "{{ ansible_python_interpreter }} --version" changed_when: false - name: Python check result fail: - msg: "Python either is not installed or is too old. Please install python version 2.6 or higher" - when: python_status.stdout == "" or python_int_version|int < 260 + msg: "Python is too old. 
Please install python version 2.6 or higher" + when: python_int_version | int < 260 vars: python_int_version: "{{ ansible_python_version.split('.')[0]|int * 100 + ansible_python_version.split('.')[1]|int * 10 + ansible_python_version.split('.')[2]|int }}" diff --git a/roles/destroy/tasks/main.yml b/roles/destroy/tasks/main.yml index 489b1a7..dd741b0 100644 --- a/roles/destroy/tasks/main.yml +++ b/roles/destroy/tasks/main.yml @@ -10,26 +10,24 @@ state: absent dest: roles/main_infra/files/remote-backend-selector.tf when: - - backend | default ('false') | bool != true + - not backend | default ('false') | bool - name: Generating variables file template: src: roles/main_infra/templates/terraform.tfvars.j2 dest: roles/main_infra/files/terraform.tfvars - vars: - db_iops: "{{ chain_db_iops | default({}) }}" - name: Generating backend file template: src: roles/main_infra/templates/backend.tfvars.j2 dest: roles/main_infra/files/backend.tfvars - when: backend|bool == true + when: backend | bool - name: Generate Terraform files template: src: "{{ item.key }}" dest: "{{ item.value }}" - with_dict: {roles/main_infra/templates/hosts.tf.j2: roles/main_infra/files/hosts.tf,roles/main_infra/templates/routing.tf.j2: roles/main_infra/files/routing.tf,roles/main_infra/templates/provider.tf.j2: roles/main_infra/files/provider.tf} + with_dict: { roles/main_infra/templates/hosts.tf.j2: roles/main_infra/files/hosts.tf, roles/main_infra/templates/routing.tf.j2: roles/main_infra/files/routing.tf, roles/main_infra/templates/provider.tf.j2: roles/main_infra/files/provider.tf } # This is due to the TF0.11 bug which do not allow to completely destroy resources if interpolation syntax is used in outputs.tf at edge cases - name: Check if outputs.tf exists @@ -49,7 +47,7 @@ file: path: roles/main_infra/files/.terraform/ state: absent - when: stat_result.stat.exists == True + when: stat_result.stat.exists - name: Terraform destroy main infra shell: "echo yes | {{ terraform_location }} {{ item }}" 
@@ -61,10 +59,6 @@ - name: Delete vars from parameter store include: parameter_store.yml - loop: "{{ chain_custom_environment.keys() }}" - loop_control: - loop_var: chain - index_var: index - name: Check if outputs.tf.backup exists stat: path=roles/main_infra/files/outputs.tf.backup @@ -78,6 +72,12 @@ pause: prompt: "Do you want to delete S3 bucket with state file and DynamoDB attached to it also? [Yes/No] Default: No" register: user_answer + until: user_answer.user_input | lower in conditional + retries: 10000 + delay: 1 + vars: + conditional: ['yes','no','true','false'] + when: inventory_hostname == groups['all'][0] - name: Destroy S3 bucket s3_bucket: @@ -93,7 +93,7 @@ secret_key: "{{ aws_secret_key|default(omit) }}" profile: "{{ aws_profile|default(omit) }}" region: "{{ aws_region|default(omit) }}" - when: user_answer.user_input|bool == True + when: hostvars[groups['all'][0]].user_answer.user_input | bool - dynamodb_table: name: "{{ prefix }}-{{ dynamodb_table }}" @@ -107,4 +107,4 @@ secret_key: "{{ aws_secret_key|default(omit) }}" profile: "{{ aws_profile|default(omit) }}" region: "{{ aws_region|default(omit) }}" - when: user_answer.user_input|bool == True + when: hostvars[groups['all'][0]].user_answer.user_input | bool diff --git a/roles/destroy/tasks/parameter_store.yml b/roles/destroy/tasks/parameter_store.yml index 5c7ec27..d1c56de 100644 --- a/roles/destroy/tasks/parameter_store.yml +++ b/roles/destroy/tasks/parameter_store.yml @@ -2,19 +2,19 @@ set_fact: chain_env: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=region, shortnames=true, bypath=true, recursive=true ) }}" vars: - path: "/{{ prefix }}/{{ chain }}" + path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is defined - name: Fetch environment variables (via profile) set_fact: chain_env: "{{ lookup('aws_ssm', path, aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}" vars: - path: "/{{ prefix }}/{{ chain }}" + path: 
"/{{ group_names[0] }}/{{ chain }}" when: aws_profile is defined - name: Remove chain variables aws_ssm_parameter_store: - name: "/{{ prefix }}/{{ chain }}/{{ item.key }}" + name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" value: "{{ item.value }}" state: absent profile: "{{ profile }}" diff --git a/roles/dynamodb/tasks/main.yml b/roles/dynamodb/tasks/main.yml index 9de803b..e1e50d5 100644 --- a/roles/dynamodb/tasks/main.yml +++ b/roles/dynamodb/tasks/main.yml @@ -1,13 +1,13 @@ - name: Create DynamoDB table dynamodb_table: - name: "{{ prefix }}-{{ dynamodb_table }}" + name: "{{ group_names[0] }}-{{ dynamodb_table }}" hash_key_name: LockID hash_key_type: STRING read_capacity: 1 write_capacity: 1 tags: origin: terraform - prefix: "{{ prefix }}" + prefix: "{{ group_names[0] }}" profile: "{{ profile }}" aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" diff --git a/roles/main_infra/defaults/main.yml b/roles/main_infra/defaults/main.yml index 6edcb21..3c110e7 100644 --- a/roles/main_infra/defaults/main.yml +++ b/roles/main_infra/defaults/main.yml @@ -9,3 +9,4 @@ db_subnet_cidr: "10.0.2.0/16" dns_zone_name: "poa.internal" instance_type: "m5.large" root_block_size: 8 +db_iops: {} diff --git a/roles/main_infra/tasks/main.yml b/roles/main_infra/tasks/main.yml index a31002a..9eb203e 100644 --- a/roles/main_infra/tasks/main.yml +++ b/roles/main_infra/tasks/main.yml @@ -1,53 +1,68 @@ +- name: Ansible delete file glob + find: + paths: /tmp/ + file_type: directory + patterns: "files-{{ group_names[0] }}" + register: files_to_delete + +- name: Ansible remove file glob + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" + +- name: Copy files + copy: + src: "roles/main_infra/files/" + dest: "/tmp/files-{{ group_names[0] }}/" + - name: Local or remote backend selector (remote) template: src: remote-backend-selector.tf.j2 - dest: roles/main_infra/files/remote-backend-selector.tf + dest: "/tmp/files-{{ 
group_names[0] }}/remote-backend-selector.tf" when: - - backend|bool + - backend | bool - name: Local or remote backend selector (local) file: state: absent - dest: roles/main_infra/files/remote-backend-selector.tf + dest: "/tmp/files-{{ group_names[0] }}/remote-backend-selector.tf" when: - - backend | default('false') | bool + - not backend | default('false') | bool - name: Generating variables file template: src: terraform.tfvars.j2 - dest: roles/main_infra/files/terraform.tfvars - vars: - db_iops: "{{ db_iops | default({}) }}" + dest: "/tmp/files-{{ group_names[0] }}/terraform.tfvars" - name: Generating backend file template: src: backend.tfvars.j2 - dest: roles/main_infra/files/backend.tfvars + dest: "/tmp/files-{{ group_names[0] }}/backend.tfvars" when: backend | default('false') | bool -- name: Check if .terraform folder exists - stat: - path: "roles/main_infra/files/.terraform/" - register: stat_result - -- name: Remove .terraform folder +- name: Remove Terraform state file: - path: roles/main_infra/files/.terraform/ + path: "{{ item }}" state: absent - when: stat_result.stat.exists + with_items: + - "/tmp/files-{{ group_names[0] }}/.terraform/" + - "/tmp/files-{{ group_names[0] }}/terraform.tfstate" + - "/tmp/files-{{ group_names[0] }}/terraform.tfstate.backup" + - "/tmp/files-{{ group_names[0] }}/terraform.tfplan" - name: Generate Terraform files template: src: "{{ item.key }}" dest: "{{ item.value }}" - with_dict: { hosts.tf.j2: roles/main_infra/files/hosts.tf, routing.tf.j2: roles/main_infra/files/routing.tf, provider.tf.j2: roles/main_infra/files/provider.tf } + with_dict: { hosts.tf.j2: "/tmp/files-{{ group_names[0] }}/hosts.tf", routing.tf.j2: "/tmp/files-{{ group_names[0] }}/routing.tf", provider.tf.j2: "/tmp/files-{{ group_names[0] }}/provider.tf" } #Workaround since terraform module return unexpected error. 
- name: Terraform plan construct shell: "echo yes | {{ terraform_location }} {{ item }}" register: tf_plan args: - chdir: "roles/main_infra/files" + chdir: "/tmp/files-{{ group_names[0] }}" with_items: - "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}" - plan -out terraform.tfplan @@ -61,35 +76,51 @@ pause: prompt: "Are you absolutely sure you want to execute the deployment plan shown above? [False]" register: user_answer - until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes" + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 + vars: + conditional: ['yes','no','true','false'] + when: inventory_hostname == groups['all'][0] - name: Insert vars into parameter store include: parameter_store.yml - when: user_answer.user_input | bool + when: hostvars[groups['all'][0]].user_answer.user_input | bool - name: Terraform provisioning shell: "echo yes | {{ terraform_location }} apply terraform.tfplan" args: - chdir: "roles/main_infra/files" - when: user_answer.user_input | bool + chdir: "/tmp/files-{{ group_names[0] }}" + when: hostvars[groups['all'][0]].user_answer.user_input | bool ignore_errors: True - name: Ensure Terraform resources has been provisioned shell: "echo yes | {{ terraform_location }} apply" args: - chdir: "roles/main_infra/files" - when: user_answer.user_input | bool + chdir: "/tmp/files-{{ group_names[0] }}" + when: hostvars[groups['all'][0]].user_answer.user_input | bool - name: Terraform output info into variable shell: "{{ terraform_location }} output -json" register: output args: - chdir: "roles/main_infra/files" - when: user_answer.user_input | bool + chdir: "/tmp/files-{{ group_names[0] }}" + when: hostvars[groups['all'][0]].user_answer.user_input | bool - name: Output info from Terraform debug: var: output.stdout_lines - when: user_answer.user_input | bool + when: 
hostvars[groups['all'][0]].user_answer.user_input | bool + +- name: Ansible delete file glob + find: + paths: /tmp/ + file_type: directory + patterns: "files-{{ group_names[0] }}" + register: files_to_delete + +- name: Ansible remove file glob + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" diff --git a/roles/main_infra/tasks/parameter_store.yml b/roles/main_infra/tasks/parameter_store.yml index bbf13b7..e5f9481 100644 --- a/roles/main_infra/tasks/parameter_store.yml +++ b/roles/main_infra/tasks/parameter_store.yml @@ -7,7 +7,7 @@ - name: Insert variables in PS aws_ssm_parameter_store: - name: "/{{ prefix }}/{{ chain }}/{{ item.key }}" + name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" value: "{{ item.value }}" profile: "{{ profile }}" aws_access_key: "{{ access_key }}" diff --git a/roles/main_infra/templates/hosts.tf.j2 b/roles/main_infra/templates/hosts.tf.j2 index 27f8d77..1b3bd7d 100644 --- a/roles/main_infra/templates/hosts.tf.j2 +++ b/roles/main_infra/templates/hosts.tf.j2 @@ -39,22 +39,22 @@ resource "aws_launch_configuration" "explorer" { } } -{% for key, value in env_vars.iteritems() %} -{% if value['USE_PLACEMENT_GROUP']|default('true') == "true" %} -resource "aws_placement_group" "explorer-{{key}}" { - name = "${var.prefix}-{{key}}-explorer-pg" +{% for key in groups[group_names[0]] %} +{% if use_placement_group | default('true') == "true" %} +resource "aws_placement_group" "explorer-{{ hostvars[key]['chain'] }}" { + name = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-pg" strategy = "cluster" } {% endif %} {% endfor %} -{% for key, value in env_vars.iteritems() %} -resource "aws_autoscaling_group" "explorer-{{key}}" { - name = "${aws_launch_configuration.explorer.name}-asg-{{key}}" +{% for key in groups[group_names[0]] %} +resource "aws_autoscaling_group" "explorer-{{ hostvars[key]['chain'] }}" { + name = "${aws_launch_configuration.explorer.name}-asg-{{ hostvars[key]['chain'] }}" max_size = 
"4" min_size = "1" desired_capacity = "1" -{% if use_placement_group | default('false') == "true" %} placement_group = "${var.prefix}-{{key}}-explorer-pg" +{% if use_placement_group | default('false') == "true" %} placement_group = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-pg" {% endif %} launch_configuration = "${aws_launch_configuration.explorer.name}" vpc_zone_identifier = ["${aws_subnet.default.id}"] @@ -92,29 +92,29 @@ resource "aws_autoscaling_group" "explorer-{{key}}" { tag { key = "chain" - value = "{{ key }}" + value = "{{ hostvars[key]['chain'] }}" propagate_at_launch = true } tag { key = "Name" - value = "{{ key }} Application" + value = "{{ hostvars[key]['chain'] }} Application" propagate_at_launch = true } } # TODO: These autoscaling policies are not currently wired up to any triggers -resource "aws_autoscaling_policy" "explorer-up" { - name = "${var.prefix}-{{key}}-explorer-autoscaling-policy-up" - autoscaling_group_name = "${aws_autoscaling_group.explorer-{{key}}.name}" +resource "aws_autoscaling_policy" "explorer-up-{{ hostvars[key]['chain'] }}" { + name = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-autoscaling-policy-up" + autoscaling_group_name = "${aws_autoscaling_group.explorer-{{ hostvars[key]['chain'] }}.name}" adjustment_type = "ChangeInCapacity" scaling_adjustment = 1 cooldown = 300 } -resource "aws_autoscaling_policy" "explorer-down" { - name = "${var.prefix}-{{key}}-explorer-autoscaling-policy-down" - autoscaling_group_name = "${aws_autoscaling_group.explorer-{{key}}.name}" +resource "aws_autoscaling_policy" "explorer-down-{{ hostvars[key]['chain'] }}" { + name = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-autoscaling-policy-down" + autoscaling_group_name = "${aws_autoscaling_group.explorer-{{ hostvars[key]['chain'] }}.name}" adjustment_type = "ChangeInCapacity" scaling_adjustment = -1 cooldown = 300 diff --git a/roles/main_infra/templates/routing.tf.j2 b/roles/main_infra/templates/routing.tf.j2 index 
9dd190f..a6bebe6 100644 --- a/roles/main_infra/templates/routing.tf.j2 +++ b/roles/main_infra/templates/routing.tf.j2 @@ -58,12 +58,12 @@ resource "aws_lb_target_group" "explorer" { } } -{% for key, value in env_vars.iteritems() %} +{% for host in groups[group_names[0]] %} resource "aws_alb_listener" "alb_listener{{loop.index-1}}" { load_balancer_arn = "${aws_lb.explorer.*.arn[{{loop.index-1}}]}" port = "${lookup(var.use_ssl,element(var.chains,{{loop.index-1}})) ? "443" : "80" }" protocol = "${lookup(var.use_ssl,element(var.chains,{{loop.index-1}})) ? "HTTPS" : "HTTP" }" -{% if value['ECTO_USE_SSL']|default('false') == "true" %} +{% if hostvars[host]['env_vars']['ECTO_USE_SSL']|default('false') == "true" %} ssl_policy = "${lookup(var.alb_ssl_policy,element(var.chains,{{loop.index-1}}))}" certificate_arn = "${lookup(var.alb_certificate_arn,element(var.chains,{{loop.index-1}}))}" {% endif %} diff --git a/roles/main_infra/templates/terraform.tfvars.j2 b/roles/main_infra/templates/terraform.tfvars.j2 index 0ca8988..1a08696 100644 --- a/roles/main_infra/templates/terraform.tfvars.j2 +++ b/roles/main_infra/templates/terraform.tfvars.j2 @@ -45,55 +45,55 @@ chains = [ {% endfor %} ] -db_id = { +chain_db_id = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_id'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_name = { +chain_db_name = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_name'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_username = { +chain_db_username = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_username'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_password = { +chain_db_password = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_password'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_instance_class = { 
+chain_db_instance_class = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_instance_class'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_storage = { +chain_db_storage = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_storage'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_storage_type = { +chain_db_storage_type = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_storage_type'] }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_iops = { +chain_db_iops = { {% for host in groups[group_names[0]] %} -{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_iops'] }}"{% if not loop.last %},{% endif %} +{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_iops']|default('0') }}"{% if not loop.last %},{% endif %} {% endfor %} } -db_version = { +chain_db_version = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_version'] }}"{% if not loop.last %},{% endif %} {% endfor %} diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 5e8b762..08b50bc 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -1,7 +1,7 @@ - name: Clone BlockScout git: repo: "{{ blockscout_repo }}" - dest: "blockscout-{{ group_names[0] }}-{{ chain }}" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" version: "{{ branch }}" force: true when: skip_fetch | bool != true @@ -9,75 +9,42 @@ - name: Git clean command: "git clean -fdx" args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" when: skip_fetch | bool != true - name: Merge branches command: "git merge {{ merge_commit_item }}" args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" when: merge_commit_item and not 
skip_fetch | bool vars: merge_commit_item: "{{ merge_commit | default(false) }}" - name: Copy web config files copy: - src: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example" - dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs" + src: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/prod.secret.exs" - name: Template explorer config files template: src: dev.secret.exs.j2 - dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/prod.secret.exs" when: ps_user is defined - name: Copy default explorer config files copy: - src: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs.example" - dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" + src: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs.example" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/prod.secret.exs" when: ps_user is undefined or ps_user == "" - name: Remove static assets from previous deployment, if any file: - path: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static" + path: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static" state: absent -- name: Compile BlockScout - command: "mix do {{ item }}" - args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" - with_items: - - deps.get - - local.rebar --force - - deps.compile - - compile - - ecto.drop - - ecto.create - - ecto.migrate - -- name: Install Node modules at apps/block_scout_web/assets - command: npm install - args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" - -- name: Execute 
webpack.js at apps/block_scout_web/assets/node_modules/webpack/bin - command: node_modules/webpack/bin/webpack.js --mode production - args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" - -- name: Instal Node modules at apps/explorer - command: npm install - args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer" - -- name: Install SSL certificates - command: mix phx.gen.cert blockscout blockscout.local - args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web" - - name: Fetch environment variables (via access key) set_fact: - env: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=aws_region|default('us-east-1'), shortnames=true, bypath=true, recursive=true ) }}" + env_compiled: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=aws_region|default('us-east-1'), shortnames=true, bypath=true, recursive=true ) }}" vars: path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is defined @@ -100,7 +67,7 @@ - name: Override env variables set_fact: - env_compiled: "{{ env_compilated | combine(lower_env) }}" + env_compiled: "{{ env_compiled | combine(lower_env) }}" when: lower_env is defined - name: Uppercase chain @@ -110,19 +77,59 @@ vars: upper_env: {} +- name: Add server port + set_fact: + server_port: "{{ 65535|random(seed=inventory_hostname,start=1024) }}" + +- name: Combine server env + set_fact: + server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port,'MIX_ENV':'prod','DATABASE_URL':'postgresql://' ~ ps_user ~ ':' ~ ps_password ~ '@' ~ ps_host ~ ':5432/' ~ ps_db}) }}" + +- name: Compile BlockScout + command: "mix do {{ item }}" + args: + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" + environment: "{{ server_env }}" + with_items: + - deps.get + - local.rebar --force + - deps.compile + - compile + - ecto.drop + - ecto.create + - ecto.migrate + +- name: 
Install Node modules at apps/block_scout_web/assets + environment: "{{ server_env }}" + command: npm install + args: + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" + +- name: Execute webpack.js at apps/block_scout_web/assets/node_modules/webpack/bin + environment: "{{ server_env }}" + command: node_modules/webpack/bin/webpack.js --mode production + args: + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" + +- name: Instal Node modules at apps/explorer + environment: "{{ server_env }}" + command: npm install + args: + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer" + +- name: Install SSL certificates + environment: "{{ server_env }}" + command: mix phx.gen.cert blockscout blockscout.local + args: + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web" + - name: Start server block: - - set_fact: - server_port: "{{ 65535|random(seed=inventory_hostname,start=1024) }}" - - - set_fact: - server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port,'MIX_ENV':'prod'}) }}" - - name: Start server command: "mix phx.server" environment: "{{ server_env }}" args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" async: 10000 poll: 0 @@ -152,38 +159,42 @@ when: prompt is failed - name: Build static assets + environment: "{{ server_env }}" command: mix phx.digest args: - chdir: "blockscout-{{ group_names[0] }}-{{ chain }}" + chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" - name: User prompt pause: prompt: "Would you like to remove staging dependencies? 
[Yes/No] Default: Yes" register: user_answer - until: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no" and user_answer.user_input|lower != "true" and user_answer.user_input|lower != "yes" + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 + vars: + conditional: ['yes','no','true','false'] + when: inventory_hostname == groups['all'][0] - name: Remove dev dependencies file: state: absent path: "{{ item }}" with_items: - - "blockscout-{{ group_names[0] }}-{{ chain }}/_build/" - - "blockscout-{{ group_names[0] }}-{{ chain }}/deps/" - - "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets/node_modules/" - - "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/node_modules/" - - "blockscout-{{ group_names[0] }}-{{ chain }}/logs/dev/" - when: user_answer.user_input | lower | bool + - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/_build/" + - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/deps/" + - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets/node_modules/" + - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/node_modules/" + - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/logs/dev/" + when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool - name: Fix bug with favicon replace: regexp: '\"favicon\.ico\"\:\"favicon-[a-z0-9]+?\.ico\"' replace: '"images/favicon.ico":"favicon.ico"' - path: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json" + path: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json" - name: Upload Blockscout to S3 - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] 
}}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" register: push_output - name: Upload output @@ -194,9 +205,12 @@ pause: prompt: "Do you want to update the Parameter Store variables? [Yes/No] Default: Yes" register: user_answer - until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes" + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 + vars: + conditional: ['yes','no','true','false'] + when: inventory_hostname == groups['all'][0] - name: Update chain variables aws_ssm_parameter_store: @@ -212,16 +226,19 @@ profile: "{{ aws_profile|default(omit) }}" region: "{{ aws_region|default(omit) }}" with_dict: "{{ lower_env }}" - when: user_answer.user_input | lower | bool + when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool - name: User prompt pause: prompt: "Do you want to deploy BlockScout? 
[Yes/No] Default: Yes" register: user_answer - until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes" + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 + vars: + conditional: ['yes','no','true','false'] + when: inventory_hostname == groups['all'][0] - name: Deploy Blockscout - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime --description '{{ env_compiled['BLOCKSCOUT_VERSION'] }}' {{ '--profile='~aws_profile if aws_profile is defined else '' }}" - when: user_answer.user_input | lower | bool + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool diff --git a/roles/s3_config/tasks/main.yml b/roles/s3_config/tasks/main.yml index 14a0602..3cb963f 100644 --- a/roles/s3_config/tasks/main.yml +++ b/roles/s3_config/tasks/main.yml @@ -1,5 +1,5 @@ - name: "Loop over config files" - include: subtasks.yml file={{item}} + include: config.yml file={{item}} with_items: - "group_vars/all.yml" - "group_vars/{{ group_names[0] }}" From a91cfdf4b92dd6503d4347e89ca01d500080e7fe Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Mon, 10 Jun 2019 14:26:17 +0300 Subject: [PATCH 05/24] Update 
link to ERLANG --- roles/main_infra/files/libexec/init.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/main_infra/files/libexec/init.sh b/roles/main_infra/files/libexec/init.sh index d485c5b..62316df 100755 --- a/roles/main_infra/files/libexec/init.sh +++ b/roles/main_infra/files/libexec/init.sh @@ -147,7 +147,7 @@ EOF log "Installing Erlang.." -wget http://packages.erlang-solutions.com/site/esl/esl-erlang/FLAVOUR_1_general/esl-erlang_21.1-1~centos~7_amd64.rpm +wget https://packages.erlang-solutions.com/erlang/rpm/centos/7/x86_64/esl-erlang_21.1-1~centos~7_amd64.rpm yum localinstall -y wxGTK-devel unixODBC-devel >"$LOG" yum localinstall -y esl-erlang_21.1-1~centos~7_amd64.rpm >"$LOG" From fcb06b96185e04717dcb990beac74bcc06ff23ae Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Mon, 10 Jun 2019 22:59:35 +0300 Subject: [PATCH 06/24] fix SUPPORTED_CHAINS issue and remove temp solution for randomizing port --- README.md | 2 +- deploy_software.yml | 2 ++ group_vars/all.yml.example | 7 ++++++ group_vars/blockscout.yml.example | 3 ++- group_vars/infrastructure.yml.example | 1 + host_vars/all.yml.example | 5 ++++- host_vars/blockscout.yml.example | 3 ++- host_vars/infrastructure.yml.example | 3 ++- roles/main_infra/files/libexec/init.sh | 3 +-- roles/main_software/tasks/main.yml | 30 +++++++++++++++++++------- 10 files changed, 44 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 0a00f28..655fdbd 100644 --- a/README.md +++ b/README.md @@ -249,7 +249,7 @@ Example: `prefix` variable: tf - `chain_db_id` variable: poa + `db_id` variable: poa **Note 3**: make sure MultiAZ is disabled on your database. 
diff --git a/deploy_software.yml b/deploy_software.yml index 965041a..563469e 100644 --- a/deploy_software.yml +++ b/deploy_software.yml @@ -5,6 +5,8 @@ - name: Deploy include_role: name: main_software + tags: + - chain_vars_update always: - include_role: name: s3 diff --git a/group_vars/all.yml.example b/group_vars/all.yml.example index 6b22d2f..d138603 100644 --- a/group_vars/all.yml.example +++ b/group_vars/all.yml.example @@ -1,3 +1,9 @@ +blockscout_repo: https://github.com/poanetwork/blockscout + +ps_host: localhost +ps_user: myuser +ps_password: mypass + # System variables ansible_python_interpreter: "/usr/bin/python3" @@ -18,3 +24,4 @@ upload_debug_info_to_s3: true ## The bucket and dynamodb_table variables will be used only when backend variable is set to true ## Name of the bucket where TF state files will be stored bucket: "poa-terraform-state" + diff --git a/group_vars/blockscout.yml.example b/group_vars/blockscout.yml.example index f1174ac..b3b9d06 100644 --- a/group_vars/blockscout.yml.example +++ b/group_vars/blockscout.yml.example @@ -3,4 +3,5 @@ blockscout_repo: https://github.com/poanetwork/blockscout # Please, specify the credentials for the test Postgres installation ps_host: localhost ps_user: myuser -ps_password: mypass \ No newline at end of file +ps_password: mypass + diff --git a/group_vars/infrastructure.yml.example b/group_vars/infrastructure.yml.example index c728b99..82b9d02 100644 --- a/group_vars/infrastructure.yml.example +++ b/group_vars/infrastructure.yml.example @@ -20,3 +20,4 @@ dns_zone_name: "poa.internal" ## Size of the EC2 instance EBS root volume root_block_size: 120 + diff --git a/host_vars/all.yml.example b/host_vars/all.yml.example index 3177452..8b32317 100644 --- a/host_vars/all.yml.example +++ b/host_vars/all.yml.example @@ -32,6 +32,7 @@ env_vars: #LINK_TO_OTHER_EXPLORERS: "false" # If true, links to other explorers are added in the footer #USE_PLACEMENT_GROUP: "false" # If true, BlockScout instance will be created in 
the placement group ##The following variables are optional + #SUPPORTED_CHAINS: '[{ "title": "POA Core", "url": "https://blockscout.com/poa/core" }]' # JSON array with links to other exporers #FIRST_BLOCK: 0 # The block number, where indexing begins from. #COINMARKETCAP_PAGES: 10 # Sets the number of pages at Coinmarketcap to search coin at. Defaults to 10 #METADATA_CONTRACT: # Address of metadata smart contract. Used by POA Network to obtain Validators information to display in the UI @@ -44,4 +45,6 @@ env_vars: #DATADOG_PORT: # Port configuration variable for Datadog integration #SPANDEX_BATCH_SIZE: # Spandex and Datadog configuration setting. #SPANDEX_SYNC_THRESHOLD: # Spandex and Datadog configuration setting. - #BLOCK_COUNT_CACHE_TTL: #Time to live of block count cache in milliseconds + #BLOCK_COUNT_CACHE_PERIOD: 600 #Time to live of block count cache in milliseconds + #ALLOWED_EVM_VERSIONS: "homestead, tangerineWhistle, spuriousDragon, byzantium, constantinople, petersburg" # the comma-separated list of allowed EVM versions for contracts verification + diff --git a/host_vars/blockscout.yml.example b/host_vars/blockscout.yml.example index 167b6bb..af7063e 100644 --- a/host_vars/blockscout.yml.example +++ b/host_vars/blockscout.yml.example @@ -3,4 +3,5 @@ skip_fetch: true blockscout_repo: https://github.com/poanetwork/blockscout branch: "production-core" #merge_commit: "2cdead1" -ps_db: mydb # The name of the test DB to store data in; \ No newline at end of file +ps_db: mydb # The name of the test DB to store data in; + diff --git a/host_vars/infrastructure.yml.example b/host_vars/infrastructure.yml.example index 743cb47..b2f0d20 100644 --- a/host_vars/infrastructure.yml.example +++ b/host_vars/infrastructure.yml.example @@ -13,4 +13,5 @@ db_storage_type: "gp2" # see https://docs.aws.amazon.com/AmazonRDS/latest/UserGu db_version: "10.6" #Blockscout uses Postgres as the DB engine. This variable describes the Postgres version used in each particular chain. 
instance_type: "m5.large" # EC2 BlockScout Instance will have this type -use_placement_group: false # Choose wheter or not to group BlockScout instances into group \ No newline at end of file +use_placement_group: false # Choose wheter or not to group BlockScout instances into group + diff --git a/roles/main_infra/files/libexec/init.sh b/roles/main_infra/files/libexec/init.sh index d485c5b..36a6218 100755 --- a/roles/main_infra/files/libexec/init.sh +++ b/roles/main_infra/files/libexec/init.sh @@ -185,8 +185,7 @@ old_env="$(cat /etc/environment)" # shellcheck disable=SC2016 echo 'PATH=/opt/elixir/bin:/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:$PATH' # shellcheck disable=SC1117 - echo "$parameters_json" | \ - jq ".Parameters[] as \$ps | \"\(\$ps[\"Name\"] | gsub(\"-\"; \"_\") | ltrimstr(\"/$PREFIX/$CHAIN/\") | ascii_upcase)=\\\"\(\$ps[\"Value\"])\\\"\"" --raw-output + echo "$parameters_json" | echo "$parameters_json" | jq ".Parameters[] as \$ps | \"\(\$ps[\"Name\"] | gsub(\"-\"; \"_\") | ltrimstr(\"/$PREFIX/$CHAIN/\") | ascii_upcase)='\(\$ps[\"Value\"])'\"" --raw-output echo "DYNO=\"$HOSTNAME\"" echo "HOSTNAME=\"$HOSTNAME\"" echo "DATABASE_URL=\"$DATABASE_URL/$DB_NAME\"" diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 08b50bc..6186e55 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -23,18 +23,18 @@ - name: Copy web config files copy: src: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example" - dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/prod.secret.exs" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs" - name: Template explorer config files template: src: dev.secret.exs.j2 - dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/prod.secret.exs" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain 
}}/apps/explorer/config/dev.secret.exs" when: ps_user is defined - name: Copy default explorer config files copy: src: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs.example" - dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/prod.secret.exs" + dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" when: ps_user is undefined or ps_user == "" - name: Remove static assets from previous deployment, if any @@ -48,6 +48,8 @@ vars: path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is defined + tags: + - chain_vars_update - name: Fetch environment variables (via profile) set_fact: @@ -55,6 +57,8 @@ vars: path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is undefined + tags: + - chain_vars_update - name: Make config variables lowercase set_fact: @@ -64,6 +68,8 @@ vars: lower_env: {} custom_environment_chain: "{{ env_vars | default({}) if env_vars>0 else {} }}" + tags: + - chain_vars_update - name: Override env variables set_fact: @@ -83,7 +89,11 @@ - name: Combine server env set_fact: - server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port,'MIX_ENV':'prod','DATABASE_URL':'postgresql://' ~ ps_user ~ ':' ~ ps_password ~ '@' ~ ps_host ~ ':5432/' ~ ps_db}) }}" + server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port}) }}" + +- name: Debug + debug: + var: server_env - name: Compile BlockScout command: "mix do {{ item }}" @@ -166,7 +176,7 @@ - name: User prompt pause: - prompt: "Would you like to remove staging dependencies? [Yes/No] Default: Yes" + prompt: "Would you like to remove staging dependencies? [Yes/No]" register: user_answer until: user_answer.user_input | lower in conditional retries: 10000 @@ -203,7 +213,7 @@ - name: User prompt pause: - prompt: "Do you want to update the Parameter Store variables? [Yes/No] Default: Yes" + prompt: "Do you want to update the Parameter Store variables? 
[Yes/No]" register: user_answer until: user_answer.user_input | lower in conditional retries: 10000 @@ -211,11 +221,13 @@ vars: conditional: ['yes','no','true','false'] when: inventory_hostname == groups['all'][0] + tags: + - chain_vars_update - name: Update chain variables aws_ssm_parameter_store: name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" - value: "{{ item.value }}" + value: !unsafe '{{ item.value }}' profile: "{{ profile }}" aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" @@ -227,10 +239,12 @@ region: "{{ aws_region|default(omit) }}" with_dict: "{{ lower_env }}" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + tags: + - chain_vars_update - name: User prompt pause: - prompt: "Do you want to deploy BlockScout? [Yes/No] Default: Yes" + prompt: "Do you want to deploy BlockScout? [Yes/No]" register: user_answer until: user_answer.user_input | lower in conditional retries: 10000 From a37deff5736ad7f65f2c977d6acc0bd603ec477b Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Tue, 11 Jun 2019 13:16:43 +0300 Subject: [PATCH 07/24] fix small bugs --- ansible.cfg | 5 +++-- host_vars/all.yml.example | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index d30db00..e7f61a5 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -3,5 +3,6 @@ force_handlers = True pipelining = True inventory = hosts deprecation_warnings = False -host_key_checking=false -log_path=log.txt +host_key_checking = false +log_path = log.txt +hash_behaviour = merge diff --git a/host_vars/all.yml.example b/host_vars/all.yml.example index 8b32317..563365c 100644 --- a/host_vars/all.yml.example +++ b/host_vars/all.yml.example @@ -22,8 +22,7 @@ env_vars: #ALB_CERTIFICATE_ARN: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24" #ARN of the certificate to attach to the LB. 
Required if ECTO_USE_SSL is set to true #HEART_BEAT_TIMEOUT: 30 # Heartbeat is an Erlang monitoring service that will restart BlockScout if it becomes unresponsive. This variables configures the timeout before Blockscout will be restarted. #HEART_COMMAND: "sudo systemctl restart explorer.service" # This variable represents a command that is used to restart the service - #BLOCKSCOUT_VERSION: "v1.3.13-beta" # Added to the footer to signify the current BlockScout version - #RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta" # The link to Blockscout release notes in the footer. + #BLOCKSCOUT_VERSION: "v2.0.0-beta" # Added to the footer to signify the current BlockScout version #ELIXIR_VERSION: "v1.8.1" # Elixir version to install on the node before Blockscout deploy #BLOCK_TRANSFORMER: "base" # Transformer for blocks: base or clique. #GRAPHIQL_TRANSACTION: "0xbc426b4792c48d8ca31ec9786e403866e14e7f3e4d39c7f2852e518fae529ab4" # Random tx hash on the network, used as default for graphiql tx. 
From 802b5eb5d168ade65aca9441dca527a15e88c4b5 Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Tue, 11 Jun 2019 13:29:01 +0300 Subject: [PATCH 08/24] fix unsafe parameter update --- roles/main_software/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 6186e55..20c1b98 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -227,7 +227,7 @@ - name: Update chain variables aws_ssm_parameter_store: name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" - value: !unsafe '{{ item.value }}' + value: "{{ item.value }}" profile: "{{ profile }}" aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" From ccb10db6745b6d659c4cf9627c5cfa67d94f5c3f Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Tue, 11 Jun 2019 13:56:21 +0300 Subject: [PATCH 09/24] tagging build --- deploy_software.yml | 6 ++- roles/main_software/tasks/main.yml | 76 +++++++++++++++++++++++++----- 2 files changed, 68 insertions(+), 14 deletions(-) diff --git a/deploy_software.yml b/deploy_software.yml index ff4c4b4..9061d27 100644 --- a/deploy_software.yml +++ b/deploy_software.yml @@ -6,7 +6,9 @@ include_role: name: main_software tags: - - chain_vars_update + - update_vars + - build + - deploy always: - include_role: name: s3 @@ -16,4 +18,4 @@ when: backend|bool and upload_config_to_s3|bool - include_role: name: s3_debug - when: backend|bool and upload_debug_info_to_s3|bool \ No newline at end of file + when: backend|bool and upload_debug_info_to_s3|bool diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 20c1b98..8a08a1f 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -5,12 +5,16 @@ version: "{{ branch }}" force: true when: skip_fetch | bool != true + tags: + - build - name: Git clean command: "git clean -fdx" args: chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" 
when: skip_fetch | bool != true + tags: + - build - name: Merge branches command: "git merge {{ merge_commit_item }}" @@ -19,28 +23,38 @@ when: merge_commit_item and not skip_fetch | bool vars: merge_commit_item: "{{ merge_commit | default(false) }}" + tags: + - build - name: Copy web config files copy: src: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example" dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs" + tags: + - build - name: Template explorer config files template: src: dev.secret.exs.j2 dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" when: ps_user is defined + tags: + - build - name: Copy default explorer config files copy: src: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs.example" dest: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs" when: ps_user is undefined or ps_user == "" + tags: + - build - name: Remove static assets from previous deployment, if any file: path: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static" state: absent + tags: + - build - name: Fetch environment variables (via access key) set_fact: @@ -49,7 +63,8 @@ path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is defined tags: - - chain_vars_update + - update_vars + - build - name: Fetch environment variables (via profile) set_fact: @@ -58,7 +73,8 @@ path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is undefined tags: - - chain_vars_update + - update_vars + - build - name: Make config variables lowercase set_fact: @@ -69,12 +85,15 @@ lower_env: {} custom_environment_chain: "{{ env_vars | default({}) if env_vars>0 else {} }}" tags: - - chain_vars_update + - update_vars + - build - name: Override env variables set_fact: env_compiled: "{{ env_compiled | combine(lower_env) }}" when: lower_env is defined + tags: + 
- build - name: Uppercase chain set_fact: @@ -82,18 +101,20 @@ with_dict: "{{ env_compiled }}" vars: upper_env: {} - + tags: + - build + - name: Add server port set_fact: server_port: "{{ 65535|random(seed=inventory_hostname,start=1024) }}" + tags: + - build - name: Combine server env set_fact: server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port}) }}" - -- name: Debug - debug: - var: server_env + tags: + - build - name: Compile BlockScout command: "mix do {{ item }}" @@ -108,32 +129,44 @@ - ecto.drop - ecto.create - ecto.migrate + tags: + - build - name: Install Node modules at apps/block_scout_web/assets environment: "{{ server_env }}" command: npm install args: chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" - + tags: + - build + - name: Execute webpack.js at apps/block_scout_web/assets/node_modules/webpack/bin environment: "{{ server_env }}" command: node_modules/webpack/bin/webpack.js --mode production args: chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets" + tags: + - build - name: Instal Node modules at apps/explorer environment: "{{ server_env }}" command: npm install args: chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer" + tags: + - build - name: Install SSL certificates environment: "{{ server_env }}" command: mix phx.gen.cert blockscout blockscout.local args: chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web" + tags: + - build - name: Start server + tags: + - build block: - name: Start server command: "mix phx.server" @@ -167,12 +200,16 @@ fail: msg: "Execution aborted" when: prompt is failed + tags: + - build - name: Build static assets environment: "{{ server_env }}" command: mix phx.digest args: chdir: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" + tags: + - build - name: User prompt pause: @@ -184,6 +221,8 @@ vars: conditional: ['yes','no','true','false'] when: inventory_hostname == groups['all'][0] 
+ tags: + - build - name: Remove dev dependencies file: @@ -196,20 +235,28 @@ - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/node_modules/" - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/logs/dev/" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + tags: + - build - name: Fix bug with favicon replace: regexp: '\"favicon\.ico\"\:\"favicon-[a-z0-9]+?\.ico\"' replace: '"images/favicon.ico":"favicon.ico"' path: "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json" + tags: + - build - name: Upload Blockscout to S3 command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" register: push_output + tags: + - deploy - name: Upload output debug: msg: "If deployment will fail, you can try to deploy blockscout manually using the following commands: {{ 'AWS_ACCESS_KEY=XXXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXX AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + tags: + - deploy - name: User prompt pause: @@ -222,7 +269,7 @@ conditional: ['yes','no','true','false'] when: inventory_hostname == groups['all'][0] tags: - - chain_vars_update + - update_vars - name: Update chain variables aws_ssm_parameter_store: @@ -240,7 +287,7 @@ with_dict: "{{ lower_env }}" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - - chain_vars_update + - update_vars - name: User prompt pause: @@ -252,7 +299,12 @@ vars: conditional: ['yes','no','true','false'] 
when: inventory_hostname == groups['all'][0] + tags: + - deploy - name: Deploy Blockscout - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile if aws_profile is defined else '' }}" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + tags: + - deploy + From 491e40fd5d1c210b65bd6d7542f9bc1cdecbe255 Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Tue, 11 Jun 2019 15:25:01 +0300 Subject: [PATCH 10/24] fix supported_chains v2 --- roles/main_software/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 8a08a1f..aa3b246 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -274,7 +274,7 @@ - name: Update chain variables aws_ssm_parameter_store: name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" - value: "{{ item.value }}" + value: " {{ item.value }} " profile: "{{ profile }}" aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" From 8eec0b23e004389e4de187703636e6bb14be0208 Mon Sep 17 00:00:00 2001 From: "a@a.ru" Date: Tue, 11 Jun 2019 15:44:07 +0300 Subject: [PATCH 11/24] change tasks order --- roles/main_software/tasks/main.yml | 27 
+++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index aa3b246..9bde84c 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -246,18 +246,6 @@ tags: - build -- name: Upload Blockscout to S3 - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" - register: push_output - tags: - - deploy - -- name: Upload output - debug: - msg: "If deployment will fail, you can try to deploy blockscout manually using the following commands: {{ 'AWS_ACCESS_KEY=XXXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXX AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" - tags: - - deploy - - name: User prompt pause: prompt: "Do you want to update the Parameter Store variables? 
[Yes/No]" @@ -302,6 +290,21 @@ tags: - deploy +- name: Upload Blockscout to S3 + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + register: push_output + when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + tags: + - deploy + +- name: Upload output + debug: + msg: "If deployment will fail, you can try to deploy blockscout manually using the following commands: {{ 'AWS_ACCESS_KEY=XXXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXX AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + tags: + - deploy + + - name: Deploy Blockscout command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile if aws_profile is defined else '' }}" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool From e2357c3807ec2c85c354fb7976ca1168c2b8d901 Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Mon, 17 Jun 2019 20:54:04 +0300 Subject: [PATCH 12/24] fix small bugs --- host_vars/all.yml.example | 3 ++- roles/main_software/tasks/main.yml | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git 
a/host_vars/all.yml.example b/host_vars/all.yml.example index 563365c..96295fa 100644 --- a/host_vars/all.yml.example +++ b/host_vars/all.yml.example @@ -31,7 +31,8 @@ env_vars: #LINK_TO_OTHER_EXPLORERS: "false" # If true, links to other explorers are added in the footer #USE_PLACEMENT_GROUP: "false" # If true, BlockScout instance will be created in the placement group ##The following variables are optional - #SUPPORTED_CHAINS: '[{ "title": "POA Core", "url": "https://blockscout.com/poa/core" }]' # JSON array with links to other exporers + ## SUPPORTED_CHAINS variable shoud have space before main content. This is due to the Ansible variable interpretation bug + #SUPPORTED_CHAINS: ' [{ "title": "POA Core", "url": "https://blockscout.com/poa/core" }]' # JSON array with links to other exporers #FIRST_BLOCK: 0 # The block number, where indexing begins from. #COINMARKETCAP_PAGES: 10 # Sets the number of pages at Coinmarketcap to search coin at. Defaults to 10 #METADATA_CONTRACT: # Address of metadata smart contract. 
Used by POA Network to obtain Validators information to display in the UI diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 9bde84c..e7fda0d 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -68,7 +68,7 @@ - name: Fetch environment variables (via profile) set_fact: - env_compiled: "{{ lookup('aws_ssm', path, aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}" + env_compiled: "{{ lookup('aws_ssm', path, region=aws_region|default('us-east-1'), aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}" vars: path: "/{{ group_names[0] }}/{{ chain }}" when: aws_access_key is undefined @@ -262,7 +262,7 @@ - name: Update chain variables aws_ssm_parameter_store: name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}" - value: " {{ item.value }} " + value: "{{ item.value }}" profile: "{{ profile }}" aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" @@ -291,7 +291,7 @@ - deploy - name: Upload Blockscout to S3 - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile~' --region='~aws_region if aws_profile is 
defined else '' }}" register: push_output when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: @@ -299,14 +299,14 @@ - name: Upload output debug: - msg: "If deployment will fail, you can try to deploy blockscout manually using the following commands: {{ 'AWS_ACCESS_KEY=XXXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXX AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + msg: "If deployment will fail, you can try to deploy blockscout manually using the following commands: {{ 'AWS_ACCESS_KEY=XXXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXX AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines }} {{ '--profile='~aws_profile~' --region'~aws_region if aws_profile is defined else '' }}" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - deploy - name: Deploy Blockscout - command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile if aws_profile is defined else '' }}" + command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile~' --region='~aws_region if aws_profile is defined else '' }}" when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - deploy From 620d14ba65149ad4cd359e43a6aff71c942180ee 
Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Sun, 23 Jun 2019 16:50:04 +0300 Subject: [PATCH 13/24] Allows user to define BUILD_ variables --- host_vars/all.yml.example | 2 +- roles/main_software/tasks/main.yml | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/host_vars/all.yml.example b/host_vars/all.yml.example index 96295fa..3a98457 100644 --- a/host_vars/all.yml.example +++ b/host_vars/all.yml.example @@ -47,4 +47,4 @@ env_vars: #SPANDEX_SYNC_THRESHOLD: # Spandex and Datadog configuration setting. #BLOCK_COUNT_CACHE_PERIOD: 600 #Time to live of block count cache in milliseconds #ALLOWED_EVM_VERSIONS: "homestead, tangerineWhistle, spuriousDragon, byzantium, constantinople, petersburg" # the comma-separated list of allowed EVM versions for contracts verification - + #BUILD_* - redefine variables with BUILD_ prefix to override parameters used for building the dev server diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index e7fda0d..51a3337 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -115,6 +115,13 @@ server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port}) }}" tags: - build + +- name: Override build variables + set_fact: + server_env: "{{ server_env | combine({item.key|regex_replace('BUILD_'):item.value}) if item.key | search('BUILD_') else server_env }}" + with_dict: "{{ server_env }}" + tags: + - build - name: Compile BlockScout command: "mix do {{ item }}" From 4ecac2083f201fb66d47125d834c3edd3d6f4c07 Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Sun, 23 Jun 2019 16:51:34 +0300 Subject: [PATCH 14/24] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 655fdbd..031d35f 100644 --- a/README.md +++ b/README.md @@ -83,6 +83,7 @@ In order to deploy BlockScout, you will have to setup the following set of files - `ansible_host` - is an address where BlockScout will be 
built. If this variable is set to localhost, also set `ansible_connection` to `local` for better performance. - `chain` variable set the name of the network (Kovan, Core, xDAI, etc.). Will be used as part of the infrastructure resource names. - `env_vars` represents a set of environment variables used by BlockScout. You can see the description of this variables at [POA Forum](https://forum.poa.network/t/faq-blockscout-environment-variables/1814). + - Also One can define `BULD_*` set of the variables, where asterisk stands for any environment variables. All variables defined with `BUILD_*` will override default variables while building the dev server. - `aws_access_key` and `aws_secret_key` is a credentials pair that provides access to AWS for the deployer; You can use the `aws_profile` instead. In that case, AWS CLI profile will be used. Also, if none of the access key and profile provided, the `default` AWS profile will be used. The `aws_region` should be left at `us-east-1` as some of the other regions fail for different reasons; - `backend` variable defines whether deployer should keep state files remote or locally. Set `backend` variable to `true` if you want to save state file to the remote S3 bucket; - `upload_config_to_s3` - set to `true` if you want to upload config `all.yml` file to the S3 bucket automatically after the deployment. 
Will not work if `backend` is set to false; From dac2b7dcf55a8cc54c3961f5f212d2da4706796d Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Fri, 28 Jun 2019 13:23:11 +0300 Subject: [PATCH 15/24] fix conditional bug and variables --- group_vars/all.yml.example | 6 ------ roles/main_software/tasks/main.yml | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/group_vars/all.yml.example b/group_vars/all.yml.example index d138603..9a039ad 100644 --- a/group_vars/all.yml.example +++ b/group_vars/all.yml.example @@ -1,9 +1,3 @@ -blockscout_repo: https://github.com/poanetwork/blockscout - -ps_host: localhost -ps_user: myuser -ps_password: mypass - # System variables ansible_python_interpreter: "/usr/bin/python3" diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 51a3337..9d3cec9 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -280,7 +280,7 @@ profile: "{{ aws_profile|default(omit) }}" region: "{{ aws_region|default(omit) }}" with_dict: "{{ lower_env }}" - when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool and lower_env is defined tags: - update_vars From 3110fd8b29dfae302324fd23866e3711014fd3a2 Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Fri, 28 Jun 2019 13:46:05 +0300 Subject: [PATCH 16/24] Improve readme: add info about tagging --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 031d35f..90e5424 100644 --- a/README.md +++ b/README.md @@ -215,7 +215,11 @@ Also, if you need to **distribute variables accross all the hosts/groups**, you 8. When the prompt appears, check that server is running and there is no visual artifacts. The server will be launched at port 4000 at the same machine where you run the Ansible playbooks. 
If you face any errors you can either fix it or cancel the deployment by pressing **Ctrl+C** and then pressing **A** when additionally prompted. 9. When server is ready to be deployed simply press enter and deployer will upload Blockscout to the appropriate S3. 10. Two other prompts will appear to ensure your will on updating the Parameter Store variables and deploying the BlockScout through the CodeDeploy. Both **yes** and **true** will be interpreted as the confirmation. -11. Monitor and manage your deployment at [CodeDeploy](https://console.aws.amazon.com/codesuite/codedeploy/applications) service page at AWS Console. +11. (optional) If the deployment fails, you can use the following tags to repeat the particular steps of the deployment: +- build +- update_vars +- deploy +12. Monitor and manage your deployment at [CodeDeploy](https://console.aws.amazon.com/codesuite/codedeploy/applications) service page at AWS Console. # Destroying Provisioned Infrastructure From f2d7456cd1320da8027d99a3f6940cc11b28f98e Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Tue, 2 Jul 2019 17:45:27 +0300 Subject: [PATCH 17/24] Initial TF12 commit --- .gitignore | 4 - roles/main_infra/files/deploy.tf | 15 ++- roles/main_infra/files/dns.tf | 15 ++- roles/main_infra/files/hosts.tf | 120 +++++++++++++++++ roles/main_infra/files/outputs.tf | 20 ++- roles/main_infra/files/provider.tf | 8 ++ roles/main_infra/files/rds.tf | 62 ++++----- roles/main_infra/files/routing.tf | 73 +++++++++++ roles/main_infra/files/security.tf | 71 +++++----- roles/main_infra/files/ssh.tf | 7 +- roles/main_infra/files/subnets.tf | 41 +++--- roles/main_infra/files/variables.tf | 46 +++++-- roles/main_infra/files/versions.tf | 4 + roles/main_infra/files/vpc.tf | 22 ++-- roles/main_infra/tasks/main.yml | 8 +- roles/main_infra/templates/hosts.tf.j2 | 122 ------------------ roles/main_infra/templates/provider.tf.j2 | 7 - roles/main_infra/templates/routing.tf.j2 | 76 ----------- 
.../main_infra/templates/terraform.tfvars.j2 | 11 ++ 19 files changed, 389 insertions(+), 343 deletions(-) create mode 100644 roles/main_infra/files/hosts.tf create mode 100644 roles/main_infra/files/provider.tf create mode 100644 roles/main_infra/files/routing.tf create mode 100644 roles/main_infra/files/versions.tf delete mode 100644 roles/main_infra/templates/hosts.tf.j2 delete mode 100644 roles/main_infra/templates/provider.tf.j2 delete mode 100644 roles/main_infra/templates/routing.tf.j2 diff --git a/.gitignore b/.gitignore index 4cf995b..2d06351 100644 --- a/.gitignore +++ b/.gitignore @@ -6,11 +6,7 @@ log.txt *terraform.tfstate.d* *tfplan* roles/main_infra/files/backend.tfvars -roles/main_infra/files/remote-backend-selector.tf roles/main_infra/files/terraform.tfvars -roles/main_infra/files/hosts.tf -roles/main_infra/files/routing.tf -roles/main_infra/files/provider.tf *.backup # Sensitive information diff --git a/roles/main_infra/files/deploy.tf b/roles/main_infra/files/deploy.tf index 6f1ab29..2f9c849 100644 --- a/roles/main_infra/files/deploy.tf +++ b/roles/main_infra/files/deploy.tf @@ -1,6 +1,6 @@ resource "aws_s3_bucket" "explorer_releases" { - bucket = "${var.prefix}-explorer-codedeploy-releases" - acl = "private" + bucket = "${var.prefix}-explorer-codedeploy-releases" + acl = "private" force_destroy = "true" versioning { @@ -13,11 +13,11 @@ resource "aws_codedeploy_app" "explorer" { } resource "aws_codedeploy_deployment_group" "explorer" { - count = "${length(var.chains)}" - app_name = "${aws_codedeploy_app.explorer.name}" + count = length(var.chains) + app_name = aws_codedeploy_app.explorer.name deployment_group_name = "${var.prefix}-explorer-dg${count.index}" - service_role_arn = "${aws_iam_role.deployer.arn}" - autoscaling_groups = ["${aws_launch_configuration.explorer.name}-asg-${element(var.chains,count.index)}"] + service_role_arn = aws_iam_role.deployer.arn + autoscaling_groups = 
["${aws_launch_configuration.explorer.name}-asg-${element(var.chains, count.index)}"] deployment_style { deployment_option = "WITH_TRAFFIC_CONTROL" @@ -26,7 +26,7 @@ resource "aws_codedeploy_deployment_group" "explorer" { load_balancer_info { target_group_info { - name = "${aws_lb_target_group.explorer.*.name[count.index]}" + name = aws_lb_target_group.explorer[count.index].name } } @@ -46,3 +46,4 @@ resource "aws_codedeploy_deployment_group" "explorer" { } } } + diff --git a/roles/main_infra/files/dns.tf b/roles/main_infra/files/dns.tf index 2ad81f1..27595a5 100644 --- a/roles/main_infra/files/dns.tf +++ b/roles/main_infra/files/dns.tf @@ -1,24 +1,25 @@ # Internal DNS Zone resource "aws_route53_zone" "main" { name = "${var.prefix}.${var.dns_zone_name}" - vpc_id = "${aws_vpc.vpc.id}" + vpc_id = aws_vpc.vpc.id - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } # Private DNS records resource "aws_route53_record" "db" { - zone_id = "${aws_route53_zone.main.zone_id}" + zone_id = aws_route53_zone.main.zone_id name = "db${count.index}" type = "A" - count = "${length(var.chains)}" + count = length(var.chains) alias { - name = "${aws_db_instance.default.*.address[count.index]}" - zone_id = "${aws_db_instance.default.*.hosted_zone_id[count.index]}" + name = aws_db_instance.default[count.index].address + zone_id = aws_db_instance.default[count.index].hosted_zone_id evaluate_target_health = false } } + diff --git a/roles/main_infra/files/hosts.tf b/roles/main_infra/files/hosts.tf new file mode 100644 index 0000000..8fd3f49 --- /dev/null +++ b/roles/main_infra/files/hosts.tf @@ -0,0 +1,120 @@ +data "aws_ami" "explorer" { + most_recent = true + + filter { + name = "name" + values = ["amzn2-ami-*-x86_64-gp2"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "owner-alias" + values = ["amazon"] + } +} + +resource "aws_launch_configuration" "explorer" { + name_prefix = 
"${var.prefix}-explorer-launchconfig" + image_id = data.aws_ami.explorer.id + instance_type = var.instance_type + security_groups = [aws_security_group.app.id] + key_name = var.key_name + iam_instance_profile = aws_iam_instance_profile.explorer.id + associate_public_ip_address = false + + depends_on = [aws_db_instance.default] + + user_data = file("${path.module}/libexec/init.sh") + + root_block_device { + volume_size = var.root_block_size + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_placement_group" "explorer" { + count = "var.use_placement_group[var.chains[count.index]] ? 1 : 0" + name = "${var.prefix}-${var.chains[count.index]}-explorer-pg" + strategy = "cluster" +} + +resource "aws_autoscaling_group" "explorer" { + count = "length(var.chains)" + name = "${var.prefix}-${var.chains[count.index]}-asg" + max_size = "4" + min_size = "1" + desired_capacity = "1" + launch_configuration = aws_launch_configuration.explorer.name + vpc_zone_identifier = [aws_subnet.default.id] + availability_zones = data.aws_availability_zones.available.names + target_group_arns = [aws_lb_target_group.explorer[0].arn] + placement_group = "${var.prefix}-${var.chains[count.index]}-explorer-pg : null" + + # Health checks are performed by CodeDeploy hooks + health_check_type = "EC2" + + enabled_metrics = [ + "GroupMinSize", + "GroupMaxSize", + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupTotalInstances", + ] + + depends_on = [ + aws_ssm_parameter.db_host, + aws_ssm_parameter.db_name, + aws_ssm_parameter.db_port, + aws_ssm_parameter.db_username, + aws_ssm_parameter.db_password, + aws_placement_group.explorer + ] + + lifecycle { + create_before_destroy = true + } + + tag { + key = "prefix" + value = var.prefix + propagate_at_launch = true + } + + tag { + key = "chain" + value = var.chains[count.index] + propagate_at_launch = true + } + + tag { + key = "Name" + value = "${var.chains[count.index]} Application" + propagate_at_launch = true + } +} + +# 
TODO: These autoscaling policies are not currently wired up to any triggers +resource "aws_autoscaling_policy" "explorer-up" { + count = "length(var.chains)" + name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-up" + autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name + adjustment_type = "ChangeInCapacity" + scaling_adjustment = 1 + cooldown = 300 +} + +resource "aws_autoscaling_policy" "explorer-down" { + name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-down" + autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name + adjustment_type = "ChangeInCapacity" + scaling_adjustment = -1 + cooldown = 300 +} + diff --git a/roles/main_infra/files/outputs.tf b/roles/main_infra/files/outputs.tf index 760f648..8ac81f1 100644 --- a/roles/main_infra/files/outputs.tf +++ b/roles/main_infra/files/outputs.tf @@ -11,7 +11,13 @@ To deploy a new version of the application manually: 2) Follow the instructions in the output from the `aws deploy push` command to deploy the uploaded application. Use the deployment group names shown below: - - ${join("\n - ", formatlist("%s", aws_codedeploy_deployment_group.explorer.*.deployment_group_name))} + - ${join( +"\n - ", +formatlist( +"%s", +aws_codedeploy_deployment_group.explorer.*.deployment_group_name, +), +)} You will also need to specify a deployment config name. 
Example: @@ -25,7 +31,15 @@ To deploy a new version of the application manually: 4) Once the deployment is complete, you can access each chain explorer from its respective url: - - ${join("\n - ", formatlist("%s: %s", keys(zipmap(var.chains, aws_lb.explorer.*.dns_name)), values(zipmap(var.chains, aws_lb.explorer.*.dns_name))))} + - ${join( +"\n - ", +formatlist( +"%s: %s", +keys(zipmap(var.chains, aws_lb.explorer.*.dns_name)), +values(zipmap(var.chains, aws_lb.explorer.*.dns_name)), +), +)} OUTPUT -} + + } diff --git a/roles/main_infra/files/provider.tf b/roles/main_infra/files/provider.tf new file mode 100644 index 0000000..6125fde --- /dev/null +++ b/roles/main_infra/files/provider.tf @@ -0,0 +1,8 @@ +provider "aws" { + version = "~> 2.17" + profile = var.aws_profile + access_key = var.aws_access_key + secret_key = var.aws_secret_key + region = var.aws_region +} + diff --git a/roles/main_infra/files/rds.tf b/roles/main_infra/files/rds.tf index de6ea70..c65ed25 100644 --- a/roles/main_infra/files/rds.tf +++ b/roles/main_infra/files/rds.tf @@ -1,61 +1,61 @@ resource "aws_ssm_parameter" "db_host" { - count = "${length(var.chains)}" - name = "/${var.prefix}/${element(var.chains,count.index)}/db_host" - value = "${aws_route53_record.db.*.fqdn[count.index]}" + count = length(var.chains) + name = "/${var.prefix}/${element(var.chains, count.index)}/db_host" + value = aws_route53_record.db[count.index].fqdn type = "String" } resource "aws_ssm_parameter" "db_port" { - count = "${length(var.chains)}" - name = "/${var.prefix}/${element(var.chains,count.index)}/db_port" - value = "${aws_db_instance.default.*.port[count.index]}" + count = length(var.chains) + name = "/${var.prefix}/${element(var.chains, count.index)}/db_port" + value = aws_db_instance.default[count.index].port type = "String" } resource "aws_ssm_parameter" "db_name" { - count = "${length(var.chains)}" - name = "/${var.prefix}/${element(var.chains,count.index)}/db_name" - value = 
"${lookup(var.chain_db_name,element(var.chains,count.index))}" + count = length(var.chains) + name = "/${var.prefix}/${element(var.chains, count.index)}/db_name" + value = var.chain_db_name[element(var.chains, count.index)] type = "String" } resource "aws_ssm_parameter" "db_username" { - count = "${length(var.chains)}" - name = "/${var.prefix}/${element(var.chains,count.index)}/db_username" - value = "${lookup(var.chain_db_username,element(var.chains,count.index))}" + count = length(var.chains) + name = "/${var.prefix}/${element(var.chains, count.index)}/db_username" + value = var.chain_db_username[element(var.chains, count.index)] type = "String" } resource "aws_ssm_parameter" "db_password" { - count = "${length(var.chains)}" - name = "/${var.prefix}/${element(var.chains,count.index)}/db_password" - value = "${lookup(var.chain_db_password,element(var.chains,count.index))}" + count = length(var.chains) + name = "/${var.prefix}/${element(var.chains, count.index)}/db_password" + value = var.chain_db_password[element(var.chains, count.index)] type = "String" } resource "aws_db_instance" "default" { - count = "${length(var.chains)}" - name = "${lookup(var.chain_db_name,element(var.chains,count.index))}" - identifier = "${var.prefix}-${lookup(var.chain_db_id,element(var.chains,count.index))}" + count = length(var.chains) + name = var.chain_db_name[element(var.chains, count.index)] + identifier = "${var.prefix}-${var.chain_db_id[element(var.chains, count.index)]}" engine = "postgres" - engine_version = "${lookup(var.chain_db_version,element(var.chains,count.index))}" - instance_class = "${lookup(var.chain_db_instance_class,element(var.chains,count.index))}" - storage_type = "${lookup(var.chain_db_storage_type,element(var.chains,count.index))}" - allocated_storage = "${lookup(var.chain_db_storage,element(var.chains,count.index))}" + engine_version = var.chain_db_version[element(var.chains, count.index)] + instance_class = var.chain_db_instance_class[element(var.chains, 
count.index)] + storage_type = var.chain_db_storage_type[element(var.chains, count.index)] + allocated_storage = var.chain_db_storage[element(var.chains, count.index)] copy_tags_to_snapshot = true skip_final_snapshot = true - username = "${lookup(var.chain_db_username,element(var.chains,count.index))}" - password = "${lookup(var.chain_db_password,element(var.chains,count.index))}" - vpc_security_group_ids = ["${aws_security_group.database.id}"] - db_subnet_group_name = "${aws_db_subnet_group.database.id}" + username = var.chain_db_username[element(var.chains, count.index)] + password = var.chain_db_password[element(var.chains, count.index)] + vpc_security_group_ids = [aws_security_group.database.id] + db_subnet_group_name = aws_db_subnet_group.database.id apply_immediately = true - iops = "${lookup(var.chain_db_iops,element(var.chains,count.index),"0")}" + iops = lookup(var.chain_db_iops, element(var.chains, count.index), "0") + depends_on = [aws_security_group.database] - depends_on = ["aws_security_group.database"] - - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } + diff --git a/roles/main_infra/files/routing.tf b/roles/main_infra/files/routing.tf new file mode 100644 index 0000000..784ad91 --- /dev/null +++ b/roles/main_infra/files/routing.tf @@ -0,0 +1,73 @@ +# Create a gateway to provide access to the outside world +resource "aws_internet_gateway" "default" { + vpc_id = aws_vpc.vpc.id + + tags = { + prefix = var.prefix + origin = "terraform" + } +} + +# Grant the VPC internet access in its main route table +resource "aws_route" "internet_access" { + route_table_id = aws_vpc.vpc.main_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.default.id +} + +# The ALB for the app server +resource "aws_lb" "explorer" { + count = length(var.chains) + name = "${var.prefix}-explorer-${element(var.chains, count.index)}-alb" + internal = false + load_balancer_type = "application" + 
security_groups = [aws_security_group.alb.id] + subnets = [aws_subnet.default.id, aws_subnet.alb.id] + + enable_deletion_protection = false + + tags = { + prefix = var.prefix + origin = "terraform" + } +} + +# The Target Group for the ALB +resource "aws_lb_target_group" "explorer" { + count = length(var.chains) + name = "${var.prefix}-explorer-${element(var.chains, count.index)}-alb-target" + port = 4000 + protocol = "HTTP" + vpc_id = aws_vpc.vpc.id + tags = { + prefix = var.prefix + origin = "terraform" + } + stickiness { + type = "lb_cookie" + cookie_duration = 600 + enabled = true + } + health_check { + healthy_threshold = 2 + unhealthy_threshold = 2 + timeout = 15 + interval = 30 + path = "/blocks" + port = 4000 + } +} + +resource "aws_alb_listener" "alb_listener" { + count = "length(var.chains)" + load_balancer_arn = aws_lb.explorer[count.index].arn + port = var.use_ssl[element(var.chains, count.index)] ? "443" : "80" + protocol = var.use_ssl[element(var.chains, count.index)] ? "HTTPS" : "HTTP" + ssl_policy = var.use_ssl[element(var.chains, count.index)] ? var.alb_ssl_policy[element(var.chains, count.index)] : null + certificate_arn = var.use_ssl[element(var.chains, count.index)] ? 
var.alb_certificate_arn[element(var.chains, count.index)] : null + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.explorer[count.index].arn + } +} + diff --git a/roles/main_infra/files/security.tf b/roles/main_infra/files/security.tf index a461061..169dd8e 100644 --- a/roles/main_infra/files/security.tf +++ b/roles/main_infra/files/security.tf @@ -63,7 +63,7 @@ data "aws_iam_policy_document" "codedeploy-policy" { actions = ["s3:Get*", "s3:List*"] resources = [ - "${aws_s3_bucket.explorer_releases.arn}", + aws_s3_bucket.explorer_releases.arn, "${aws_s3_bucket.explorer_releases.arn}/*", "arn:aws:s3:::aws-codedeploy-us-east-1/*", "arn:aws:s3:::aws-codedeploy-us-east-2/*", @@ -90,38 +90,38 @@ data "aws_iam_policy" "AmazonEC2RoleForSSM" { } resource "aws_iam_role_policy_attachment" "ec2-codedeploy-policy-attachment" { - role = "${aws_iam_role.role.name}" - policy_arn = "${data.aws_iam_policy.AmazonEC2RoleForAWSCodeDeploy.arn}" + role = aws_iam_role.role.name + policy_arn = data.aws_iam_policy.AmazonEC2RoleForAWSCodeDeploy.arn } resource "aws_iam_role_policy_attachment" "ec2-ssm-policy-attachment" { - role = "${aws_iam_role.role.name}" - policy_arn = "${data.aws_iam_policy.AmazonEC2RoleForSSM.arn}" + role = aws_iam_role.role.name + policy_arn = data.aws_iam_policy.AmazonEC2RoleForSSM.arn } resource "aws_iam_instance_profile" "explorer" { name = "${var.prefix}-explorer-profile" - role = "${aws_iam_role.role.name}" + role = aws_iam_role.role.name path = "/${var.prefix}/" } resource "aws_iam_role_policy" "config" { name = "${var.prefix}-config-policy" - role = "${aws_iam_role.role.id}" - policy = "${data.aws_iam_policy_document.config-policy.json}" + role = aws_iam_role.role.id + policy = data.aws_iam_policy_document.config-policy.json } resource "aws_iam_role" "role" { name = "${var.prefix}-explorer-role" description = "The IAM role given to each Explorer instance" path = "/${var.prefix}/" - assume_role_policy = 
"${data.aws_iam_policy_document.instance-assume-role-policy.json}" + assume_role_policy = data.aws_iam_policy_document.instance-assume-role-policy.json } resource "aws_iam_role_policy" "deployer" { name = "${var.prefix}-codedeploy-policy" - role = "${aws_iam_role.deployer.id}" - policy = "${data.aws_iam_policy_document.codedeploy-policy.json}" + role = aws_iam_role.deployer.id + policy = data.aws_iam_policy_document.codedeploy-policy.json } data "aws_iam_policy" "AWSCodeDeployRole" { @@ -129,21 +129,21 @@ data "aws_iam_policy" "AWSCodeDeployRole" { } resource "aws_iam_role_policy_attachment" "codedeploy-policy-attachment" { - role = "${aws_iam_role.deployer.name}" - policy_arn = "${data.aws_iam_policy.AWSCodeDeployRole.arn}" + role = aws_iam_role.deployer.name + policy_arn = data.aws_iam_policy.AWSCodeDeployRole.arn } resource "aws_iam_role" "deployer" { name = "${var.prefix}-deployer-role" description = "The IAM role given to the CodeDeploy service" - assume_role_policy = "${data.aws_iam_policy_document.deployer-assume-role-policy.json}" + assume_role_policy = data.aws_iam_policy_document.deployer-assume-role-policy.json } # A security group for the ALB so it is accessible via the web resource "aws_security_group" "alb" { name = "${var.prefix}-poa-alb" description = "A security group for the app server ALB, so it is accessible via the web" - vpc_id = "${aws_vpc.vpc.id}" + vpc_id = aws_vpc.vpc.id # HTTP from anywhere ingress { @@ -152,11 +152,11 @@ resource "aws_security_group" "alb" { protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] } - + ingress { - from_port = 4000 - to_port = 4000 - protocol = "tcp" + from_port = 4000 + to_port = 4000 + protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] } @@ -176,8 +176,8 @@ resource "aws_security_group" "alb" { cidr_blocks = ["0.0.0.0/0"] } - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } @@ -185,21 +185,21 @@ resource "aws_security_group" "alb" { resource "aws_security_group" "app" { name = 
"${var.prefix}-poa-app" description = "A security group for the app server, allowing SSH and HTTP(S)" - vpc_id = "${aws_vpc.vpc.id}" + vpc_id = aws_vpc.vpc.id # HTTP from the VPC ingress { from_port = 80 to_port = 80 protocol = "tcp" - cidr_blocks = ["${var.vpc_cidr}"] + cidr_blocks = [var.vpc_cidr] } - + ingress { - from_port = 4000 - to_port = 4000 - protocol = "tcp" - cidr_blocks = ["${var.vpc_cidr}"] + from_port = 4000 + to_port = 4000 + protocol = "tcp" + cidr_blocks = [var.vpc_cidr] } # HTTPS from the VPC @@ -207,7 +207,7 @@ resource "aws_security_group" "app" { from_port = 443 to_port = 443 protocol = "tcp" - cidr_blocks = ["${var.vpc_cidr}"] + cidr_blocks = [var.vpc_cidr] } # SSH from anywhere @@ -226,8 +226,8 @@ resource "aws_security_group" "app" { cidr_blocks = ["0.0.0.0/0"] } - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } @@ -235,14 +235,14 @@ resource "aws_security_group" "app" { resource "aws_security_group" "database" { name = "${var.prefix}-poa-database" description = "Allow any inbound traffic from public/private subnet" - vpc_id = "${aws_vpc.vpc.id}" + vpc_id = aws_vpc.vpc.id # Allow anything from within the app server subnet ingress { from_port = 0 to_port = 65535 protocol = "tcp" - cidr_blocks = ["${var.public_subnet_cidr}"] + cidr_blocks = [var.public_subnet_cidr] } # Unrestricted outbound @@ -253,8 +253,9 @@ resource "aws_security_group" "database" { cidr_blocks = ["0.0.0.0/0"] } - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } + diff --git a/roles/main_infra/files/ssh.tf b/roles/main_infra/files/ssh.tf index 6410bad..af2a92d 100644 --- a/roles/main_infra/files/ssh.tf +++ b/roles/main_infra/files/ssh.tf @@ -1,5 +1,6 @@ resource "aws_key_pair" "blockscout" { - count = "${var.key_content == "" ? 0 : 1}" - key_name = "${var.key_name}" - public_key = "${var.key_content}" + count = var.key_content == "" ? 
0 : 1 + key_name = var.key_name + public_key = var.key_content } + diff --git a/roles/main_infra/files/subnets.tf b/roles/main_infra/files/subnets.tf index 069a105..6cec93c 100644 --- a/roles/main_infra/files/subnets.tf +++ b/roles/main_infra/files/subnets.tf @@ -1,44 +1,44 @@ ## Public subnet resource "aws_subnet" "default" { - vpc_id = "${aws_vpc.vpc.id}" - cidr_block = "${var.public_subnet_cidr}" - availability_zone = "${data.aws_availability_zones.available.names[0]}" + vpc_id = aws_vpc.vpc.id + cidr_block = var.public_subnet_cidr + availability_zone = data.aws_availability_zones.available.names[0] map_public_ip_on_launch = true - tags { + tags = { Name = "${var.prefix}-default-subnet" - prefix = "${var.prefix}" + prefix = var.prefix origin = "terraform" } } ## ALB subnet resource "aws_subnet" "alb" { - vpc_id = "${aws_vpc.vpc.id}" - cidr_block = "${var.public_subnet_cidr}" - cidr_block = "${cidrsubnet(var.db_subnet_cidr, 5, 1)}" - availability_zone = "${data.aws_availability_zones.available.names[1]}" + vpc_id = aws_vpc.vpc.id + cidr_block = var.public_subnet_cidr + cidr_block = cidrsubnet(var.db_subnet_cidr, 5, 1) + availability_zone = data.aws_availability_zones.available.names[1] map_public_ip_on_launch = true - tags { + tags = { Name = "${var.prefix}-default-subnet" - prefix = "${var.prefix}" + prefix = var.prefix origin = "terraform" } } ## Database subnet resource "aws_subnet" "database" { - count = "${length(data.aws_availability_zones.available.names)}" - vpc_id = "${aws_vpc.vpc.id}" - cidr_block = "${cidrsubnet(var.db_subnet_cidr, 8, 1 + count.index)}" - availability_zone = "${data.aws_availability_zones.available.names[count.index]}" + count = length(data.aws_availability_zones.available.names) + vpc_id = aws_vpc.vpc.id + cidr_block = cidrsubnet(var.db_subnet_cidr, 8, 1 + count.index) + availability_zone = data.aws_availability_zones.available.names[count.index] map_public_ip_on_launch = false - tags { + tags = { Name = 
"${var.prefix}-database-subnet${count.index}" - prefix = "${var.prefix}" + prefix = var.prefix origin = "terraform" } } @@ -46,10 +46,11 @@ resource "aws_subnet" "database" { resource "aws_db_subnet_group" "database" { name = "${var.prefix}-database" description = "The group of database subnets" - subnet_ids = ["${aws_subnet.database.*.id}"] + subnet_ids = aws_subnet.database.*.id - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } + diff --git a/roles/main_infra/files/variables.tf b/roles/main_infra/files/variables.tf index d9b97a5..4e1f18a 100644 --- a/roles/main_infra/files/variables.tf +++ b/roles/main_infra/files/variables.tf @@ -1,14 +1,37 @@ -variable "prefix" {} -variable "key_name" {} -variable "vpc_cidr" {} -variable "public_subnet_cidr" {} -variable "db_subnet_cidr" {} -variable "dns_zone_name" {} -variable "instance_type" {} -variable "root_block_size" {} +variable "aws_profile" { + default = "null" +} + +variable "aws_region" { + default = "null" +} + +variable "prefix" { +} + +variable "key_name" { +} + +variable "vpc_cidr" { +} + +variable "public_subnet_cidr" { +} + +variable "db_subnet_cidr" { +} + +variable "dns_zone_name" { +} + +variable "instance_type" { +} + +variable "root_block_size" { +} variable "pool_size" { - default = {} + default = {} } variable "use_placement_group" { @@ -60,7 +83,7 @@ variable "chain_db_version" { } variable "secret_key_base" { - default = {} + default = {} } variable "alb_ssl_policy" { @@ -72,5 +95,6 @@ variable "alb_certificate_arn" { } variable "use_ssl" { - default = {} + default = {} } + diff --git a/roles/main_infra/files/versions.tf b/roles/main_infra/files/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/roles/main_infra/files/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/roles/main_infra/files/vpc.tf b/roles/main_infra/files/vpc.tf index 9794917..ddd1ba9 100644 --- a/roles/main_infra/files/vpc.tf 
+++ b/roles/main_infra/files/vpc.tf @@ -6,31 +6,33 @@ # - A private subnet # - NAT to give the private subnet access to internet -data "aws_availability_zones" "available" {} +data "aws_availability_zones" "available" { +} resource "aws_vpc" "vpc" { - cidr_block = "${var.vpc_cidr}" + cidr_block = var.vpc_cidr enable_dns_hostnames = true enable_dns_support = true - tags { - Name = "${var.prefix}" - prefix = "${var.prefix}" + tags = { + Name = var.prefix + prefix = var.prefix origin = "terraform" } } resource "aws_vpc_dhcp_options" "poa_dhcp" { - domain_name = "${var.dns_zone_name}" + domain_name = var.dns_zone_name domain_name_servers = ["AmazonProvidedDNS"] - tags { - prefix = "${var.prefix}" + tags = { + prefix = var.prefix origin = "terraform" } } resource "aws_vpc_dhcp_options_association" "poa_dhcp" { - vpc_id = "${aws_vpc.vpc.id}" - dhcp_options_id = "${aws_vpc_dhcp_options.poa_dhcp.id}" + vpc_id = aws_vpc.vpc.id + dhcp_options_id = aws_vpc_dhcp_options.poa_dhcp.id } + diff --git a/roles/main_infra/tasks/main.yml b/roles/main_infra/tasks/main.yml index 9eb203e..d7f674c 100644 --- a/roles/main_infra/tasks/main.yml +++ b/roles/main_infra/tasks/main.yml @@ -51,12 +51,6 @@ - "/tmp/files-{{ group_names[0] }}/terraform.tfstate.backup" - "/tmp/files-{{ group_names[0] }}/terraform.tfplan" -- name: Generate Terraform files - template: - src: "{{ item.key }}" - dest: "{{ item.value }}" - with_dict: { hosts.tf.j2: "/tmp/files-{{ group_names[0] }}/hosts.tf", routing.tf.j2: "/tmp/files-{{ group_names[0] }}/routing.tf", provider.tf.j2: "/tmp/files-{{ group_names[0] }}/provider.tf" } - #Workaround since terraform module return unexpected error. 
- name: Terraform plan construct shell: "echo yes | {{ terraform_location }} {{ item }}" @@ -66,7 +60,7 @@ with_items: - "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}" - plan -out terraform.tfplan - - show terraform.tfplan -no-color + - show terraform.tfplan -json -no-color - name: Show Terraform plan debug: diff --git a/roles/main_infra/templates/hosts.tf.j2 b/roles/main_infra/templates/hosts.tf.j2 deleted file mode 100644 index 1b3bd7d..0000000 --- a/roles/main_infra/templates/hosts.tf.j2 +++ /dev/null @@ -1,122 +0,0 @@ -data "aws_ami" "explorer" { - most_recent = true - - filter { - name = "name" - values = ["amzn2-ami-*-x86_64-gp2"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "owner-alias" - values = ["amazon"] - } -} - -resource "aws_launch_configuration" "explorer" { - name_prefix = "${var.prefix}-explorer-launchconfig" - image_id = "${data.aws_ami.explorer.id}" - instance_type = "${var.instance_type}" - security_groups = ["${aws_security_group.app.id}"] - key_name = "${var.key_name}" - iam_instance_profile = "${aws_iam_instance_profile.explorer.id}" - associate_public_ip_address = false - - depends_on = ["aws_db_instance.default"] - - user_data = "${file("${path.module}/libexec/init.sh")}" - - root_block_device { - volume_size = "${var.root_block_size}" - } - - lifecycle { - create_before_destroy = true - } -} - -{% for key in groups[group_names[0]] %} -{% if use_placement_group | default('true') == "true" %} -resource "aws_placement_group" "explorer-{{ hostvars[key]['chain'] }}" { - name = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-pg" - strategy = "cluster" -} -{% endif %} -{% endfor %} - -{% for key in groups[group_names[0]] %} -resource "aws_autoscaling_group" "explorer-{{ hostvars[key]['chain'] }}" { - name = "${aws_launch_configuration.explorer.name}-asg-{{ hostvars[key]['chain'] }}" - max_size = "4" - min_size = "1" - desired_capacity = "1" -{% if 
use_placement_group | default('false') == "true" %} placement_group = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-pg" -{% endif %} - launch_configuration = "${aws_launch_configuration.explorer.name}" - vpc_zone_identifier = ["${aws_subnet.default.id}"] - availability_zones = ["${data.aws_availability_zones.available.names}"] - target_group_arns = ["${aws_lb_target_group.explorer.*.arn[{{loop.index-1}}]}"] - - # Health checks are performed by CodeDeploy hooks - health_check_type = "EC2" - - enabled_metrics = [ - "GroupMinSize", - "GroupMaxSize", - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupTotalInstances", - ] - - depends_on = [ - "aws_ssm_parameter.db_host", - "aws_ssm_parameter.db_name", - "aws_ssm_parameter.db_port", - "aws_ssm_parameter.db_username", - "aws_ssm_parameter.db_password" - ] - - lifecycle { - create_before_destroy = true - } - - tag { - key = "prefix" - value = "${var.prefix}" - propagate_at_launch = true - } - - tag { - key = "chain" - value = "{{ hostvars[key]['chain'] }}" - propagate_at_launch = true - } - - tag { - key = "Name" - value = "{{ hostvars[key]['chain'] }} Application" - propagate_at_launch = true - } -} - -# TODO: These autoscaling policies are not currently wired up to any triggers -resource "aws_autoscaling_policy" "explorer-up-{{ hostvars[key]['chain'] }}" { - name = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-autoscaling-policy-up" - autoscaling_group_name = "${aws_autoscaling_group.explorer-{{ hostvars[key]['chain'] }}.name}" - adjustment_type = "ChangeInCapacity" - scaling_adjustment = 1 - cooldown = 300 -} - -resource "aws_autoscaling_policy" "explorer-down-{{ hostvars[key]['chain'] }}" { - name = "${var.prefix}-{{ hostvars[key]['chain'] }}-explorer-autoscaling-policy-down" - autoscaling_group_name = "${aws_autoscaling_group.explorer-{{ hostvars[key]['chain'] }}.name}" - adjustment_type = "ChangeInCapacity" - scaling_adjustment = -1 - cooldown = 300 -} -{% endfor %} diff --git 
a/roles/main_infra/templates/provider.tf.j2 b/roles/main_infra/templates/provider.tf.j2 deleted file mode 100644 index 76bed10..0000000 --- a/roles/main_infra/templates/provider.tf.j2 +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - version = "~> 1.15" -{% if aws_access_key is undefined %} - profile = "{{ aws_profile|default("default") }}" -{% endif %} - region = "{{ aws_region|default("us-east-1") }}" -} diff --git a/roles/main_infra/templates/routing.tf.j2 b/roles/main_infra/templates/routing.tf.j2 deleted file mode 100644 index a6bebe6..0000000 --- a/roles/main_infra/templates/routing.tf.j2 +++ /dev/null @@ -1,76 +0,0 @@ -# Create a gateway to provide access to the outside world -resource "aws_internet_gateway" "default" { - vpc_id = "${aws_vpc.vpc.id}" - - tags { - prefix = "${var.prefix}" - origin = "terraform" - } -} - -# Grant the VPC internet access in its main route table -resource "aws_route" "internet_access" { - route_table_id = "${aws_vpc.vpc.main_route_table_id}" - destination_cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.default.id}" -} - -# The ALB for the app server -resource "aws_lb" "explorer" { - count = "${length(var.chains)}" - name = "${var.prefix}-explorer-${element(var.chains,count.index)}-alb" - internal = false - load_balancer_type = "application" - security_groups = ["${aws_security_group.alb.id}"] - subnets = ["${aws_subnet.default.id}", "${aws_subnet.alb.id}"] - - enable_deletion_protection = false - - tags { - prefix = "${var.prefix}" - origin = "terraform" - } -} - -# The Target Group for the ALB -resource "aws_lb_target_group" "explorer" { - count = "${length(var.chains)}" - name = "${var.prefix}-explorer-${element(var.chains,count.index)}-alb-target" - port = 4000 - protocol = "HTTP" - vpc_id = "${aws_vpc.vpc.id}" - tags { - prefix = "${var.prefix}" - origin = "terraform" - } - stickiness { - type = "lb_cookie" - cookie_duration = 600 - enabled = true - } - health_check { - healthy_threshold = 2 - 
unhealthy_threshold = 2 - timeout = 15 - interval = 30 - path = "/blocks" - port = 4000 - } -} - -{% for host in groups[group_names[0]] %} -resource "aws_alb_listener" "alb_listener{{loop.index-1}}" { - load_balancer_arn = "${aws_lb.explorer.*.arn[{{loop.index-1}}]}" - port = "${lookup(var.use_ssl,element(var.chains,{{loop.index-1}})) ? "443" : "80" }" - protocol = "${lookup(var.use_ssl,element(var.chains,{{loop.index-1}})) ? "HTTPS" : "HTTP" }" -{% if hostvars[host]['env_vars']['ECTO_USE_SSL']|default('false') == "true" %} - ssl_policy = "${lookup(var.alb_ssl_policy,element(var.chains,{{loop.index-1}}))}" - certificate_arn = "${lookup(var.alb_certificate_arn,element(var.chains,{{loop.index-1}}))}" -{% endif %} - default_action { - type = "forward" - target_group_arn = "${aws_lb_target_group.explorer.*.arn[{{loop.index-1}}]}" - } -} - -{% endfor %} diff --git a/roles/main_infra/templates/terraform.tfvars.j2 b/roles/main_infra/templates/terraform.tfvars.j2 index 1a08696..b06b2dc 100644 --- a/roles/main_infra/templates/terraform.tfvars.j2 +++ b/roles/main_infra/templates/terraform.tfvars.j2 @@ -1,3 +1,8 @@ +aws_profile = "{{ aws_profile|default("default") if aws_access_key is not defined or aws_access_key='' else 'null' }}" +aws_access_key = "{{ aws_access_key | default("null") }}" +aws_secret_key = "{{ aws_secret_key | default("null") }}" +aws_region = "{{ aws_region | default("us-east-1") }}" + prefix = "{{ group_names[0] }}" key_name = "{{ ec2_ssh_key_name }}" key_content = "{{ ec2_ssh_key_content }}" @@ -8,6 +13,12 @@ dns_zone_name = "{{ dns_zone_name }}" instance_type = "{{ instance_type }}" root_block_size = "{{ root_block_size }}" +use_placement_group = { +{% for host in groups[group_names[0]] %} +{{ hostvars[host]['chain'] }}="{{ hostvars[host]['use_placement_group'] | default('false') }}"{% if not loop.last %},{% endif %} +{% endfor %} +} + pool_size = { {% for host in groups[group_names[0]] %} {{ hostvars[host]['chain'] }}="{{ 
hostvars[host]['env_vars']['POOL_SIZE'] | default('30') }}"{% if not loop.last %},{% endif %} From 973c02f7b15fab5cc3ce2742e18446aae1fd6a6c Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Thu, 4 Jul 2019 15:08:36 +0300 Subject: [PATCH 18/24] Update destroy role, fix bugs --- ansible.cfg | 1 + destroy.yml | 3 +- roles/destroy/tasks/main.yml | 81 ++++++++++++++----- roles/main_infra/files/deploy.tf | 2 +- roles/main_infra/files/dns.tf | 4 +- roles/main_infra/files/hosts.tf | 15 ++-- roles/main_infra/files/routing.tf | 2 +- roles/main_infra/files/subnets.tf | 2 +- roles/main_infra/files/variables.tf | 12 ++- roles/main_infra/tasks/main.yml | 17 ++-- .../templates/remote-backend-selector.tf.j2 | 2 +- .../main_infra/templates/terraform.tfvars.j2 | 11 ++- 12 files changed, 98 insertions(+), 54 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index e7f61a5..f7c3f62 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -6,3 +6,4 @@ deprecation_warnings = False host_key_checking = false log_path = log.txt hash_behaviour = merge +display_skipped_hosts = false diff --git a/destroy.yml b/destroy.yml index e4eea51..cdf5892 100644 --- a/destroy.yml +++ b/destroy.yml @@ -1,8 +1,7 @@ - name: Destroy infrastructure hosts: all - serial: 1 roles: - - { role: destroy, when: "confirmation|bool == True" } + - { role: destroy, when: "confirmation|bool == True and inventory_hostname == groups[group_names[0]][0]" } vars_prompt: - name: "confirmation" prompt: "Are you sure you want to destroy all the infra?" 
diff --git a/roles/destroy/tasks/main.yml b/roles/destroy/tasks/main.yml index dd741b0..90e6a1a 100644 --- a/roles/destroy/tasks/main.yml +++ b/roles/destroy/tasks/main.yml @@ -1,78 +1,115 @@ +- name: Ansible delete file glob + find: + paths: /tmp/ + file_type: directory + patterns: "files-{{ group_names[0] }}" + register: files_to_delete + +- name: Ansible remove file glob + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" + +- name: Copy files + copy: + src: "roles/main_infra/files/" + dest: "/tmp/files-{{ group_names[0] }}/" + - name: Local or remote backend selector (remote) template: src: roles/main_infra/templates/remote-backend-selector.tf.j2 - dest: roles/main_infra/files/remote-backend-selector.tf + dest: "/tmp/files-{{ group_names[0] }}/remote-backend-selector.tf" when: - backend|bool == true - name: Local or remote backend selector (local) file: state: absent - dest: roles/main_infra/files/remote-backend-selector.tf + dest: "/tmp/files-{{ group_names[0] }}/" when: - not backend | default ('false') | bool - name: Generating variables file template: src: roles/main_infra/templates/terraform.tfvars.j2 - dest: roles/main_infra/files/terraform.tfvars + dest: "/tmp/files-{{ group_names[0] }}/terraform.tfvars" - name: Generating backend file template: src: roles/main_infra/templates/backend.tfvars.j2 - dest: roles/main_infra/files/backend.tfvars + dest: "/tmp/files-{{ group_names[0] }}/backend.tfvars" when: backend | bool -- name: Generate Terraform files - template: - src: "{{ item.key }}" - dest: "{{ item.value }}" - with_dict: { roles/main_infra/templates/hosts.tf.j2: roles/main_infra/files/hosts.tf, roles/main_infra/templates/routing.tf.j2: roles/main_infra/files/routing.tf, roles/main_infra/templates/provider.tf.j2: roles/main_infra/files/provider.tf } - -# This is due to the TF0.11 bug which do not allow to completely destroy resources if interpolation syntax is used in outputs.tf at edge cases +# This is due to 
the TF0.11-12 bug which do not allow to completely destroy resources if interpolation syntax is used in outputs.tf at edge cases - name: Check if outputs.tf exists - stat: path=roles/main_infra/files/outputs.tf + stat: + path: "/tmp/files-{{ group_names[0] }}/outputs.tf" register: outputs_stat - name: Temporarily remove outputs.tf file - command: mv roles/main_infra/files/outputs.tf roles/main_infra/files/outputs.tf.backup + command: "mv /tmp/files-{{ group_names[0] }}/outputs.tf /tmp/files-{{ group_names[0] }}/outputs.tf.backup" when: outputs_stat.stat.exists - name: Check if .terraform folder exists stat: - path: "roles/main_infra/files/.terraform/" + path: "/tmp/files-{{ group_names[0] }}/.terraform/" register: stat_result - name: Remove .terraform folder file: - path: roles/main_infra/files/.terraform/ + path: "/tmp/files-{{ group_names[0] }}/.terraform/" state: absent when: stat_result.stat.exists -- name: Terraform destroy main infra +- name: Terraform plan to destroy main infra shell: "echo yes | {{ terraform_location }} {{ item }}" args: - chdir: "roles/main_infra/files" + chdir: "/tmp/files-{{ group_names[0] }}/" with_items: - - "init {{ '-backend-config=backend.tfvars' if backend|bool == true else '' }}" - - destroy + - "init {{ '-backend-config=backend.tfvars' if backend|bool else '' }}" + - plan -destroy -out terraform.tfplan + - show -no-color terraform.tfplan + register: tf_plan + +- name: Terraform show destroy plan + debug: + var: tf_plan.results[2].stdout_lines + +- name: User prompt + pause: + prompt: "Are you absolutely sure you want to execute the destruction plan shown above? 
[False]" + register: user_answer + until: user_answer.user_input | lower in conditional + retries: 10000 + delay: 1 + vars: + conditional: ['yes','no','true','false'] + when: inventory_hostname == groups['all'][0] + +- name: Terraform destroy + shell: "{{ terraform_location }} destroy -auto-approve" + args: + chdir: "/tmp/files-{{ group_names[0] }}" + when: hostvars[groups['all'][0]].user_answer.user_input | bool - name: Delete vars from parameter store include: parameter_store.yml - name: Check if outputs.tf.backup exists - stat: path=roles/main_infra/files/outputs.tf.backup + stat: + path: "/tmp/files-{{ group_names[0] }}/outputs.tf.backup" register: outputs_backup_stat - name: Get back outputs.tf file - command: mv roles/main_infra/files/outputs.tf.backup roles/main_infra/files/outputs.tf + command: "mv /tmp/files-{{ group_names[0] }}/outputs.tf.backup /tmp/files-{{ group_names[0] }}/outputs.tf" when: outputs_backup_stat.stat.exists - name: User prompt pause: prompt: "Do you want to delete S3 bucket with state file and DynamoDB attached to it also? 
[Yes/No] Default: No" register: user_answer - until: user_answer.user_input | lower in conditional + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 vars: diff --git a/roles/main_infra/files/deploy.tf b/roles/main_infra/files/deploy.tf index 2f9c849..97b5eb9 100644 --- a/roles/main_infra/files/deploy.tf +++ b/roles/main_infra/files/deploy.tf @@ -17,7 +17,7 @@ resource "aws_codedeploy_deployment_group" "explorer" { app_name = aws_codedeploy_app.explorer.name deployment_group_name = "${var.prefix}-explorer-dg${count.index}" service_role_arn = aws_iam_role.deployer.arn - autoscaling_groups = ["${aws_launch_configuration.explorer.name}-asg-${element(var.chains, count.index)}"] + autoscaling_groups = [aws_autoscaling_group.explorer[count.index].name] deployment_style { deployment_option = "WITH_TRAFFIC_CONTROL" diff --git a/roles/main_infra/files/dns.tf b/roles/main_infra/files/dns.tf index 27595a5..49cb409 100644 --- a/roles/main_infra/files/dns.tf +++ b/roles/main_infra/files/dns.tf @@ -1,7 +1,9 @@ # Internal DNS Zone resource "aws_route53_zone" "main" { name = "${var.prefix}.${var.dns_zone_name}" - vpc_id = aws_vpc.vpc.id + vpc { + vpc_id = aws_vpc.vpc.id + } tags = { prefix = var.prefix diff --git a/roles/main_infra/files/hosts.tf b/roles/main_infra/files/hosts.tf index 8fd3f49..8b783e3 100644 --- a/roles/main_infra/files/hosts.tf +++ b/roles/main_infra/files/hosts.tf @@ -1,5 +1,6 @@ data "aws_ami" "explorer" { most_recent = true + owners = ["amazon"] filter { name = "name" @@ -10,11 +11,6 @@ data "aws_ami" "explorer" { name = "virtualization-type" values = ["hvm"] } - - filter { - name = "owner-alias" - values = ["amazon"] - } } resource "aws_launch_configuration" "explorer" { @@ -40,13 +36,13 @@ resource "aws_launch_configuration" "explorer" { } resource "aws_placement_group" "explorer" { - count = "var.use_placement_group[var.chains[count.index]] ? 
1 : 0" + count = length(matchkeys(keys(var.use_placement_group),values(var.use_placement_group),["True"])) name = "${var.prefix}-${var.chains[count.index]}-explorer-pg" strategy = "cluster" } resource "aws_autoscaling_group" "explorer" { - count = "length(var.chains)" + count = length(var.chains) name = "${var.prefix}-${var.chains[count.index]}-asg" max_size = "4" min_size = "1" @@ -55,7 +51,7 @@ resource "aws_autoscaling_group" "explorer" { vpc_zone_identifier = [aws_subnet.default.id] availability_zones = data.aws_availability_zones.available.names target_group_arns = [aws_lb_target_group.explorer[0].arn] - placement_group = "${var.prefix}-${var.chains[count.index]}-explorer-pg : null" + placement_group = var.use_placement_group[var.chains[count.index]] == "True" ? "${var.prefix}-${var.chains[count.index]}-explorer-pg" : null # Health checks are performed by CodeDeploy hooks health_check_type = "EC2" @@ -102,7 +98,7 @@ resource "aws_autoscaling_group" "explorer" { # TODO: These autoscaling policies are not currently wired up to any triggers resource "aws_autoscaling_policy" "explorer-up" { - count = "length(var.chains)" + count = length(var.chains) name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-up" autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name adjustment_type = "ChangeInCapacity" @@ -111,6 +107,7 @@ resource "aws_autoscaling_policy" "explorer-up" { } resource "aws_autoscaling_policy" "explorer-down" { + count = length(var.chains) name = "${var.prefix}-${var.chains[count.index]}-explorer-autoscaling-policy-down" autoscaling_group_name = aws_autoscaling_group.explorer[count.index].name adjustment_type = "ChangeInCapacity" diff --git a/roles/main_infra/files/routing.tf b/roles/main_infra/files/routing.tf index 784ad91..cd5e506 100644 --- a/roles/main_infra/files/routing.tf +++ b/roles/main_infra/files/routing.tf @@ -59,7 +59,7 @@ resource "aws_lb_target_group" "explorer" { } resource "aws_alb_listener" 
"alb_listener" { - count = "length(var.chains)" + count = length(var.chains) load_balancer_arn = aws_lb.explorer[count.index].arn port = var.use_ssl[element(var.chains, count.index)] ? "443" : "80" protocol = var.use_ssl[element(var.chains, count.index)] ? "HTTPS" : "HTTP" diff --git a/roles/main_infra/files/subnets.tf b/roles/main_infra/files/subnets.tf index 6cec93c..2db51b2 100644 --- a/roles/main_infra/files/subnets.tf +++ b/roles/main_infra/files/subnets.tf @@ -16,7 +16,7 @@ resource "aws_subnet" "default" { ## ALB subnet resource "aws_subnet" "alb" { vpc_id = aws_vpc.vpc.id - cidr_block = var.public_subnet_cidr + #cidr_block = var.public_subnet_cidr cidr_block = cidrsubnet(var.db_subnet_cidr, 5, 1) availability_zone = data.aws_availability_zones.available.names[1] map_public_ip_on_launch = true diff --git a/roles/main_infra/files/variables.tf b/roles/main_infra/files/variables.tf index 4e1f18a..c7d8775 100644 --- a/roles/main_infra/files/variables.tf +++ b/roles/main_infra/files/variables.tf @@ -1,9 +1,17 @@ variable "aws_profile" { - default = "null" + default = null } variable "aws_region" { - default = "null" + default = null +} + +variable "aws_access_key" { + default = null +} + +variable "aws_secret_key" { + default = null } variable "prefix" { diff --git a/roles/main_infra/tasks/main.yml b/roles/main_infra/tasks/main.yml index d7f674c..b2aad5b 100644 --- a/roles/main_infra/tasks/main.yml +++ b/roles/main_infra/tasks/main.yml @@ -58,9 +58,9 @@ args: chdir: "/tmp/files-{{ group_names[0] }}" with_items: - - "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}" + - "init{{ ' -backend-config=backend.tfvars' if backend|bool else '' }}" - plan -out terraform.tfplan - - show terraform.tfplan -json -no-color + - show -no-color terraform.tfplan - name: Show Terraform plan debug: @@ -86,13 +86,10 @@ args: chdir: "/tmp/files-{{ group_names[0] }}" when: hostvars[groups['all'][0]].user_answer.user_input | bool - ignore_errors: True - -- name: 
Ensure Terraform resources has been provisioned - shell: "echo yes | {{ terraform_location }} apply" - args: - chdir: "/tmp/files-{{ group_names[0] }}" - when: hostvars[groups['all'][0]].user_answer.user_input | bool + retries: 1 + delay: 3 + register: result + until: result.rc == 0 - name: Terraform output info into variable shell: "{{ terraform_location }} output -json" @@ -103,7 +100,7 @@ - name: Output info from Terraform debug: - var: output.stdout_lines + var: (output.stdout|from_json).instructions.value when: hostvars[groups['all'][0]].user_answer.user_input | bool - name: Ansible delete file glob diff --git a/roles/main_infra/templates/remote-backend-selector.tf.j2 b/roles/main_infra/templates/remote-backend-selector.tf.j2 index 56f0b2a..8d97b71 100644 --- a/roles/main_infra/templates/remote-backend-selector.tf.j2 +++ b/roles/main_infra/templates/remote-backend-selector.tf.j2 @@ -1,6 +1,6 @@ terraform { backend "s3" { - {% if aws_access_key is undefined %} + {% if aws_access_key is undefined or aws_access_key == '' %} profile = "{{ aws_profile|default("default") }}" {% else %} access_key = "{{ aws_access_key }}" diff --git a/roles/main_infra/templates/terraform.tfvars.j2 b/roles/main_infra/templates/terraform.tfvars.j2 index b06b2dc..2e16110 100644 --- a/roles/main_infra/templates/terraform.tfvars.j2 +++ b/roles/main_infra/templates/terraform.tfvars.j2 @@ -1,7 +1,10 @@ -aws_profile = "{{ aws_profile|default("default") if aws_access_key is not defined or aws_access_key='' else 'null' }}" -aws_access_key = "{{ aws_access_key | default("null") }}" -aws_secret_key = "{{ aws_secret_key | default("null") }}" -aws_region = "{{ aws_region | default("us-east-1") }}" +{% if aws_access_key is undefined or aws_access_key == '' %} +aws_profile = "{{ aws_profile|default('default') }}" +{% else %} +aws_access_key = "{{ aws_access_key | default('null') }}" +aws_secret_key = "{{ aws_secret_key | default('null') }}" +{% endif %} +aws_region = "{{ aws_region | 
default('us-east-1') }}" prefix = "{{ group_names[0] }}" key_name = "{{ ec2_ssh_key_name }}" From ce09070b86af7763c7db5e8e74fcc5a5d3c8185e Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Thu, 4 Jul 2019 17:07:10 +0300 Subject: [PATCH 19/24] fix variable bug --- roles/main_software/tasks/main.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 9d3cec9..a52e771 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -79,11 +79,10 @@ - name: Make config variables lowercase set_fact: lower_env: "{{ lower_env | combine ({item.key|lower : item.value}) }}" - with_dict: "{{ custom_environment_chain }}" - when: custom_environment_chain|length > 0 + with_dict: "{{ env_vars }}" + when: env_vars|length > 0 vars: lower_env: {} - custom_environment_chain: "{{ env_vars | default({}) if env_vars>0 else {} }}" tags: - update_vars - build @@ -123,6 +122,10 @@ tags: - build +- name: Show Server environment variables + debug: + var: server_env + - name: Compile BlockScout command: "mix do {{ item }}" args: @@ -288,7 +291,7 @@ pause: prompt: "Do you want to deploy BlockScout? [Yes/No]" register: user_answer - until: user_answer.user_input | lower in conditional + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 vars: From fdcf6efc561fc82fe2a7b237d1abe0c6a2ee7fef Mon Sep 17 00:00:00 2001 From: Symanovich Siarhei Date: Thu, 8 Aug 2019 12:43:30 +0300 Subject: [PATCH 20/24] accept enter key or other --- roles/main_software/tasks/main.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index a52e771..4d76a36 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -225,7 +225,7 @@ pause: prompt: "Would you like to remove staging dependencies? 
[Yes/No]" register: user_answer - until: user_answer.user_input | lower in conditional + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 vars: @@ -244,7 +244,7 @@ - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets/node_modules/" - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/node_modules/" - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}/logs/dev/" - when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + when: hostvars[groups['all'][0]].user_answer.user_input == "" or hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - build @@ -260,11 +260,11 @@ pause: prompt: "Do you want to update the Parameter Store variables? [Yes/No]" register: user_answer - until: user_answer.user_input | lower in conditional + until: user_answer.user_input | lower in conditional retries: 10000 delay: 1 vars: - conditional: ['yes','no','true','false'] + conditional: ["",'yes','no','true','false'] when: inventory_hostname == groups['all'][0] tags: - update_vars @@ -283,7 +283,7 @@ profile: "{{ aws_profile|default(omit) }}" region: "{{ aws_region|default(omit) }}" with_dict: "{{ lower_env }}" - when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool and lower_env is defined + when: hostvars[groups['all'][0]].user_answer.user_input == "" or hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - update_vars @@ -295,7 +295,7 @@ retries: 10000 delay: 1 vars: - conditional: ['yes','no','true','false'] + conditional: ["",'yes','no','true','false'] when: inventory_hostname == groups['all'][0] tags: - deploy @@ -303,21 +303,21 @@ - name: Upload Blockscout to S3 command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] 
}}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=/tmp/blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile~' --region='~aws_region if aws_profile is defined else '' }}" register: push_output - when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + when: hostvars[groups['all'][0]].user_answer.user_input == "" or hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - deploy - name: Upload output debug: msg: "If deployment will fail, you can try to deploy blockscout manually using the following commands: {{ 'AWS_ACCESS_KEY=XXXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXX AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines }} {{ '--profile='~aws_profile~' --region'~aws_region if aws_profile is defined else '' }}" - when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + when: hostvars[groups['all'][0]].user_answer.user_input == "" or hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - deploy - name: Deploy Blockscout command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime {{ '--profile='~aws_profile~' --region='~aws_region if aws_profile is defined else '' }}" - when: hostvars[groups['all'][0]].user_answer.user_input | lower | bool + when: hostvars[groups['all'][0]].user_answer.user_input == "" or hostvars[groups['all'][0]].user_answer.user_input | lower | bool tags: - deploy From 338f05ab290ac71edecbe1f8f842b1b57bf10dfe Mon Sep 17 00:00:00 2001 From: Symanovich Siarhei Date: Thu, 8 Aug 2019 13:05:11 +0300 Subject: [PATCH 21/24] fix clean.yml, env_vars in 'Make config variables lowercase' --- 
clean.yml | 4 ++-- roles/main_software/tasks/main.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clean.yml b/clean.yml index cd9af4c..6c54acf 100644 --- a/clean.yml +++ b/clean.yml @@ -12,5 +12,5 @@ - "roles/main_infra/files/backend.tfvars" - "roles/main_infra/files/terraform.tfplan" - "log.txt" - - "blockscout-*" - - "/tmp/files-*" \ No newline at end of file + - "/tmp/blockscout-{{ group_names[0] }}-{{ chain }}" + - "/tmp/files-{{ group_names[0] }}" diff --git a/roles/main_software/tasks/main.yml b/roles/main_software/tasks/main.yml index 4d76a36..3296f2b 100644 --- a/roles/main_software/tasks/main.yml +++ b/roles/main_software/tasks/main.yml @@ -80,7 +80,7 @@ set_fact: lower_env: "{{ lower_env | combine ({item.key|lower : item.value}) }}" with_dict: "{{ env_vars }}" - when: env_vars|length > 0 + when: env_vars is defined vars: lower_env: {} tags: From 5d888c494af1e96a28b1e225d5bb68bdc673f1ac Mon Sep 17 00:00:00 2001 From: Symanovich Siarhei Date: Thu, 8 Aug 2019 13:24:59 +0300 Subject: [PATCH 22/24] changed skip_fetch to false --- host_vars/blockscout.yml.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/host_vars/blockscout.yml.example b/host_vars/blockscout.yml.example index af7063e..1d3ffc2 100644 --- a/host_vars/blockscout.yml.example +++ b/host_vars/blockscout.yml.example @@ -1,4 +1,4 @@ -skip_fetch: true +skip_fetch: false blockscout_repo: https://github.com/poanetwork/blockscout branch: "production-core" From 4609d5b9c0ce388260c8a7a88a4e7f4159b2af28 Mon Sep 17 00:00:00 2001 From: sergey Date: Fri, 16 Aug 2019 18:57:45 +0300 Subject: [PATCH 23/24] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 90e5424..25d31d5 100644 --- a/README.md +++ b/README.md @@ -150,6 +150,7 @@ Where `[group]` is a group name, which will be interpreted as a `prefix` for all ```bash cat host_vars/infrastructure.yml.example host_vars/all.yml.example > host_vars/host.yml ``` 
+ 5. For each group merge `infrastructure.yml.example` and `all.yml.example` config template files in `group_vars` folder into single config file with the same name as group name in `hosts` file: ```bash From f28ec00c1d66f70eb8025e0e49c2c86201f2ba33 Mon Sep 17 00:00:00 2001 From: sergey Date: Fri, 16 Aug 2019 18:59:21 +0300 Subject: [PATCH 24/24] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 25d31d5..a7cffa3 100644 --- a/README.md +++ b/README.md @@ -203,12 +203,14 @@ Where `[group]` is a group name, which will be interpreted as a `prefix` for all cat host_vars/blockscout.yml.example host_vars/all.yml.example > host_vars/host.yml ``` If you have already merged `infrastructure.yml.example` and `all.yml` while deploying the BlockScout infrastructure, you can simply add the `blockscout.yml.example` to the merged file: `cat host_vars/blockscout.yml.example >> host_vars/host.yml` + 5. For each group merge `blockscout.yml.example` and `all.yml.example` config template files in `group_vars` folder into single config file with the same name as group name in `hosts` file: ```bash cat group_vars/blockscout.yml.example group_vars/all.yml.example > group_vars/group.yml ``` If you have already merged `infrastructure.yml.example` and `all.yml` while deploying the BlockScout infrastructure, you can simply add the `blockscout.yml.example` to the merged file: `cat group_vars/blockscout.yml.example >> group_vars/host.yml` + 6. Adjust the variables at `group_vars` and `host_vars`. Note - you can move variables between host and group vars depending on if variable should be applied to the host or to the entire group. The list of the variables you can find at the [corresponding part of instruction](#Configuration); Also, if you need to **distribute variables accross all the hosts/groups**, you can add these variables to the `group_vars/all.yml` file. 
Note about variable precedence => [Official Ansible Docs](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable).