add possibility to deploy all BlockScouts at once

a@a.ru 2019-05-20 23:38:14 +03:00
parent eb72725ef7
commit f31883411b
22 changed files with 305 additions and 308 deletions

.gitignore

@@ -20,6 +20,10 @@ roles/main_infra/files/provider.tf
/PREFIX
group_vars/*
host_vars/*
!host_vars/all.yml.example
!host_vars/blockscout.yml.example
!host_vars/infrastructure.yml.example
!group_vars/all.yml.example
!group_vars/blockscout.yml.example
!group_vars/infrastructure.yml.example
@@ -29,3 +33,5 @@ group_vars/*
.*.swp
blockscout-*/
hosts


@@ -1,14 +1,10 @@
- name: Deploy BlockScout
hosts: localhost
hosts: all
tasks:
- block:
- name: Use role in loop
- name: Deploy
include_role:
name: main_software
loop: "{{ chain_custom_environment.keys() }}"
loop_control:
loop_var: chain
index_var: index
always:
- include_role:
name: s3
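Switching the play from hosts: localhost to hosts: all means the included role now runs once per inventory host instead of looping over chain_custom_environment, so every BlockScout in the inventory can be built in a single run, or the run can be narrowed with Ansible's standard --limit option. A minimal usage sketch (the playbook filename deploy.yml is an assumption, not part of this commit):

    ansible-playbook deploy.yml              # build every BlockScout host in the inventory at once
    ansible-playbook deploy.yml --limit poa  # build only the hosts in the [poa] group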


@@ -18,52 +18,3 @@ upload_debug_info_to_s3: true
## The bucket and dynamodb_table variables will be used only when backend variable is set to true
## Name of the bucket where TF state files will be stored
bucket: "poa-terraform-state"
## All resources will be prefixed with this one
prefix: "poa"
## This dictionary represents a set of environment variables required for each chain. Variables that are commented out are optional.
chain_custom_environment:
core:
NETWORK: "(POA)" # Name of the organization/community that hosts the chain
SUBNETWORK: "Core Network" # Actual name of the particular network
NETWORK_ICON: "_network_icon.html" # Either _test_network_icon.html or _network_icon.html, depending on the type of the network (prod/test).
LOGO: "/images/blockscout_logo.svg" # Chain logo
ETHEREUM_JSONRPC_VARIANT: "parity" # Chain client installed at ETHEREUM_JSONRPC_HTTP_URL
ETHEREUM_JSONRPC_HTTP_URL: "http://localhost:8545" # Network RPC endpoint
ETHEREUM_JSONRPC_TRACE_URL: "http://localhost:8545" # Network RPC endpoint in trace mode. Can be the same as the previous variable
ETHEREUM_JSONRPC_WS_URL: "ws://localhost:8546" # Network RPC endpoint in websocket mode
NETWORK_PATH: "/poa/core" # relative URL path, for example: blockscout.com/$NETWORK_PATH
SECRET_KEY_BASE: "TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==" # Secret key for production assets protection. Use `mix phx.gen.secret` or `openssl rand -base64 64 | tr -d '\n'` to generate
PORT: 4000 # Port the application runs on
COIN: "POA" # Coin name at the Coinmarketcap, used to display current exchange rate
POOL_SIZE: 20 # Defines the number of database connections allowed
ECTO_USE_SSL: "false" # Specifies whether or not to use SSL on Ecto queries
ALB_SSL_POLICY: "ELBSecurityPolicy-2016-08" #SSL policy for Load Balancer. Required if ECTO_USE_SSL is set to true
ALB_CERTIFICATE_ARN: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24" #ARN of the certificate to attach to the LB. Required if ECTO_USE_SSL is set to true
HEART_BEAT_TIMEOUT: 30 # Heartbeat is an Erlang monitoring service that will restart BlockScout if it becomes unresponsive. This variable configures the timeout before BlockScout will be restarted.
HEART_COMMAND: "sudo systemctl restart explorer.service" # This variable represents a command that is used to restart the service
BLOCKSCOUT_VERSION: "v1.3.11-beta" # Added to the footer to signify the current BlockScout version
RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.9-beta" # The link to Blockscout release notes in the footer.
ELIXIR_VERSION: "v1.8.1" # Elixir version to install on the node before Blockscout deploy
BLOCK_TRANSFORMER: "base" # Transformer for blocks: base or clique.
GRAPHIQL_TRANSACTION: "0xbc426b4792c48d8ca31ec9786e403866e14e7f3e4d39c7f2852e518fae529ab4" # Random tx hash on the network, used as default for graphiql tx.
TXS_COUNT_CACHE_PERIOD: 7200 # Interval in seconds to restart the task, which calculates the total txs count.
ADDRESS_WITH_BALANCES_UPDATE_INTERVAL: 1800 #Interval in seconds to restart the task, which calculates addresses with balances
LINK_TO_OTHER_EXPLORERS: "false" # If true, links to other explorers are added in the footer
USE_PLACEMENT_GROUP: "false" # If true, BlockScout instance will be created in the placement group
#The following variables are optional
#FIRST_BLOCK: 0 # The block number, where indexing begins from.
#COINMARKETCAP_PAGES: 10 # Sets the number of pages at Coinmarketcap to search coin at. Defaults to 10
#METADATA_CONTRACT: # Address of metadata smart contract. Used by POA Network to obtain Validators information to display in the UI
#VALIDATORS_CONTRACT: # Address of the Emission Fund smart contract
#SUPPLY_MODULE: "false" # Used by the xDai Chain to calculate the total supply of the chain
#SOURCE_MODULE: "false" # Used to calculate the total supply
#DATABASE_URL: # Database URL. Usually generated automatically, but this variable can be used to modify the URL of the databases during the updates.
#CHECK_ORIGIN: "false" # Used to check the origin of requests when the origin header is present
#DATADOG_HOST: # Host configuration variable for Datadog integration
#DATADOG_PORT: # Port configuration variable for Datadog integration
#SPANDEX_BATCH_SIZE: # Spandex and Datadog configuration setting.
#SPANDEX_SYNC_THRESHOLD: # Spandex and Datadog configuration setting.
#BLOCK_COUNT_CACHE_TTL: #Time to live of block count cache in milliseconds


@@ -1,26 +0,0 @@
# BlockScout related variables
## Exact path to the TF binary on your local machine
terraform_location: "/usr/local/bin/terraform"
## An address of BlockScout repo to download
blockscout_repo: https://github.com/poanetwork/blockscout
## A branch at `blockscout_repo` with ready-to-deploy version of BlockScout
chain_branch:
core: "production-core"
sokol: "production-sokol"
## Usually you don't want to merge branches, so it is commented out by default
#chain_merge_commit:
# core: "2cdead1"
# sokol: "2cdead1"
## If you want, you can download and configure the repo on your own. It should have the following name - blockscout-{{ chain_name }} - and exist inside the root playbook folder. Use the following variable to prevent the playbooks from overriding it
skip_fetch: false
## Login data for the test database. Please use a Postgres database with the version specified in the BlockScout repo prerequisites
ps_host: localhost
ps_user: myuser
ps_password: mypass
ps_db: mydb


@@ -1,4 +1,4 @@
# Infrastructure related variables
# Infrastructure related group variables
## Name of the DynamoDB table where current lease of TF state file will be stored
dynamodb_table: "poa-terraform-lock"
@@ -7,9 +7,6 @@ dynamodb_table: "poa-terraform-lock"
ec2_ssh_key_name: "sokol-test"
ec2_ssh_key_content: ""
## EC2 Instance will have the following size:
instance_type: "m5.large"
## VPC containing Blockscout resources will be created as follows:
vpc_cidr: "10.0.0.0/16"
public_subnet_cidr: "10.0.0.0/24"
@@ -23,48 +20,3 @@ dns_zone_name: "poa.internal"
## Size of the EC2 instance EBS root volume
root_block_size: 120
# DB related variables
## This value represents the name of the DB that will be created/attached. Must be unique. Will be prefixed with `prefix` variable.
chain_db_id:
core: "core"
sokol: "sokol"
## Each network should have its own DB. This variable maps chains to DB names. It should not be confused with the db_id variable, which represents the RDS instance ID.
chain_db_name:
core: "core"
sokol: "sokol"
## The following variables describe the DB configuration for each network, including usernames, passwords, instance class, etc.
chain_db_username:
core: "core"
sokol: "sokol"
chain_db_password:
core: "fkowfjpoi309021"
sokol: "kopsdOPpa9213K"
chain_db_instance_class:
core: "db.m4.xlarge"
sokol: "db.m4.large"
## Size of storage in GiB.
chain_db_storage:
core: "200"
sokol: "100"
## Type of disk to be used for the DB.
chain_db_storage_type:
core: "io1"
sokol: "gp2"
## This should be set only if chain_db_storage_type is set to io1
#chain_db_iops:
# core: "1000"
# sokol: "1500"
## Blockscout uses Postgres as the DB engine. This variable describes the Postgres version used in each particular chain.
chain_db_version:
core: "10.5"
sokol: "10.6"

host_vars/all.yml.example

@@ -0,0 +1,47 @@
ansible_host: localhost # Address of the machine where BlockScout staging will be built
ansible_connection: local # Comment out if your ansible_host is not localhost
chain: poa # Represents the chain name; does not have to be unique
env_vars:
#NETWORK: "(POA)" # Name of the organization/community that hosts the chain
#SUBNETWORK: "Core Network" # Actual name of the particular network
#NETWORK_ICON: "_network_icon.html" # Either _test_network_icon.html or _network_icon.html, depending on the type of the network (prod/test).
#LOGO: "/images/blockscout_logo.svg" # Chain logo
#ETHEREUM_JSONRPC_VARIANT: "parity" # Chain client installed at ETHEREUM_JSONRPC_HTTP_URL
#ETHEREUM_JSONRPC_HTTP_URL: "http://localhost:8545" # Network RPC endpoint
#ETHEREUM_JSONRPC_TRACE_URL: "http://localhost:8545" # Network RPC endpoint in trace mode. Can be the same as the previous variable
#ETHEREUM_JSONRPC_WS_URL: "ws://localhost:8546" # Network RPC endpoint in websocket mode
#NETWORK_PATH: "/poa/core" # relative URL path, for example: blockscout.com/$NETWORK_PATH
#SECRET_KEY_BASE: "TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==" # Secret key for production assets protection. Use `mix phx.gen.secret` or `openssl rand -base64 64 | tr -d '\n'` to generate
#PORT: 4000 # Port the application runs on
#COIN: "POA" # Coin name at the Coinmarketcap, used to display current exchange rate
#POOL_SIZE: 20 # Defines the number of database connections allowed
#ECTO_USE_SSL: "false" # Specifies whether or not to use SSL on Ecto queries
#ALB_SSL_POLICY: "ELBSecurityPolicy-2016-08" #SSL policy for Load Balancer. Required if ECTO_USE_SSL is set to true
#ALB_CERTIFICATE_ARN: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24" #ARN of the certificate to attach to the LB. Required if ECTO_USE_SSL is set to true
#HEART_BEAT_TIMEOUT: 30 # Heartbeat is an Erlang monitoring service that will restart BlockScout if it becomes unresponsive. This variable configures the timeout before BlockScout will be restarted.
#HEART_COMMAND: "sudo systemctl restart explorer.service" # This variable represents a command that is used to restart the service
BLOCKSCOUT_VERSION: "v1.3.13-beta" # Added to the footer to signify the current BlockScout version
RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta" # The link to Blockscout release notes in the footer.
#ELIXIR_VERSION: "v1.8.1" # Elixir version to install on the node before Blockscout deploy
#BLOCK_TRANSFORMER: "base" # Transformer for blocks: base or clique.
#GRAPHIQL_TRANSACTION: "0xbc426b4792c48d8ca31ec9786e403866e14e7f3e4d39c7f2852e518fae529ab4" # Random tx hash on the network, used as default for graphiql tx.
#TXS_COUNT_CACHE_PERIOD: 7200 # Interval in seconds to restart the task, which calculates the total txs count.
#ADDRESS_WITH_BALANCES_UPDATE_INTERVAL: 1800 #Interval in seconds to restart the task, which calculates addresses with balances
#LINK_TO_OTHER_EXPLORERS: "false" # If true, links to other explorers are added in the footer
#USE_PLACEMENT_GROUP: "false" # If true, BlockScout instance will be created in the placement group
##The following variables are optional
#FIRST_BLOCK: 0 # The block number, where indexing begins from.
#COINMARKETCAP_PAGES: 10 # Sets the number of pages at Coinmarketcap to search coin at. Defaults to 10
#METADATA_CONTRACT: # Address of metadata smart contract. Used by POA Network to obtain Validators information to display in the UI
#VALIDATORS_CONTRACT: # Address of the Emission Fund smart contract
#SUPPLY_MODULE: "false" # Used by the xDai Chain to calculate the total supply of the chain
#SOURCE_MODULE: "false" # Used to calculate the total supply
#DATABASE_URL: # Database URL. Usually generated automatically, but this variable can be used to modify the URL of the databases during the updates.
#CHECK_ORIGIN: "false" # Used to check the origin of requests when the origin header is present
#DATADOG_HOST: # Host configuration variable for Datadog integration
#DATADOG_PORT: # Port configuration variable for Datadog integration
#SPANDEX_BATCH_SIZE: # Spandex and Datadog configuration setting.
#SPANDEX_SYNC_THRESHOLD: # Spandex and Datadog configuration setting.
#BLOCK_COUNT_CACHE_TTL: #Time to live of block count cache in milliseconds
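Since env_vars now live in host_vars, deploying several BlockScouts at once comes down to adding one such file per inventory host. A minimal sketch of a second host file, say host_vars/sokol.yml for an inventory host named sokol (the filename and values are illustrative, not part of this commit):

    ansible_host: localhost  # build machine for this chain
    ansible_connection: local
    chain: sokol             # chain name, used in the blockscout-<group>-<chain> build directory
    env_vars:
      BLOCKSCOUT_VERSION: "v1.3.13-beta"
      RELEASE_LINK: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta"
      #ETHEREUM_JSONRPC_HTTP_URL: "http://localhost:8545"  # any variable from the example above can be overridden here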


@@ -0,0 +1,5 @@
skip_fetch: true
blockscout_repo: https://github.com/poanetwork/blockscout
branch: "production-core"
#merge_commit: "2cdead1"


@@ -0,0 +1,22 @@
terraform_location: "/usr/local/bin/terraform"
db_id: "core" # This value represents the name of the DB that will be created/attached. Must be unique. Will be prefixed with `prefix` variable.
db_name: "core" # Each network should have its own DB. This variable maps the chain to a DB name. It should not be confused with the db_id variable, which represents the RDS instance ID.
## The following variables describe the DB configuration for this chain, including username, password, instance class, etc.
db_username: "core"
db_password: "fkowfjpoi309021"
db_instance_class: "db.t3.medium"
db_storage: "100" # in GiB
db_storage_type: "gp2" # see https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html for details
#db_iops: "1000" # This should be set only if db_storage_type is set to `io1`
db_version: "10.6" #Blockscout uses Postgres as the DB engine. This variable describes the Postgres version used in each particular chain.
instance_type: "m5.large" # EC2 BlockScout Instance will have this type
use_placement_group: false # Choose whether or not to group BlockScout instances into a placement group
# Please, specify the credentials for the test Postgres installation
ps_host: localhost
ps_user: myuser
ps_password: mypass
ps_db: mydb

hosts

@@ -1 +0,0 @@
localhost ansible_connection=local

hosts.example

@@ -0,0 +1,11 @@
# Each group and host name must be unique
[poa]
sokol
core
[eth]
kovan
main
ropst
rink
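Each name above is both an inventory host and the key its variables are looked up by, while the first group a host belongs to replaces the old prefix variable in the Terraform templates and in the S3/DynamoDB resource names. A sketch of the variables layout this implies (file names illustrative; these are the same paths the s3 role uploads later in this diff):

    hosts                 # copied from hosts.example
    group_vars/
      all.yml             # settings shared by every host
      poa.yml             # settings shared by the [poa] group
    host_vars/
      core.yml            # per-chain settings: chain, env_vars, db_*, ...
      sokol.yml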


@@ -103,6 +103,21 @@ log "Setting up application environment.."
mkdir -p /opt/app
chown -R ec2-user /opt/app
log "Creating logrotate config"
cat <<EOF > /etc/logrotate.d/blockscout
/var/log/messages* {
rotate 5
size 1G
compress
missingok
delaycompress
copytruncate
}
EOF
log "Creating explorer systemd service.."
cat <<EOF > /lib/systemd/system/explorer.service


@@ -3,27 +3,27 @@
src: remote-backend-selector.tf.j2
dest: roles/main_infra/files/remote-backend-selector.tf
when:
- backend|bool == true
- backend|bool
- name: Local or remote backend selector (local)
file:
state: absent
dest: roles/main_infra/files/remote-backend-selector.tf
when:
- backend | default ('false') | bool != true
- backend | default('false') | bool
- name: Generating variables file
template:
src: terraform.tfvars.j2
dest: roles/main_infra/files/terraform.tfvars
vars:
db_iops: "{{ chain_db_iops | default({}) }}"
db_iops: "{{ db_iops | default({}) }}"
- name: Generating backend file
template:
src: backend.tfvars.j2
dest: roles/main_infra/files/backend.tfvars
when: backend|bool == true
when: backend | default('false') | bool
- name: Check if .terraform folder exists
stat:
@@ -34,13 +34,13 @@
file:
path: roles/main_infra/files/.terraform/
state: absent
when: stat_result.stat.exists == True
when: stat_result.stat.exists
- name: Generate Terraform files
template:
src: "{{ item.key }}"
dest: "{{ item.value }}"
with_dict: {hosts.tf.j2: roles/main_infra/files/hosts.tf,routing.tf.j2: roles/main_infra/files/routing.tf,provider.tf.j2: roles/main_infra/files/provider.tf}
with_dict: { hosts.tf.j2: roles/main_infra/files/hosts.tf, routing.tf.j2: roles/main_infra/files/routing.tf, provider.tf.j2: roles/main_infra/files/provider.tf }
# Workaround since the terraform module returns an unexpected error.
- name: Terraform plan construct
@@ -61,36 +61,35 @@
pause:
prompt: "Are you absolutely sure you want to execute the deployment plan shown above? [False]"
register: user_answer
until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes"
retries: 10000
delay: 1
- name: Insert vars into parameter store
include: parameter_store.yml
loop: "{{ chain_custom_environment.keys() }}"
loop_control:
loop_var: chain
index_var: index
when: user_answer.user_input|bool == True
when: user_answer.user_input | bool
- name: Terraform provisioning
shell: "echo yes | {{ terraform_location }} apply terraform.tfplan"
args:
chdir: "roles/main_infra/files"
when: user_answer.user_input|bool == True
when: user_answer.user_input | bool
ignore_errors: True
- name: Ensure Terraform resources have been provisioned
shell: "echo yes | {{ terraform_location }} apply"
args:
chdir: "roles/main_infra/files"
when: user_answer.user_input|bool == True
when: user_answer.user_input | bool
- name: Terraform output info into variable
shell: "{{ terraform_location }} output -json"
register: output
args:
chdir: "roles/main_infra/files"
when: user_answer.user_input|bool == True
when: user_answer.user_input | bool
- name: Output info from Terraform
debug:
var: output.stdout_lines
when: user_answer.user_input|bool == True
when: user_answer.user_input | bool


@@ -1,7 +1,7 @@
- name: Prepare variables for Parameter Store
set_fact:
chain_ps_env: "{{ chain_ps_env | combine ({item.key|lower : item.value}) }}"
with_dict: "{{ chain_custom_environment[chain] }}"
with_dict: "{{ hostvars[inventory_hostname]['env_vars'] }}"
vars:
chain_ps_env: {}
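The set_fact above lowercases every env_vars key before the values are written to the Parameter Store. For the host_vars example shown earlier the resulting dictionary would look roughly like this (a sketch; only uncommented variables are picked up):

    chain_ps_env:
      blockscout_version: "v1.3.13-beta"
      release_link: "https://github.com/poanetwork/blockscout/releases/tag/v1.3.13-beta"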


@@ -1,3 +1,3 @@
bucket = "{{ prefix }}-{{ bucket }}"
dynamodb_table = "{{ prefix }}-{{ dynamodb_table }}"
bucket = "{{ group_names[0] }}-{{ bucket }}"
dynamodb_table = "{{ group_names[0] }}-{{ dynamodb_table }}"
key = "terraform.tfstate"


@@ -39,7 +39,7 @@ resource "aws_launch_configuration" "explorer" {
}
}
{% for key, value in chain_custom_environment.iteritems() %}
{% for key, value in env_vars.iteritems() %}
{% if value['USE_PLACEMENT_GROUP']|default('true') == "true" %}
resource "aws_placement_group" "explorer-{{key}}" {
name = "${var.prefix}-{{key}}-explorer-pg"
@@ -48,13 +48,13 @@ resource "aws_placement_group" "explorer-{{key}}" {
{% endif %}
{% endfor %}
{% for key, value in chain_custom_environment.iteritems() %}
{% for key, value in env_vars.iteritems() %}
resource "aws_autoscaling_group" "explorer-{{key}}" {
name = "${aws_launch_configuration.explorer.name}-asg-{{key}}"
max_size = "4"
min_size = "1"
desired_capacity = "1"
{% if value['USE_PLACEMENT_GROUP']|default('true') == "true" %} placement_group = "${var.prefix}-{{key}}-explorer-pg"
{% if use_placement_group | default('false') == "true" %} placement_group = "${var.prefix}-{{key}}-explorer-pg"
{% endif %}
launch_configuration = "${aws_launch_configuration.explorer.name}"
vpc_zone_identifier = ["${aws_subnet.default.id}"]


@@ -58,7 +58,7 @@ resource "aws_lb_target_group" "explorer" {
}
}
{% for key, value in chain_custom_environment.iteritems() %}
{% for key, value in env_vars.iteritems() %}
resource "aws_alb_listener" "alb_listener{{loop.index-1}}" {
load_balancer_arn = "${aws_lb.explorer.*.arn[{{loop.index-1}}]}"
port = "${lookup(var.use_ssl,element(var.chains,{{loop.index-1}})) ? "443" : "80" }"


@@ -1,4 +1,4 @@
prefix = "{{ prefix }}"
prefix = "{{ group_names[0] }}"
key_name = "{{ ec2_ssh_key_name }}"
key_content = "{{ ec2_ssh_key_content }}"
vpc_cidr = "{{ vpc_cidr }}"
@@ -9,92 +9,92 @@ instance_type = "{{ instance_type }}"
root_block_size = "{{ root_block_size }}"
pool_size = {
{% for key, value in chain_custom_environment.iteritems() %}
{{ key }}="{{ value['POOL_SIZE']|default('30') }}"{% if not loop.last %},{% endif %}
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['POOL_SIZE'] | default('30') }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
secret_key_base = {
{% for key, value in chain_custom_environment.iteritems() %}
{{ key }}="{{ value['SECRET_KEY_BASE']|default('TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==') }}"{% if not loop.last %},{% endif %}
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['SECRET_KEY_BASE']|default('TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ==') }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
use_ssl = {
{% for key, value in chain_custom_environment.iteritems() %}
{{ key }}="{{ value['ECTO_USE_SSL']|default('false') }}"{% if not loop.last %},{% endif %}
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['ECTO_USE_SSL']|default('false') }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
alb_ssl_policy = {
{% for key, value in chain_custom_environment.iteritems() %}
{{ key }}="{{ value['ALB_SSL_POLICY']|default('') }}"{% if not loop.last %},{% endif %}
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['ALB_SSL_POLICY']|default('') }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
alb_certificate_arn = {
{% for key, value in chain_custom_environment.iteritems() %}
{{ key }}="{{ value['ALB_CERTIFICATE_ARN']|default('') }}"{% if not loop.last %},{% endif %}
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }}="{{ hostvars[host]['env_vars']['ALB_CERTIFICATE_ARN']|default('') }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chains = [
{% for key,value in chain_custom_environment.iteritems() %}
"{{ key }}"{% if not loop.last %},{% endif %}
{% for host in groups[group_names[0]] %}
"{{ hostvars[host]['chain'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
]
chain_db_id = {
{% for key, value in chain_db_id.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_id = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_id'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_name = {
{% for key, value in chain_db_name.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_name = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_name'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_username = {
{% for key, value in chain_db_username.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_username = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_username'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_password = {
{% for key, value in chain_db_password.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_password = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_password'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_instance_class = {
{% for key, value in chain_db_instance_class.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_instance_class = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_instance_class'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_storage = {
{% for key, value in chain_db_storage.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_storage = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_storage'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_storage_type = {
{% for key, value in chain_db_storage_type.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_storage_type = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_storage_type'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_iops = {
{% for key, value in db_iops.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_iops = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_iops'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}
chain_db_version = {
{% for key, value in chain_db_version.iteritems() %}
{{ key }} = "{{ value }}"{% if not loop.last %},{% endif %}
db_version = {
{% for host in groups[group_names[0]] %}
{{ hostvars[host]['chain'] }} = "{{ hostvars[host]['db_version'] }}"{% if not loop.last %},{% endif %}
{% endfor %}
}


@@ -1,52 +1,51 @@
- name: Clone BlockScout
git:
repo: "{{ blockscout_repo }}"
dest: "blockscout-{{ chain }}"
version: "{{ chain_branch[chain] }}"
dest: "blockscout-{{ group_names[0] }}-{{ chain }}"
version: "{{ branch }}"
force: true
when: skip_fetch | bool != true
- name: Git clean
command: "git clean -fdx"
args:
chdir: "blockscout-{{ chain }}"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}"
when: skip_fetch | bool != true
- name: Merge branches
command: "git merge {{ chain_merge_commit[chain] }}"
command: "git merge {{ merge_commit_item }}"
args:
chdir: "blockscout-{{ chain }}"
when: skip_fetch | bool != true and chain_merge_commit_item != 'false'
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}"
when: merge_commit_item and not skip_fetch | bool
vars:
chain_mc: "{{ chain_merge_commit | default({}) }}"
chain_merge_commit_item: "{{ chain_mc[chain] | default('false') }}"
merge_commit_item: "{{ merge_commit | default(false) }}"
- name: Copy web config files
copy:
src: "blockscout-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example"
dest: "blockscout-{{ chain }}/apps/block_scout_web/config/dev.secret.exs"
src: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs.example"
dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/config/dev.secret.exs"
- name: Template explorer config files
template:
src: dev.secret.exs.j2
dest: "blockscout-{{ chain }}/apps/explorer/config/dev.secret.exs"
when: ps_db is defined
dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs"
when: ps_user is defined
- name: Copy default explorer config files
copy:
src: "blockscout-{{ chain }}/apps/explorer/config/dev.secret.exs.example"
dest: "blockscout-{{ chain }}/apps/explorer/config/dev.secret.exs"
when: ps_db is undefined or ps_db == ""
src: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs.example"
dest: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/config/dev.secret.exs"
when: ps_user is undefined or ps_user == ""
- name: Remove static assets from previous deployment, if any
file:
path: "blockscout-{{ chain }}/apps/block_scout_web/priv/static"
path: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static"
state: absent
- name: Compile BlockScout
command: "mix do {{ item }}"
args:
chdir: "blockscout-{{ chain }}"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}"
with_items:
- deps.get
- local.rebar --force
@@ -59,117 +58,132 @@
- name: Install Node modules at apps/block_scout_web/assets
command: npm install
args:
chdir: "blockscout-{{ chain }}/apps/block_scout_web/assets"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets"
- name: Execute webpack.js at apps/block_scout_web/assets/node_modules/webpack/bin
command: node_modules/webpack/bin/webpack.js --mode production
args:
chdir: "blockscout-{{ chain }}/apps/block_scout_web/assets"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets"
- name: Install Node modules at apps/explorer
command: npm install
args:
chdir: "blockscout-{{ chain }}/apps/explorer"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer"
- name: Install SSL certificates
command: mix phx.gen.cert blockscout blockscout.local
args:
chdir: "blockscout-{{ chain }}/apps/block_scout_web"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web"
- name: Fetch environment variables (via access key)
set_fact:
chain_env: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=aws_region|default('us-east-1'), shortnames=true, bypath=true, recursive=true ) }}"
env: "{{ lookup('aws_ssm', path, aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, region=aws_region|default('us-east-1'), shortnames=true, bypath=true, recursive=true ) }}"
vars:
path: "/{{ prefix }}/{{ chain }}"
path: "/{{ group_names[0] }}/{{ chain }}"
when: aws_access_key is defined
- name: Fetch environment variables (via profile)
set_fact:
chain_env: "{{ lookup('aws_ssm', path, aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}"
env_compiled: "{{ lookup('aws_ssm', path, aws_profile=aws_profile, shortnames=true, bypath=true, recursive=true ) }}"
vars:
path: "/{{ prefix }}/{{ chain }}"
path: "/{{ group_names[0] }}/{{ chain }}"
when: aws_access_key is undefined
- name: Make config variables lowercase
set_fact:
chain_lower_env: "{{ chain_lower_env | combine ({item.key|lower : item.value}) }}"
with_dict: "{{ chain_custom_environment_chain }}"
when: chain_custom_environment_chain|length > 0
lower_env: "{{ lower_env | combine ({item.key|lower : item.value}) }}"
with_dict: "{{ custom_environment_chain }}"
when: custom_environment_chain|length > 0
vars:
chain_lower_env: {}
chain_custom_environment_chain: "{{ chain_cec[chain] | default({}) if chain_cec[chain]>0 else {} }}"
chain_cec: "{{ chain_custom_environment | default ({}) }}"
lower_env: {}
custom_environment_chain: "{{ env_vars | default({}) if env_vars>0 else {} }}"
- name: Override env variables
set_fact:
chain_env: "{{ chain_env | combine(chain_lower_env) }}"
when: chain_lower_env is defined
env_compiled: "{{ env_compilated | combine(lower_env) }}"
when: lower_env is defined
- name: Uppercase chain
set_fact:
chain_upper_env: "{{ chain_upper_env | combine ({item.key|upper : item.value}) }}"
with_dict: "{{ chain_env }}"
upper_env: "{{ upper_env | combine ({item.key|upper : item.value}) }}"
with_dict: "{{ env_compiled }}"
vars:
chain_upper_env: {}
upper_env: {}
- name: Start server
block:
- set_fact:
server_port: "{{ 65535|random(seed=inventory_hostname,start=1024) }}"
- set_fact:
server_env: "{{ upper_env | combine({'NETWORK_PATH':'/','PORT':server_port,'MIX_ENV':'prod'}) }}"
- name: Start server
command: "mix phx.server"
environment: "{{ chain_upper_env | combine({'NETWORK_PATH':'/'}) }}"
ignore_errors: true
environment: "{{ server_env }}"
args:
chdir: "blockscout-{{ chain }}"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}"
async: 10000
poll: 0
- debug:
msg: "Please, open your browser at following addresses:"
run_once: true
- debug:
msg: "{{ ansible_host }}:{{ server_port }}"
- name: User prompt
pause:
prompt: "Please, open your browser and open 4000 port at the machine were Ansible is currently run. BlockScout should appear. Ensure that there is no visual artifacts and then press Enter to continue. Press Ctrl+C and then A if you face any issues to cancel the deployment."
rescue:
- name: 'Stop execution'
fail:
msg: "Execution aborted."
prompt: "BlockScout should appear. Ensure that there is no visual artifacts and then press Enter to continue. Press Ctrl+C and then A if you face any issues to cancel the deployment. Note: Localhost stands for the machine were Ansible is currently run."
run_once: true
register: prompt
always:
- name: kill server
command: "pkill -f {{ item }}"
with_items:
- beam.smp
- node
- erlang
failed_when: false
when:
- webpack.js
failed_when: false
- name: Check for execution interrupt
fail:
msg: "Execution aborted"
when: prompt is failed
- name: Build static assets
command: mix phx.digest
args:
chdir: "blockscout-{{ chain }}"
chdir: "blockscout-{{ group_names[0] }}-{{ chain }}"
- name: User prompt
pause:
prompt: "Would you like to remove staging dependencies? [Yes/No] Default: Yes"
register: user_answer
until: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no" and user_answer.user_input|lower != "true" and user_answer.user_input|lower != "yes"
retries: 10000
delay: 1
- name: Remove dev dependencies
file:
state: absent
path: "{{ item }}"
with_items:
- "blockscout-{{ chain }}/_build/"
- "blockscout-{{ chain }}/deps/"
- "blockscout-{{ chain }}/apps/block_scout_web/assets/node_modules/"
- "blockscout-{{ chain }}/apps/explorer/node_modules/"
- "blockscout-{{ chain }}/logs/dev/"
when: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no"
- "blockscout-{{ group_names[0] }}-{{ chain }}/_build/"
- "blockscout-{{ group_names[0] }}-{{ chain }}/deps/"
- "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/assets/node_modules/"
- "blockscout-{{ group_names[0] }}-{{ chain }}/apps/explorer/node_modules/"
- "blockscout-{{ group_names[0] }}-{{ chain }}/logs/dev/"
when: user_answer.user_input | lower | bool
- name: Fix bug with favicon
replace:
regexp: '\"favicon\.ico\"\:\"favicon-[a-z0-9]+?\.ico\"'
replace: '"images/favicon.ico":"favicon.ico"'
path: "blockscout-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json"
path: "blockscout-{{ group_names[0] }}-{{ chain }}/apps/block_scout_web/priv/static/cache_manifest.json"
- name: Upload Blockscout to S3
command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ prefix }}-explorer --s3-location s3://{{ prefix }}-explorer-codedeploy-releases/blockscout-{{ chain }}.zip --source=blockscout-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}"
command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} aws deploy push --application-name={{ group_names[0] }}-explorer --s3-location s3://{{ group_names[0] }}-explorer-codedeploy-releases/blockscout-{{ group_names[0] }}-{{ chain }}.zip --source=blockscout-{{ group_names[0] }}-{{ chain }} {{ '--profile='~aws_profile if aws_profile is defined else '' }}"
register: push_output
- name: Upload output
@@ -180,10 +194,13 @@
pause:
prompt: "Do you want to update the Parameter Store variables? [Yes/No] Default: Yes"
register: user_answer
until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes"
retries: 10000
delay: 1
- name: Update chain variables
aws_ssm_parameter_store:
name: "/{{ prefix }}/{{ chain }}/{{ item.key }}"
name: "/{{ group_names[0] }}/{{ chain }}/{{ item.key }}"
value: "{{ item.value }}"
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
@@ -194,15 +211,17 @@
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
with_dict: "{{ chain_lower_env }}"
when: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no"
with_dict: "{{ lower_env }}"
when: user_answer.user_input | lower | bool
- name: User prompt
pause:
prompt: "Do you want to deploy BlockScout? [Yes/No] Default: Yes"
register: user_answer
until: user_answer.user_input | lower != "false" and user_answer.user_input | lower != "no" and user_answer.user_input | lower != "true" and user_answer.user_input | lower != "yes"
retries: 10000
delay: 1
- name: Deploy Blockscout
command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ prefix }}-explorer-dg{{ index }} --deployment-config-name CodeDeployDefault.OneAtATime --description '{{ chain_upper_env['BLOCKSCOUT_VERSION'] }}' {{ '--profile='~aws_profile if aws_profile is defined else '' }}"
when: user_answer.user_input|lower != "false" and user_answer.user_input|lower != "no"
command: "{{ 'AWS_ACCESS_KEY='~aws_access_key~' AWS_SECRET_ACCESS_KEY='~aws_secret_key~' AWS_DEFAULT_REGION='~aws_region if aws_profile is undefined else '' }} {{ push_output.stdout_lines[1] }} --deployment-group-name {{ group_names[0] }}-explorer-dg{{ groups[group_names[0]].index(inventory_hostname) }} --deployment-config-name CodeDeployDefault.OneAtATime --description '{{ env_compiled['BLOCKSCOUT_VERSION'] }}' {{ '--profile='~aws_profile if aws_profile is defined else '' }}"
when: user_answer.user_input | lower | bool


@@ -1,6 +1,6 @@
- name: Create S3 bucket
aws_s3:
bucket: "{{ prefix }}-{{ bucket }}"
bucket: "{{ group_names[0] }}-{{ bucket }}"
mode: create
permission: private
profile: "{{ profile }}"
@@ -15,11 +15,11 @@
- name: Apply tags and versioning to the created S3 bucket
s3_bucket:
name: "{{ prefix }}-{{ bucket }}"
name: "{{ group_names[0] }}-{{ bucket }}"
versioning: yes
tags:
origin: terraform
prefix: "{{ prefix }}"
prefix: "{{ inventory_hostname }}"
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
@@ -32,7 +32,7 @@
- name: Add lifecycle management policy to the created S3 bucket
s3_lifecycle:
name: "{{ prefix }}-{{ bucket }}"
name: "{{ group_names[0] }}-{{ bucket }}"
rule_id: "expire"
noncurrent_version_expiration_days: 90
status: enabled


@@ -0,0 +1,38 @@
- name: Check if config file exists
stat:
path: "{{ playbook_dir }}/{{ file }}"
register: stat_result
- name: Copy temporary file to be uploaded
command: "cp {{ playbook_dir }}/{{ file }} {{ playbook_dir }}/{{ file }}.temp"
when: stat_result.stat.exists
- name: Remove insecure AWS variables
replace:
path: "{{ playbook_dir }}/{{ file }}.temp"
regexp: 'aws_.*'
replace: '<There was an insecure variable to keep at S3. Removed>'
when: stat_result.stat.exists
- name: Upload config to S3 bucket
aws_s3:
bucket: "{{ group_names[0] }}-{{ bucket }}"
object: all.yml
src: "{{ playbook_dir }}/{{ file }}.temp"
mode: put
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
region: "{{ region }}"
vars:
access_key: "{{ aws_access_key|default(omit) }}"
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
when: stat_result.stat.exists
- name: Remove temp file
file:
path: "{{ playbook_dir }}/{{ file }}.temp"
state: absent
when: stat_result.stat.exists


@@ -1,45 +1,8 @@
- name: Check if config file exists
stat:
path: "{{ playbook_dir }}/group_vars/all.yml"
register: stat_result
- name: Copy temporary file to be uploaded
command: "cp {{ playbook_dir }}/group_vars/all.yml {{ playbook_dir }}/group_vars/all.yml.temp"
when: stat_result.stat.exists == True
- name: Remove insecure AWS variables
replace:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
regexp: 'aws_.*'
replace: '<There was an aws-related insecure variable to keep at S3. Removed>'
when: stat_result.stat.exists == True
- name: Remove other insecure variables
replace:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
regexp: 'secret_.*'
replace: '<There was an insecure variable to keep at S3. Removed>'
when: stat_result.stat.exists == True
- name: Upload config to S3 bucket
aws_s3:
bucket: "{{ prefix }}-{{ bucket }}"
object: all.yml
src: "{{ playbook_dir }}/group_vars/all.yml.temp"
mode: put
profile: "{{ profile }}"
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
region: "{{ region }}"
vars:
access_key: "{{ aws_access_key|default(omit) }}"
secret_key: "{{ aws_secret_key|default(omit) }}"
profile: "{{ aws_profile|default(omit) }}"
region: "{{ aws_region|default(omit) }}"
when: stat_result.stat.exists == True
- name: Remove temp file
file:
path: "{{ playbook_dir }}/group_vars/all.yml.temp"
state: absent
when: stat_result.stat.exists == True
- name: "Loop over config files"
include: subtasks.yml file={{item}}
with_items:
- "group_vars/all.yml"
- "group_vars/{{ group_names[0] }}"
- "group_vars/{{ group_names[0] }}.yml"
- "host_vars/{{ inventory_hostname }}.yml"
- "host_vars/{{ inventory_hostname }}"


@@ -5,7 +5,7 @@
- name: Upload logs to s3
aws_s3:
bucket: "{{ prefix }}-{{ bucket }}"
bucket: "{{ group_names[0] }}-{{ bucket }}"
object: log.txt
src: "{{ playbook_dir }}/log.txt"
mode: put