Code refactor: added Ansible as the deployment orchestrator

This commit is contained in:
a@a.ru 2019-02-23 20:23:41 +03:00
parent 51f73ac618
commit c54925fc9e
52 changed files with 437 additions and 903 deletions

7
.gitignore vendored
View File

@ -4,8 +4,9 @@
/ignore.tfvars
# Terraform State
/.terraform
/terraform.tfstate.d
*.terraform*
*terraform.tfstate.d*
*tfplan*
# Sensitive information
/*.privkey
@ -13,4 +14,4 @@
# Stack-specific information
/PREFIX
/plans/*.planfile

View File

@ -1 +0,0 @@
../common/backend.tf

View File

@ -1 +0,0 @@
../setup/main.tf

View File

@ -1 +0,0 @@
../common/provider.tf

View File

@ -1 +0,0 @@
../common/variables.tf

539
bin/infra
View File

@ -1,539 +0,0 @@
#!/usr/bin/env bash
set -e
# Color support
function disable_color() {
IS_TTY=false
txtrst=
txtbld=
bldred=
bldgrn=
bldylw=
bldblu=
bldmag=
bldcyn=
}
IS_TTY=false
if [ -t 1 ]; then
if command -v tput >/dev/null; then
IS_TTY=true
fi
fi
if [ "$IS_TTY" = "true" ]; then
txtrst=$(tput sgr0 || echo '\e[0m') # Reset
txtbld=$(tput bold || echo '\e[1m') # Bold
bldred=${txtbld}$(tput setaf 1 || echo '\e[31m') # Red
bldgrn=${txtbld}$(tput setaf 2 || echo '\e[32m') # Green
bldylw=${txtbld}$(tput setaf 3 || echo '\e[33m') # Yellow
bldblu=${txtbld}$(tput setaf 4 || echo '\e[34m') # Blue
bldmag=${txtbld}$(tput setaf 5 || echo '\e[35m') # Magenta
bldcyn=${txtbld}$(tput setaf 6 || echo '\e[36m') # Cyan
else
disable_color
fi
# Logging
# Print the given message in cyan, but only when --verbose was passed
function debug() {
if [ "$VERBOSE" == "true" ]; then
printf '%s%s%s\n' "$bldcyn" "$1" "$txtrst"
fi
}
# Print the given message in blue
function info() {
printf '%s%s%s\n' "$bldblu" "$1" "$txtrst"
}
# Print the given message in magenta
function action() {
printf '%s%s%s\n' "$bldmag" "$1" "$txtrst"
}
# Print the given message in yellow
function warn() {
printf '%s%s%s\n' "$bldylw" "$1" "$txtrst"
}
# Like warn, but expects the message via redirect
function warnb() {
printf '%s' "$bldylw"
while read -r data; do
printf '%s\n' "$data"
done
printf '%s\n' "$txtrst"
}
# Print the given message in red
function error() {
printf '%s%s%s\n' "$bldred" "$1" "$txtrst"
exit 1
}
# Like error, but expects the message via redirect
function errorb() {
printf '%s' "$bldred"
while read -r data; do
printf '%s\n' "$data"
done
printf '%s\n' "$txtrst"
exit 1
}
# Print the given message in green
function success() {
printf '%s%s%s\n' "$bldgrn" "$1" "$txtrst"
}
# Print help if requested
function help() {
cat << EOF
POA Infrastructure Management Tool
Usage:
./infra [global options] <task> [task args]
This script will bootstrap required AWS resources, then generate infrastructure via Terraform.
Tasks:
help Show help
provision Run the provisioner to generate or modify POA infrastructure
destroy Tear down any provisioned resources and local state
resources List ARNs of any generated resources (* see docs for caveats)
Global Options:
-v | --verbose This will print out verbose execution information for debugging
-h | --help Print this help message
--dry-run Perform as many actions as possible without performing side-effects
--no-color Turn off color
--skip-approval Automatically accept any prompts for confirmation
--profile=<name> Use a specific AWS profile rather than the default
EOF
exit 2
}
# Verify tools
function check_prereqs() {
if ! which jq >/dev/null; then
warnb << EOF
This script requires that the 'jq' utility has been installed and can be found in $PATH
On macOS, with Homebrew, this is as simple as 'brew install jq'.
For installs on other platforms, see https://stedolan.github.io/jq/download/
EOF
exit 2
fi
if ! which aws >/dev/null; then
warnb << EOF
This script requires that the AWS CLI tool has been installed and can be found in $PATH
On macOS, with Homebrew, this is as simple as 'brew install awscli'.
For installs on other platforms, see https://docs.aws.amazon.com/cli/latest/userguide/installing.html
EOF
exit 2
fi
if ! which terraform >/dev/null; then
warnb << EOF
This script requires that the Terraform CLI be installed and available in PATH!
On macOS, with Homebrew, this is as simple as 'brew install terraform'.
For other platforms, see https://www.terraform.io/intro/getting-started/install.html
EOF
exit 2
fi
}
# Load a value which is present in one of the Terraform config
# files in the current directory, with precedence such that user-provided
# .tfvars are loaded after main.tfvars, allowing one to override those values
function get_config() {
EXTRA_VARS="$(find . -name '*.tfvars' -and \! \( -name 'backend.tfvars' \))"
if [ ! -z "$EXTRA_VARS" ]; then
# shellcheck disable=SC2086 disable=2002
cat $EXTRA_VARS | \
grep -E "^$1 " | \
tail -n 1 | \
sed -r -e 's/^[^=]*= //' -e 's/"//g'
fi
}
function destroy_bucket() {
bucket="$(grep 'bucket' backend.tfvars | sed -e 's/bucket = //' -e 's/"//g')"
read -r -p "Are you super sure you want to delete the Terraform state bucket and all versions? (y/n) "
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
exit 2
fi
# Delete all versions and delete markers first
info "Disabling bucket versioning for S3 bucket '$bucket'.."
aws s3api put-bucket-versioning --bucket="$bucket" --versioning-configuration="Status=Suspended"
info "Deleting old versions of S3 bucket '$bucket'.."
# shellcheck disable=SC1004
aws s3api list-object-versions --bucket="$bucket" |\
jq '.Versions[], .DeleteMarkers[] | "\"\(.Key)\" \"\(.VersionId)\""' --raw-output |\
awk -v bucket="$bucket" '{ \
print "aws s3api delete-object", \
"--bucket=\"" bucket "\"", \
"--key=\"" $1 "\"", \
"--version-id=\"" $2 "\"" \
| "/bin/sh >/dev/null"; \
print "Deleted version " $2 "of " $1 " successfully"; \
}'
# Finally, delete the bucket and all its contents
aws s3 rb --force "s3://$bucket"
}
function destroy_dynamo_table() {
table="$(grep 'dynamodb_table' backend.tfvars | sed -e 's/dynamodb_table = //' -e 's/"//g')"
aws dynamodb delete-table --table-name="$table"
}
function destroy_generated_files() {
rm -f ./backend.tfvars
rm -f ./main.tfvars
}
# Tear down all provisioned infra
function destroy() {
# shellcheck disable=SC2086
terraform plan -destroy -var-file=main.tfvars -out plans/destroy.planfile main
read -r -p "Are you sure you want to run this plan? (y/n) "
if [[ $REPLY =~ ^[yY]$ ]]; then
terraform apply plans/destroy.planfile
rm -f plans/destroy.planfile
else
exit 0
fi
read -r -p "Do you wish to destroy the Terraform state? (y/n) "
if [[ $REPLY =~ ^[yY]$ ]]; then
destroy_bucket
destroy_dynamo_table
rm -rf terraform.tfstate.d
rm -rf .terraform
else
exit 0
fi
read -r -p "Do you want to delete the generated config files? (y/n) "
if [[ $REPLY =~ ^[yY]$ ]]; then
destroy_generated_files
fi
success "All generated infrastructure successfully removed!"
}
# Provision infrastructure
function provision() {
# If INFRA_PREFIX has not been set yet, request it from user
if [ -z "$INFRA_PREFIX" ]; then
DEFAULT_INFRA_PREFIX=$(LC_ALL=C tr -dc 'a-z0-9' < /dev/urandom | fold -w 5 | head -n 1)
warnb << EOF
# Infrastructure Prefix
In order to ensure that provisioned resources are unique, this script uses a
unique prefix for all resource names and ids.
By default, a random 5 character alphanumeric string is generated for you, but
if you wish to provide your own, now is your chance. This value will be stored
in 'main.tfvars' so that you need only provide it once, but make sure you source
control the file.
EOF
read -r -p "What prefix should be used? (default is $DEFAULT_INFRA_PREFIX): "
INFRA_PREFIX="$REPLY"
if [ -z "$INFRA_PREFIX" ]; then
INFRA_PREFIX="$DEFAULT_INFRA_PREFIX"
fi
fi
if ! echo "$INFRA_PREFIX" | grep -E '^[a-z0-9]{3,5}$'; then
errorb << EOF
The prefix '$INFRA_PREFIX' is invalid!
It must consist only of the lowercase characters a-z and digits 0-9,
and must be between 3 and 5 characters long.
EOF
fi
# EC2 key pairs
if [ -z "$KEY_PAIR" ]; then
KEY_PAIR="$(get_config 'key_name')"
if [ -z "$KEY_PAIR" ]; then
read -r -p "Please provide the name of the key pair to use with EC2 hosts: "
KEY_PAIR="$REPLY"
if [ -z "$KEY_PAIR" ]; then
error "You must provide a valid key pair name!"
exit 2
fi
fi
fi
if ! aws ec2 describe-key-pairs --key-names="$KEY_PAIR" 2>/dev/null; then
if [ "$DRY_RUN" == "true" ]; then
action "DRY RUN: Would have created an EC2 key pair"
else
info "The key pair '$KEY_PAIR' does not exist, creating..."
if ! output=$(aws ec2 create-key-pair --key-name="$KEY_PAIR"); then
error "$output\\nFailed to generate key pair!"
fi
echo "$output" | jq '.KeyMaterial' --raw-output > "$KEY_PAIR.privkey"
success "Created keypair successfully! Private key has been saved to ./$KEY_PAIR.privkey"
fi
fi
if [ -z "$SECRET_KEY_BASE" ]; then
SECRET_KEY_BASE="$(get_config 'secret_key_base')"
if [ -z "$SECRET_KEY_BASE" ]; then
SECRET_KEY_BASE="$(openssl rand -base64 64 | tr -d '\n')"
fi
fi
# Save variables used by Terraform modules
if [ ! -f ./backend.tfvars ] && [ ! -f ./main.tfvars ]; then
# shellcheck disable=SC2154
region="$TF_VAR_region"
if [ -z "$region" ]; then
# Try to pull region from local config
if [ -f "$HOME/.aws/config" ]; then
if [ "$AWS_PROFILE" == "default" ]; then
region=$(awk '/\[default\]/{a=1;next}; /\[/{a=0}a' ~/.aws/config | grep 'region' | sed -e 's/region = //')
else
#shellcheck disable=SC1117
region=$(awk "/\[profile $AWS_PROFILE\]/{a=1;next}; /\[/{a=0}a" ~/.aws/config | grep 'region' | sed -e 's/region = //')
fi
fi
fi
if [ -z "$region" ]; then
read -r -p "What region should infrastructure be created in (us-east-2): "
if [ -z "$REPLY" ]; then
region='us-east-2'
else
region="$REPLY"
fi
fi
bucket="$(get_config 'bucket')"
if [ -z "$bucket" ]; then
bucket="poa-terraform-state"
fi
dynamo_table="$(get_config 'dynamodb_table')"
if [ -z "$dynamo_table" ]; then
dynamo_table="poa-terraform-locks"
fi
# Backend config only!
{
echo "region = \"$region\""
echo "bucket = \"${INFRA_PREFIX}-$bucket\""
echo "dynamodb_table = \"${INFRA_PREFIX}-$dynamo_table\""
echo "key = \"terraform.tfstate\""
} > ./backend.tfvars
# Other configuration needs to go in main.tfvars or init will break
{
echo "region = \"$region\""
echo "bucket = \"$bucket\""
echo "dynamodb_table = \"$dynamo_table\""
echo "key_name = \"$KEY_PAIR\""
echo "prefix = \"$INFRA_PREFIX\""
echo "secret_key_base = \"$SECRET_KEY_BASE\""
} > ./main.tfvars
fi
# No Terraform state yet, so this is a fresh run
if [ ! -d .terraform ]; then
terraform workspace new base setup
terraform workspace select base setup
# shellcheck disable=SC2086
terraform init -backend-config=backend.tfvars setup
# shellcheck disable=SC2086
terraform plan -out plans/setup.planfile setup
if [ "$DRY_RUN" == "false" ]; then
# No need to show the plan, it has already been displayed
SKIP_SETUP_PLAN="true"
fi
fi
workspace="$(terraform workspace show)"
# Setup hasn't completed yet, perhaps due to a dry run
if [ -f plans/setup.planfile ]; then
if [ -z "$SKIP_SETUP_PLAN" ]; then
# Regenerate setup plan if not fresh
# shellcheck disable=SC2086
terraform plan -out plans/setup.planfile setup
fi
# Wait for user approval if we're going to proceed
if [ "$SKIP_APPROVAL" == "false" ]; then
read -r -p "Take a moment to review the generated plan, and press ENTER to continue"
fi
if [ "$DRY_RUN" == "true" ]; then
action "DRY RUN: Would have executed Terraform plan for S3 backend as just shown"
warn "Unable to dry run further steps until S3 backend has been created!"
exit 0
fi
terraform apply plans/setup.planfile
rm plans/setup.planfile
# Migrate state to S3
# shellcheck disable=SC2086
terraform init -force-copy -backend-config=backend.tfvars base
fi
if [ "$workspace" == "base" ]; then
# Switch to main workspace
terraform workspace new main main
terraform workspace select main main
fi
# shellcheck disable=SC2086
terraform init -backend-config=backend.tfvars -var-file=main.tfvars main
# Generate the plan for the remaining infra
# shellcheck disable=SC2086
terraform plan -var-file=main.tfvars -out plans/main.planfile main
if [ "$SKIP_APPROVAL" == "false" ]; then
read -r -p "Take a moment to review the generated plan, and press ENTER to continue"
fi
if [ "$DRY_RUN" == "true" ]; then
action "DRY RUN: Would have executed the Terraform plan just shown"
fi
# Apply the plan to provision the remaining infra
terraform apply plans/main.planfile
rm plans/main.planfile
success "Infrastructure has been successfully provisioned!"
}
# Print all resource ARNs tagged with prefix=INFRA_PREFIX
function resources() {
if [ -z "$INFRA_PREFIX" ]; then
error "No prefix set, unable to locate tagged resources"
exit 1
fi
# Yes, stagging, blame Amazon
aws resourcegroupstaggingapi get-resources \
--no-paginate \
--tag-filters="Key=prefix,Values=$INFRA_PREFIX" | \
jq '.ResourceTagMappingList[].ResourceARN' --raw-output
}
# Provide test data for validation
function precheck() {
# Save variables used by Terraform modules
if [ ! -f ./ignore.tfvars ]; then
{
echo "bucket = \"poa-terraform-state\""
echo "dynamodb_table = \"poa-terraform-locks\""
echo "key = \"terraform.tfstate\""
echo "key_name = \"poa\""
echo "prefix = \"prefix\""
} > ./ignore.tfvars
fi
}
# Parse options for this script
VERBOSE=false
HELP=false
DRY_RUN=false
SKIP_APPROVAL=false
# Environment variables for Terraform
AWS_PROFILE="${AWS_PROFILE:-default}"
COMMAND=
while [ "$1" != "" ]; do
param=$(echo "$1" | sed -re 's/^([^=]*)=.*$/\1/')
val=$(echo "$1" | sed -re 's/^([^=]*)=//')
case $param in
-h | --help)
HELP=true
;;
-v | --verbose)
VERBOSE=true
;;
--dry-run)
DRY_RUN=true
;;
--no-color)
disable_color
;;
--profile)
AWS_PROFILE="$val"
;;
--skip-approval)
SKIP_APPROVAL="true"
;;
--)
shift
break
;;
*)
COMMAND="$param"
shift
break
;;
esac
shift
done
# Turn on debug mode if --verbose was set
if [ "$VERBOSE" == "true" ]; then
set -x
fi
# Set working directory to the project root
cd "$(dirname "${BASH_SOURCE[0]}")/.."
# Export AWS_PROFILE if a non-default profile was chosen
if [ ! "$AWS_PROFILE" == "default" ]; then
export AWS_PROFILE
fi
# If cached prefix is in PREFIX file, then use it
if [ -z "$INFRA_PREFIX" ]; then
if ls ./*.tfvars >/dev/null 2>&1; then
INFRA_PREFIX="$(get_config 'prefix')"
fi
fi
# Override command if --help or -h was passed
if [ "$HELP" == "true" ]; then
# If we ever want to show help for a specific command we'll need this
# HELP_COMMAND="$COMMAND"
COMMAND=help
fi
check_prereqs
case $COMMAND in
help)
help
;;
provision)
provision
;;
destroy)
destroy
;;
resources)
resources
;;
precheck)
precheck
;;
destroy_setup)
destroy_bucket
destroy_dynamo_table
;;
*)
error "Unknown task '$COMMAND'. Try 'help' to see valid tasks"
exit 1
esac
exit 0

View File

@ -1,3 +0,0 @@
terraform {
backend "s3" {}
}

View File

@ -1,16 +0,0 @@
variable "bucket" {
description = "The name of the S3 bucket which will hold Terraform state"
}
variable "dynamodb_table" {
description = "The name of the DynamoDB table which will hold Terraform locks"
}
variable "region" {
description = "The AWS region to use"
default = "us-east-2"
}
variable "prefix" {
description = "The prefix used to identify all resources generated with this plan"
}

12
deploy.yml Normal file
View File

@ -0,0 +1,12 @@
- name: Prepare infrastructure
hosts: localhost
roles:
- { role: check }
- { role: s3, when: "backend|bool == true" }
- { role: dynamodb, when: "backend|bool == true" }
- { role: ec2_key }
- { role: main_infra }
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_REGION: "us-east-1"

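For reference, a minimal invocation sketch for this playbook, assuming Ansible 2.x with the boto-backed AWS modules installed (the credential values are placeholders to replace with your own):

ansible-playbook deploy.yml \
-e "aws_access_key=YOUR_ACCESS_KEY" \
-e "aws_secret_key=YOUR_SECRET_KEY"

Any variable not overridden with -e falls back to the defaults in group_vars/all.yml.
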
12
destroy.yml Normal file
View File

@ -0,0 +1,12 @@
- name: Destroy infrastructure
hosts: localhost
roles:
- { role: destroy, when: "confirmation|bool == True" }
vars_prompt:
- name: "confirmation"
prompt: "Are you sure you want to destroy all the infra?"
default: False
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_REGION: "us-east-1"

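A matching teardown sketch: because confirmation is declared under vars_prompt, pre-defining it with -e skips the interactive prompt (credential placeholders as above):

ansible-playbook destroy.yml \
-e "confirmation=True" \
-e "aws_access_key=YOUR_ACCESS_KEY" \
-e "aws_secret_key=YOUR_SECRET_KEY"
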
146
group_vars/all.yml Normal file
View File

@ -0,0 +1,146 @@
# Credentials to connect to AWS
aws_access_key: ""
aws_secret_key: ""
# Deployment-related variables
## If set to true, the backend will be uploaded and stored in an S3 bucket, so you can easily manage your deployment from any machine. It is highly recommended not to change this variable
backend: true
## If this is set to true along with the backend variable, this config file will be saved to the S3 bucket. Please make sure to name it all.yml. Otherwise, no upload will be performed
upload_config_to_s3: true
### The bucket and dynamodb_table variables will be used only when the backend variable is set to true
### Name of the bucket where TF state files will be stored
bucket: "poa-terraform-state"
### Name of the DynamoDB table where the current lock on the TF state file will be stored
dynamodb_table: "poa-terraform-lock"
## If ec2_ssh_key_content is empty, all virtual machines will be created with the ec2_ssh_key_name key. Otherwise, the playbooks will upload ec2_ssh_key_content under the name ec2_ssh_key_name and launch the virtual machines with that key
ec2_ssh_key_name: "sokol-test"
ec2_ssh_key_content: ""
## EC2 instances will be created with the following instance type:
instance_type: "m5.xlarge"
## The VPC containing Blockscout resources will be created as follows:
vpc_cidr: "10.0.0.0/16"
public_subnet_cidr: "10.0.0.0/24"
db_subnet_cidr: "10.0.1.0/24"
## The internal DNS zone will look like:
dns_zone_name: "poa.internal"
## All resources will be prefixed with this value
prefix: "sokol"
## The following settings are related to the RDS instance which will be created during deployment
db_id: "poa"
db_name: "poa"
db_username: "username"
db_password: "qwerty12345"
db_instance_class: "db.m4.xlarge"
db_storage: "120"
db_storage_type: "gp2"
## The following settings are related to SSL of the Application Load Balancer that will be deployed to AWS. If use_ssl is set to false, the alb_* variables can be omitted
use_ssl: "false"
alb_ssl_policy: "ELBSecurityPolicy-2016-08"
alb_certificate_arn: "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24"
## Size of the EC2 instance EBS root volume
root_block_size: 120
## Number of connections allowed by EC2 instance
pool_size: 30
## Secret key of the Explorer. Please generate your own key. For example, you can use the following command: openssl rand -base64 64 | tr -d '\n'
secret_key_base: "TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ=="
## New Relic related configs. Usually you want these empty
new_relic_app_name: ""
new_relic_license_key: ""
# Network related variables
## The following set of chain_* variables uses Terraform HCL syntax and should be filled in accordingly. For example, for the core and sokol networks you would fill these variables in as follows:
## This variable represents the network RPC endpoint:
# chains = {
# "core" = "http://10.10.10.10:8545",
# "sokol" = "https://192.168.0.1:8545"
# }
## This variable represents the network RPC endpoint in trace mode. It can be the same as the previous variable:
# chain_trace_endpoint = {
# "core" = "http://10.10.10.11:8545",
# "sokol" = "http://192.168.0.1:8546"
# }
## This variable represents the network RPC endpoint in websocket mode:
# chain_ws_endpoint = {
# "core" = "ws://10.10.10.10/ws",
# "sokol" = "ws://192.168.0.1/ws"
# }
## The next variable represents the client that is used to connect to the chain.
# chain_jsonrpc_variant = {
# "core" = "parity",
# "sokol" = "geth"
# }
## Place your own logo in the apps/block_scout_web/assets/static/images folder of the blockscout repo and specify a relative path here
# chain_logo = {
# "core" = "core.svg",
# "sokol" = "sokol.svg"
# }
## The following variable represents the name of the coin that will be shown in the blockchain explorer
# chain_coin = {
# "core" = "POA",
# "sokol" = "POA"
# }
## The next variable usually represents the name of the organization/community that hosts the chain
# chain_network = {
# "core" = "POA Network",
# "sokol" = "POA Network"
# }
## The next variable represents the actual name of the particular network
# chain_subnetwork = {
# "core" = "POA Core Network",
# "sokol" = "POA Sokol test network"
# }
## The next variable represents a relative URL path which will be used as an endpoint for the defined chain. For example, if our blockscout instance is hosted at the blockscout.com domain and the "core" network is placed at "/poa/core", the resulting endpoint for this network will be blockscout.com/poa/core.
# chain_network_path = {
# "core" = "/poa/core"
# "sokol" = "/poa/sokol"
# }
## The following variable maps the chain name to the network navigation icon at apps/block_scout_web/lib/block_scout_web/templates/icons, without the .eex extension
# chain_network_icon = {
# "core" = "_test_network_icon.html",
# "sokol" = "_test_network_icon.html"
# }
networks: >
chains = {
"mychain" = "url/to/endpoint"
}
chain_trace_endpoint = {
"mychain" = "url/to/debug/endpoint/or/the/main/chain/endpoint"
}
chain_ws_endpoint = {
"mychain" = "url/to/websocket/endpoint"
}
chain_jsonrpc_variant = {
"mychain" = "parity"
}
chain_logo = {
"mychain" = "url/to/logo"
}
chain_coin = {
"mychain" = "coin"
}
chain_network = {
"mychain" = "network name"
}
chain_subnetwork = {
"mychain" = "subnetwork name"
}
chain_network_path = {
"mychain" = "path/to/root"
}
chain_network_icon = {
"mychain" = "_test_network_icon.html"
}

View File

@ -1 +0,0 @@
../common/backend.tf

View File

@ -1 +0,0 @@
../common/variables.tf

View File

@ -1,50 +0,0 @@
module "backend" {
source = "../modules/backend"
bootstrap = "0"
bucket = "${var.bucket}"
dynamodb_table = "${var.dynamodb_table}"
prefix = "${var.prefix}"
}
module "stack" {
source = "../modules/stack"
prefix = "${var.prefix}"
region = "${var.region}"
key_name = "${var.key_name}"
chain_jsonrpc_variant = "${var.chain_jsonrpc_variant}"
chains = "${var.chains}"
chain_trace_endpoint = "${var.chain_trace_endpoint}"
chain_ws_endpoint = "${var.chain_ws_endpoint}"
chain_logo = "${var.chain_logo}"
chain_coin = "${var.chain_coin}"
chain_network = "${var.chain_network}"
chain_subnetwork = "${var.chain_subnetwork}"
chain_network_path = "${var.chain_network_path}"
chain_network_icon = "${var.chain_network_icon}"
vpc_cidr = "${var.vpc_cidr}"
public_subnet_cidr = "${var.public_subnet_cidr}"
instance_type = "${var.instance_type}"
root_block_size = "${var.root_block_size}"
pool_size = "${var.pool_size}"
db_subnet_cidr = "${var.db_subnet_cidr}"
dns_zone_name = "${var.dns_zone_name}"
db_id = "${var.db_id}"
db_name = "${var.db_name}"
db_username = "${var.db_username}"
db_password = "${var.db_password}"
db_storage = "${var.db_storage}"
db_storage_type = "${var.db_storage_type}"
db_instance_class = "${var.db_instance_class}"
secret_key_base = "${var.secret_key_base}"
new_relic_app_name = "${var.new_relic_app_name}"
new_relic_license_key = "${var.new_relic_license_key}"
alb_ssl_policy = "${var.alb_ssl_policy}"
alb_certificate_arn = "${var.alb_certificate_arn}"
use_ssl = "${var.use_ssl}"
}

View File

@ -1 +0,0 @@
../common/provider.tf

View File

@ -1,174 +0,0 @@
variable "key_name" {
description = "The name of the SSH key to use with EC2 hosts"
default = "poa"
}
variable "vpc_cidr" {
description = "Virtual Private Cloud CIDR block"
default = "10.0.0.0/16"
}
variable "public_subnet_cidr" {
description = "The CIDR block for the public subnet"
default = "10.0.0.0/24"
}
variable "db_subnet_cidr" {
description = "The CIDR block for the database subnet"
default = "10.0.1.0/16"
}
variable "dns_zone_name" {
description = "The internal DNS name"
default = "poa.internal"
}
variable "instance_type" {
description = "The EC2 instance type to use for app servers"
default = "m5.xlarge"
}
variable "root_block_size" {
description = "The EC2 instance root block size in GB"
default = 8
}
variable "pool_size" {
description = "The number of connections available to the RDS instance"
default = 30
}
variable "chains" {
description = "A map of chain names to urls"
default = {
"sokol" = "https://sokol-trace.poa.network"
}
}
variable "chain_trace_endpoint" {
description = "A map of chain names to RPC tracing endpoint"
default = {
"sokol" = "https://sokol-trace.poa.network"
}
}
variable "chain_ws_endpoint" {
description = "A map of chain names to Websocket RPC Endpoint"
default = {
"sokol" = "wss://sokol-ws.poa.network/ws"
}
}
variable "chain_jsonrpc_variant" {
description = "A map of chain names to JSON RPC variant"
default = {
"sokol" = "parity"
}
}
variable "chain_logo" {
description = "A map of chain names to logo url"
default = {
"sokol" = "/images/sokol_logo.svg"
}
}
variable "chain_coin" {
description = "A map of chain name to coin symbol"
default = {
"sokol" = "POA"
}
}
variable "chain_network" {
description = "A map of chain names to network name"
default = {
"sokol" = "POA Network"
}
}
variable "chain_subnetwork" {
description = "A map of chain names to subnetwork name"
default = {
"sokol" = "Sokol Testnet"
}
}
variable "chain_network_path" {
description = "A map of chain names to network name path"
default = {
"sokol" = "/poa/sokol"
}
}
variable "chain_network_icon" {
description = "A map of chain names to network navigation icon"
default = {
"sokol" = "_test_network_icon.html"
}
}
# RDS/Database configuration
variable "db_id" {
description = "The identifier for the RDS database"
default = "poa"
}
variable "db_name" {
description = "The name of the database associated with the application"
default = "poa"
}
variable "db_username" {
description = "The name of the user which will be used to connect to the database"
default = "poa"
}
variable "db_password" {
description = "The password associated with the database user"
}
variable "db_storage" {
description = "The database storage size in GB"
default = "100"
}
variable "db_storage_type" {
description = "The type of database storage to use: magnetic, gp2, io1"
default = "gp2"
}
variable "db_instance_class" {
description = "The instance class of the database"
default = "db.m4.large"
}
variable "secret_key_base" {
description = "The secret key base to use for Explorer"
}
variable "new_relic_app_name" {
description = "The name of the application in New Relic"
default = ""
}
variable "new_relic_license_key" {
description = "The license key for talking to New Relic"
default = ""
}
# SSL Certificate configuration
variable "alb_ssl_policy" {
description = "The SSL Policy for the Application Load Balancer"
default = "ELBSecurityPolicy-2016-08"
}
variable "alb_certificate_arn" {
description = "The Certificate ARN for the Applicationn Load Balancer Policy"
default = "arn:aws:acm:us-east-1:008312654217:certificate/ce6ec2cb-eba4-4b02-af1d-e77ce8813497"
}
variable "use_ssl" {
description = "Enable SSL"
default = "true"
}

View File

@ -1,45 +0,0 @@
# S3 bucket
resource "aws_s3_bucket" "terraform_state" {
count = "${var.bootstrap}"
bucket = "${var.prefix}-${var.bucket}"
acl = "private"
versioning {
enabled = true
}
lifecycle_rule {
id = "expire"
enabled = true
noncurrent_version_expiration {
days = 90
}
}
tags {
origin = "terraform"
prefix = "${var.prefix}"
}
}
# DynamoDB table
resource "aws_dynamodb_table" "terraform_statelock" {
count = "${var.bootstrap}"
name = "${var.prefix}-${var.dynamodb_table}"
read_capacity = 1
write_capacity = 1
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
tags {
origin = "terraform"
prefix = "${var.prefix}"
}
}

View File

@ -1,8 +0,0 @@
variable "bootstrap" {
description = "Whether we are bootstrapping the required infra or not"
default = 0
}
variable "bucket" {}
variable "dynamodb_table" {}
variable "prefix" {}

View File

@ -1,29 +0,0 @@
output "codedeploy_app" {
description = "The name of the CodeDeploy application"
value = "${aws_codedeploy_app.explorer.name}"
}
output "codedeploy_deployment_group_names" {
description = "The names of all the CodeDeploy deployment groups"
value = "${aws_codedeploy_deployment_group.explorer.*.deployment_group_name}"
}
output "codedeploy_bucket" {
description = "The name of the CodeDeploy S3 bucket for applciation revisions"
value = "${aws_s3_bucket.explorer_releases.id}"
}
output "codedeploy_bucket_path" {
description = "The path for releases in the CodeDeploy S3 bucket"
value = "/"
}
output "explorer_urls" {
description = "A map of each chain to the DNS name of its corresponding Explorer instance"
value = "${zipmap(keys(var.chains), aws_lb.explorer.*.dns_name)}"
}
output "db_instance_address" {
description = "The IP address of the RDS instance"
value = "${aws_db_instance.default.address}"
}

View File

@ -0,0 +1,29 @@
- name: Check prefix
fail:
msg: "The prefix '{{ prefix }}' is invalid. It must consist only of the lowercase characters a-z and digits 0-9, and must be between 3 and 5 characters long."
when: prefix|length < 3 or prefix|length > 5 or prefix is not match("^[a-z0-9]+$")
- name: Check if terraform is installed
command: which terraform
register: terraform_status
failed_when: false
- name: Terraform check result
fail:
msg: "Terraform is not installed"
when: terraform_status.stdout == ""
- name: Check if python is installed
command: which python
register: python_status
failed_when: false
- name: Python check result
fail:
msg: "Python either is not installed or is too old. Please install python version 2.6 or higher"
when: python_status.stdout == "" or ansible_python_version.split(".")[0]|int * 100 + ansible_python_version.split(".")[1]|int * 10 + ansible_python_version.split(".")[2]|int < 260
- name: Check if all required modules are installed
command: "python -c 'import {{ item }}'"
with_items:
- boto
- boto3
- botocore

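For comparison, the retired bin/infra performed the same prefix validation in bash; a minimal equivalent sketch:

# Validate that the prefix is 3-5 lowercase alphanumeric characters,
# mirroring the "Check prefix" task above
if ! echo "$PREFIX" | grep -qE '^[a-z0-9]{3,5}$'; then
echo "The prefix '$PREFIX' is invalid" >&2
exit 1
fi
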
View File

@ -0,0 +1 @@
backend: true

Binary file not shown.

View File

@ -0,0 +1,41 @@
- name: Local or remote backend selector (remote)
template:
src: roles/main_infra/templates/remote-backend-selector.tf.j2
dest: roles/main_infra/files/remote-backend-selector.tf
when:
- backend|bool == true
- name: Local or remote backend selector (local)
file:
state: absent
dest: roles/main_infra/files/remote-backend-selector.tf
when:
- backend | default ('false') | bool != true
- name: Generating variables file
template:
src: roles/main_infra/templates/terraform.tfvars.j2
dest: roles/main_infra/files/terraform.tfvars
- name: Generating backend file
template:
src: roles/main_infra/templates/backend.tfvars.j2
dest: roles/main_infra/files/backend.tfvars
when: backend|bool == true
- name: Terraform destroy main infra
shell: "echo yes | {{ terraform_location }} {{ item }}"
args:
chdir: "roles/main_infra/files"
with_items:
- "init {{ '-backend-config=backend.tfvars' if backend|bool == true else '' }}"
- destroy
- name: Destroy S3 bucket
s3_bucket:
name: "{{ prefix }}-{{ bucket }}"
state: absent
- name: Destroy DynamoDB table
dynamodb_table:
name: "{{ prefix }}-{{ table }}"
state: absent

View File

@ -0,0 +1,2 @@
prefix: "sokol"
table: "dynamo"

View File

@ -0,0 +1,10 @@
- name: Create DynamoDB table for Terraform state locking
dynamodb_table:
name: "{{ prefix }}-{{ table }}"
region: us-east-1
hash_key_name: LockID
hash_key_type: STRING
read_capacity: 1
write_capacity: 1
tags:
origin: terraform
prefix: "{{ prefix }}"

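To confirm the lock table was created, it can be described with the AWS CLI (a sketch using this role's default prefix and table name; adjust for your own values):

aws dynamodb describe-table --table-name sokol-dynamo --region us-east-1
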
View File

@ -0,0 +1,2 @@
ec2_ssh_key_name: "sokol-test"
ec2_ssh_key_content: ""

View File

@ -0,0 +1,6 @@
- name: Creating a keypair
ec2_key:
name: "{{ ec2_ssh_key_name }} "
key_material: "{{ ec2_ssh_key_content }}"
force: true
when: ec2_ssh_key_content != ""

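To have the playbook upload a local public key rather than reuse an existing EC2 key pair, one option is to pass the key content as an extra var (the key path here is illustrative):

ansible-playbook deploy.yml -e "ec2_ssh_key_content='$(cat ~/.ssh/id_rsa.pub)'"
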
View File

@ -0,0 +1,58 @@
region: "us-east-1"
prefix: "sokol"
ec2_ssh_key_name: "sokol-test"
vpc_cidr: "10.0.0.0/16"
public_subnet_cidr: "10.0.0.0/24"
db_subnet_cidr: "10.0.1.0/24"
dns_zone_name: "poa.internal"
instance_type: "m5.xlarge"
root_block_size: 8
pool_size: 30
db_id: "poa"
db_name: "poa"
db_username: "username"
db_password: "qwerty12345"
db_instance_class: "db.m4.xlarge"
db_storage: "120"
db_storage_type: "gp2"
alb_ssl_policy: "ELBSecurityPolicy-2016-08"
alb_certificate_arn: "arn:aws:acm:us-east-1:008312654217:certificate/ce6ec2cb-eba4-4b02-af1d-e77ce8813497"
new_relic_app_name: ""
new_relic_license_key: ""
secret_key_base: "TPGMvGK0iIwlXBQuQDA5KRqk77VETbEBlG4gAWeb93TvBsYAjvoAvdODMd6ZeguPwf2YTRY3n7uvxXzQP4WayQ=="
use_ssl: false
networks: >
chains = {
"mychain" = "url/to/endpoint"
}
chain_trace_endpoint = {
"mychain" = "url/to/debug/endpoint/or/the/main/chain/endpoint"
}
chain_ws_endpoint = {
"mychain" = "url/to/websocket/endpoint"
}
chain_jsonrpc_variant = {
"mychain" = "parity"
}
chain_logo = {
"mychain" = "url/to/logo"
}
chain_coin = {
"mychain" = "coin"
}
chain_network = {
"mychain" = "network name"
}
chain_subnetwork = {
"mychain" = "subnetwork name"
}
chain_network_path = {
"mychain" = "path/to/root"
}
chain_network_icon = {
"mychain" = "_test_network_icon.html"
}

View File

@ -6,12 +6,12 @@ To deploy a new version of the application manually:
1) Run the following command to upload the application to S3.
aws deploy push --application-name=${module.stack.codedeploy_app} --s3-location s3://${module.stack.codedeploy_bucket}/path/to/release.zip --source=path/to/repo
aws deploy push --application-name=${aws_codedeploy_app.explorer.name} --s3-location s3://${aws_s3_bucket.explorer_releases.id}/path/to/release.zip --source=path/to/repo
2) Follow the instructions in the output from the `aws deploy push` command
to deploy the uploaded application. Use the deployment group names shown below:
- ${join("\n - ", formatlist("%s", module.stack.codedeploy_deployment_group_names))}
- ${join("\n - ", formatlist("%s", aws_codedeploy_deployment_group.explorer.*.deployment_group_name))}
You will also need to specify a deployment config name. Example:
@ -25,11 +25,11 @@ To deploy a new version of the application manually:
4) Once the deployment is complete, you can access each chain explorer from its respective url:
- ${join("\n - ", formatlist("%s: %s", keys(module.stack.explorer_urls), values(module.stack.explorer_urls)))}
- ${join("\n - ", formatlist("%s: %s", keys(zipmap(keys(var.chains), aws_lb.explorer.*.dns_name)), values(zipmap(keys(var.chains), aws_lb.explorer.*.dns_name))))}
OUTPUT
}
output "db_instance_address" {
description = "The internal IP address of the RDS instance"
value = "${module.stack.db_instance_address}"
value = "${aws_db_instance.default.address}"
}

View File

@ -7,7 +7,7 @@ resource "aws_subnet" "default" {
map_public_ip_on_launch = true
tags {
name = "${var.prefix}-default-subnet"
Name = "${var.prefix}-default-subnet"
prefix = "${var.prefix}"
origin = "terraform"
}
@ -22,7 +22,7 @@ resource "aws_subnet" "alb" {
map_public_ip_on_launch = true
tags {
name = "${var.prefix}-default-subnet"
Name = "${var.prefix}-default-subnet"
prefix = "${var.prefix}"
origin = "terraform"
}
@ -37,7 +37,7 @@ resource "aws_subnet" "database" {
map_public_ip_on_launch = false
tags {
name = "${var.prefix}-database-subnet${count.index}"
Name = "${var.prefix}-database-subnet${count.index}"
prefix = "${var.prefix}"
origin = "terraform"
}

View File

@ -14,6 +14,7 @@ resource "aws_vpc" "vpc" {
enable_dns_support = true
tags {
Name = "${var.prefix}"
prefix = "${var.prefix}"
origin = "terraform"
}

View File

@ -0,0 +1,39 @@
- name: Local or remote backend selector (remote)
template:
src: remote-backend-selector.tf.j2
dest: roles/main_infra/files/remote-backend-selector.tf
when:
- backend|bool == true
- name: Local or remote backend selector (local)
file:
state: absent
dest: roles/main_infra/files/remote-backend-selector.tf
when:
- backend | default ('false') | bool != true
- name: Generating variables file
template:
src: terraform.tfvars.j2
dest: roles/main_infra/files/terraform.tfvars
- name: Generating backend file
template:
src: backend.tfvars.j2
dest: roles/main_infra/files/backend.tfvars
when: backend|bool == true
# Workaround: the Ansible terraform module returns an unexpected error, so the Terraform CLI is invoked through the shell.
- name: Terraform provisioning
shell: "echo yes | {{ terraform_location }} {{ item }}"
register: output
args:
chdir: "roles/main_infra/files"
with_items:
- "init{{ ' -backend-config=backend.tfvars' if backend|bool == true else '' }}"
- plan
- apply
- name: Output info from Terraform
debug:
var: output

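If the wrapped invocation fails, the same steps can be reproduced by hand (a sketch assuming the remote S3 backend is enabled):

cd roles/main_infra/files
terraform init -backend-config=backend.tfvars
terraform plan
terraform apply
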
View File

@ -0,0 +1,4 @@
region = "{{ lookup('env','AWS_REGION') }}"
bucket = "{{ prefix }}-{{ bucket }}"
dynamodb_table = "{{ prefix }}-{{ dynamodb_table }}"
key = "terraform.tfstate"

View File

@ -0,0 +1,4 @@
terraform {
backend "s3" {
}
}

View File

@ -0,0 +1,28 @@
region = "{{ lookup('env','AWS_REGION') }}"
prefix = "{{ prefix }}"
key_name = "{{ ec2_ssh_key_name }}"
vpc_cidr = "{{ vpc_cidr }}"
public_subnet_cidr = "{{ public_subnet_cidr }}"
db_subnet_cidr = "{{ db_subnet_cidr }}"
dns_zone_name = "{{ dns_zone_name }}"
instance_type = "{{ instance_type }}"
root_block_size = "{{ root_block_size }}"
pool_size = "{{ pool_size }}"
db_id = "{{ db_id }}"
db_name = "{{ db_name }}"
db_username = "{{ db_username }}"
db_password = "{{ db_password }}"
db_instance_class = "{{ db_instance_class }}"
db_storage = "{{ db_storage }}"
db_storage_type = "{{ db_storage_type }}"
alb_ssl_policy = "{{ alb_ssl_policy }}"
alb_certificate_arn = "{{ alb_certificate_arn }}"
use_ssl = "{{ use_ssl }}"
new_relic_app_name = "{{ new_relic_app_name }}"
new_relic_license_key = "{{ new_relic_license_key }}"
secret_key_base = "{{ secret_key_base }}"
{{ networks }}

View File

@ -0,0 +1,2 @@
prefix: "sokol"
bucket: "bucket"

29
roles/s3/tasks/main.yml Normal file
View File

@ -0,0 +1,29 @@
- name: Create S3 bucket
s3_bucket:
name: "{{ prefix }}-{{ bucket }}"
versioning: yes
tags:
origin: terraform
prefix: "{{ prefix }}"
- name: Add lifecycle management policy to created S3 bucket
s3_lifecycle:
name: "{{ prefix }}-{{ bucket }}"
rule_id: "expire"
noncurrent_version_expiration_days: 90
status: enabled
state: present
- name: Check if config file exists
stat:
path: "{{ playbook_dir }}/group_vars/all.yml"
register: stat_result
when: upload_config_to_s3|bool == True
- name: Upload config to S3 bucket
s3:
bucket: "{{ prefix }}-{{ bucket }}"
object: localhost.yml
src: "{{ playbook_dir }}/group_vars/all.yml"
mode: put
when: upload_config_to_s3|bool == True and stat_result.stat.exists == True

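After this role runs, the state bucket and its versioning can be verified with the AWS CLI (the bucket name below combines the example prefix and bucket defaults from this repo; adjust for yours):

aws s3api get-bucket-versioning --bucket sokol-poa-terraform-state
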
View File

@ -1,8 +0,0 @@
module "backend" {
source = "../modules/backend"
bootstrap = "${terraform.workspace == "base" ? 1 : 0}"
bucket = "${var.bucket}"
dynamodb_table = "${var.dynamodb_table}"
prefix = "${var.prefix}"
}

View File

@ -1 +0,0 @@
../common/provider.tf

View File

@ -1 +0,0 @@
../common/variables.tf

View File

@ -1,12 +0,0 @@
region = "us-east-1"
bucket = "poa-terraform-state"
dynamodb_table = "poa-terraform-lock"
key_name = "sokol-test"
prefix = "sokol"
db_password = "qwerty12345"
db_instance_class = "db.m4.xlarge"
db_storage = "120"
alb_ssl_policy = "ELBSecurityPolicy-2016-08"
alb_certificate_arn = "arn:aws:acm:us-east-1:290379793816:certificate/6d1bab74-fb46-4244-aab2-832bf519ab24"
root_block_size = 120
pool_size = 30