Merge PR #1234: Cloud network setup scripts

* Remotenet command for AWS and Ansible fixes for all remotenet commands
* Ansible for AWS, Terraform for AWS
* Moved remotenet commands to the networks/ folder; created Ansible playbooks and example scripts to set up remote testnets in the cloud
* Added application deployment infrastructure scripts
* Obsoleted the DigitalOcean scripts; some cleanup in the AWS scripts
* Changelog -> pending, disclaimer

parent 671d8494b4
commit 75eeaadaae
Makefile (25 changed lines)

@@ -198,32 +198,11 @@ localnet-start: localnet-stop

 localnet-stop:
 	docker-compose down

-########################################
-### Remote validator nodes using terraform and ansible
-
-TESTNET_NAME?=remotenet
-SERVERS?=4
-BINARY=$(CURDIR)/build/gaiad
-remotenet-start:
-	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
-	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
-	@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
-	cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(TESTNET_NAME)" -var SERVERS="$(SERVERS)"
-	cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(TESTNET_NAME)" -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" setup-validators.yml
-	cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(TESTNET_NAME)" start.yml
-
-remotenet-stop:
-	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
-	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa"
-
-remotenet-status:
-	cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(TESTNET_NAME)" status.yml
-
 # To avoid unintended conflicts with file names, always add to .PHONY
 # unless there is a reason not to.
 # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
 .PHONY: build build_cosmos-sdk-cli build_examples install install_examples install_cosmos-sdk-cli install_debug dist \
 check_tools check_dev_tools get_tools get_dev_tools get_vendor_deps draw_deps test test_cli test_unit \
 test_cover test_lint benchmark devdoc_init devdoc devdoc_save devdoc_update \
-build-linux build-docker-gaiadnode localnet-start localnet-stop remotenet-start \
-remotenet-stop remotenet-status format check-ledger test_sim update_tools update_dev_tools
+build-linux build-docker-gaiadnode localnet-start localnet-stop \
+format check-ledger test_sim update_tools update_dev_tools
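The remotenet-* targets removed above are not gone; per the commit message they now live in the networks/ folder as the validators-* targets shown later in this diff. A rough sketch of the equivalent calls after this change (the AWS credential values are placeholders, not part of the diff):

    # Sketch: run the relocated cloud targets from the networks/ folder.
    export AWS_ACCESS_KEY_ID="AKIA..."          # placeholder
    export AWS_SECRET_ACCESS_KEY="..."          # placeholder
    cd networks
    make validators-start     # roughly replaces the old `make remotenet-start`
    make validators-status    # roughly replaces the old `make remotenet-status`
    make validators-stop      # roughly replaces the old `make remotenet-stop`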
Changelog (pending):

@@ -30,6 +30,7 @@ FEATURES
 * [baseapp] Initialize validator set on ResponseInitChain
 * [cosmos-sdk-cli] Added support for cosmos-sdk-cli tool under cosmos-sdk/cmd
 * This allows SDK users to initialize a new project repository.
+* [tests] Remotenet commands for AWS (awsnet)

 IMPROVEMENTS
 * [baseapp] Allow any alphanumeric character in route

@@ -40,6 +41,7 @@ IMPROVEMENTS
 * [tests] Add tests to example apps in docs
 * [x/gov] Votes on a proposal can now be queried
 * [x/bank] Unit tests are now table-driven
+* [tests] Fixes ansible scripts to work with AWS too

 BUG FIXES
 * \#1666 Add intra-tx counter to the genesis validators
@@ -0,0 +1,125 @@ (new file: Makefile for the networks/ folder)

########################################
### These targets were broken out of the main Makefile to enable easy setup of testnets.
### They use a form of terraform + ansible to build full nodes in AWS.
### The shell scripts in this folder are example uses of the targets.

# Name of the testnet. Used in chain-id.
TESTNET_NAME?=remotenet

# Name of the servers grouped together for management purposes. Used in tagging the servers in the cloud.
CLUSTER_NAME?=$(TESTNET_NAME)

# Number of servers to put in one availability zone in AWS.
SERVERS?=1

# Number of regions to use in AWS. One region usually contains 2-3 availability zones.
REGION_LIMIT?=1

# Path to gaiad for deployment. Must be a Linux binary.
BINARY?=$(CURDIR)/../build/gaiad

# Path to the genesis.json and config.toml files to deploy on full nodes.
GENESISFILE?=$(CURDIR)/../build/genesis.json
CONFIGFILE?=$(CURDIR)/../build/config.toml

# Name of application for app deployments
APP_NAME ?= faucettestnet1
# Region to deploy VPC and application in AWS
REGION ?= us-east-2

all:
	@echo "There is no all. Only sum of the ones."

disclaimer:
	@echo "WARNING: These are example network configuration scripts only and have not undergone security review. They should not be used for production deployments."

########################################
### Extract genesis.json and config.toml from a node in a cluster

extract-config: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" extract-config.yml


########################################
### Remote validator nodes using terraform and ansible in AWS

validators-start: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
	@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
	cd remote/terraform-aws && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -auto-approve -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)" -var REGION_LIMIT="$(REGION_LIMIT)"
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" setup-validators.yml
	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b start.yml

validators-stop: disclaimer
	cd remote/terraform-aws && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)"
	rm -rf remote/ansible/keys/ remote/ansible/files/

validators-status: disclaimer
	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" status.yml

#validators-clear:
#	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b clear-config.yml


########################################
### Remote full nodes using terraform and ansible in Amazon AWS

fullnodes-start: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
	@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
	cd remote/terraform-aws && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -auto-approve -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)" -var REGION_LIMIT="$(REGION_LIMIT)"
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" setup-fullnodes.yml
	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b start.yml

fullnodes-stop: disclaimer
	cd remote/terraform-aws && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)"
	rm -rf remote/ansible/keys/ remote/ansible/files/

fullnodes-status: disclaimer
	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" status.yml

########################################
### Other calls

upgrade-gaiad: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
	@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) upgrade-gaiad.yml

list:
	remote/ansible/inventory/ec2.py | python -c 'import json,sys ; print "\n".join(json.loads("".join(sys.stdin.readlines()))["tag_Environment_$(CLUSTER_NAME)"])'

install-datadog: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	@if [ -z "$(DD_API_KEY)" ]; then echo "DD_API_KEY environment variable not set." ; false ; fi
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e DD_API_KEY="$(DD_API_KEY)" -e TESTNET_NAME="$(TESTNET_NAME)" -e CLUSTER_NAME="$(CLUSTER_NAME)" install-datadog-agent.yml

remove-datadog: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b remove-datadog-agent.yml


########################################
### Application infrastructure setup

app-start: disclaimer
	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
	@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
	cd remote/terraform-app && terraform init && (terraform workspace new "$(APP_NAME)" || terraform workspace select "$(APP_NAME)") && terraform apply -auto-approve -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var APP_NAME="$(APP_NAME)" -var SERVERS="$(SERVERS)" -var REGION="$(REGION)"
	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(APP_NAME)" -u centos -b -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" setup-fullnodes.yml
	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(APP_NAME)" -u centos -b start.yml

app-stop: disclaimer
	cd remote/terraform-app && terraform workspace select "$(APP_NAME)" && terraform destroy -force -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var APP_NAME=$(APP_NAME) && terraform workspace select default && terraform workspace delete "$(APP_NAME)"
	rm -rf remote/ansible/keys/ remote/ansible/files/

# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: all extract-config validators-start validators-stop validators-status fullnodes-start fullnodes-stop fullnodes-status upgrade-gaiad list install-datadog remove-datadog app-start app-stop
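Because every knob above uses ?=, each one can be overridden from the environment or the make command line without editing the file. A minimal sketch, assuming AWS credentials are already exported and using made-up testnet and cluster names:

    # Sketch: two servers per availability zone, across two regions.
    cd networks
    TESTNET_NAME=mytestnet CLUSTER_NAME=mycluster \
    SERVERS=2 REGION_LIMIT=2 make validators-start

    # Destroy the same cluster later; the terraform workspace is keyed by CLUSTER_NAME.
    CLUSTER_NAME=mycluster make validators-stop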
@ -0,0 +1,101 @@
|
||||||
|
########################################
|
||||||
|
### WARNING: The DigitalOcean scripts are deprecated. They are still here because
|
||||||
|
### they might be useful for developers.
|
||||||
|
### Use -f to call this Makefile: "make -f Makefile.do target"
|
||||||
|
|
||||||
|
# Name of the testnet. Used in chain-id.
|
||||||
|
TESTNET_NAME?=remotenet
|
||||||
|
|
||||||
|
# Name of the servers grouped together for management purposes. Used in tagging the servers in the cloud.
|
||||||
|
CLUSTER_NAME?=$(TESTNET_NAME)
|
||||||
|
|
||||||
|
# Number of servers deployed in Digital Ocean.
|
||||||
|
# Number of servers to put in one availability zone in AWS.
|
||||||
|
SERVERS?=1
|
||||||
|
|
||||||
|
# Path to gaiad for deployment. Must be a Linux binary.
|
||||||
|
BINARY?=$(CURDIR)/../build/gaiad
|
||||||
|
|
||||||
|
# Path to the genesis.json and config.toml files to deploy on full nodes.
|
||||||
|
GENESISFILE?=$(CURDIR)/../build/genesis.json
|
||||||
|
CONFIGFILE?=$(CURDIR)/../build/config.toml
|
||||||
|
|
||||||
|
all:
|
||||||
|
@echo "There is no all. Only sum of the ones."
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
### Extract genesis.json and config.toml from a node in a cluster
|
||||||
|
|
||||||
|
extract-config:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
|
||||||
|
cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" extract-config.yml
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
### Remote validator nodes using terraform and ansible in Digital Ocean
|
||||||
|
|
||||||
|
validators-start:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
|
||||||
|
@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
|
||||||
|
cd remote/terraform-do && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -auto-approve -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)"
|
||||||
|
cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" setup-validators.yml
|
||||||
|
cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root start.yml
|
||||||
|
|
||||||
|
validators-stop:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
cd remote/terraform-do && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)"
|
||||||
|
rm -rf remote/ansible/keys/
|
||||||
|
|
||||||
|
validators-status:
|
||||||
|
cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" status.yml
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
### Remote full nodes using terraform and ansible in Digital Ocean
|
||||||
|
|
||||||
|
fullnodes-start:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
|
||||||
|
@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
|
||||||
|
cd remote/terraform-do && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)"
|
||||||
|
cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" setup-fullnodes.yml
|
||||||
|
cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root start.yml
|
||||||
|
|
||||||
|
fullnodes-stop:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
cd remote/terraform-do && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)"
|
||||||
|
|
||||||
|
fullnodes-status:
|
||||||
|
cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" status.yml
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
### Other calls
|
||||||
|
|
||||||
|
upgrade-gaiad:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
|
||||||
|
@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
|
||||||
|
cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -e BINARY=$(BINARY) upgrade-gaiad.yml
|
||||||
|
|
||||||
|
list:
|
||||||
|
remote/ansible/inventory/digital_ocean.py | python -c 'import json,sys ; print "\n".join(json.loads("".join(sys.stdin.readlines()))["$(CLUSTER_NAME)"]["hosts"])'
|
||||||
|
|
||||||
|
install-datadog:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
@if [ -z "$(DD_API_KEY)" ]; then echo "DD_API_KEY environment variable not set." ; false ; fi
|
||||||
|
cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root -e DD_API_KEY="$(DD_API_KEY)" -e TESTNET_NAME=$(TESTNET_NAME) -e CLUSTER_NAME=$(CLUSTER_NAME) install-datadog-agent.yml
|
||||||
|
|
||||||
|
remove-datadog:
|
||||||
|
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
|
||||||
|
cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root remove-datadog-agent.yml
|
||||||
|
|
||||||
|
|
||||||
|
# To avoid unintended conflicts with file names, always add to .PHONY
|
||||||
|
# unless there is a reason not to.
|
||||||
|
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
|
||||||
|
.PHONY: all extract-config validators-start validators-stop validators-status fullnodes-start fullnodes-stop fullnodes-status upgrade-gaiad list-do install-datadog remove-datadog
|
||||||
|
|
|
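The deprecated DigitalOcean targets above remain callable, but as their header notes they have to be selected explicitly with -f. A minimal sketch, assuming a valid DigitalOcean API token:

    # Sketch: drive the deprecated DigitalOcean Makefile explicitly.
    cd networks
    export DO_API_TOKEN="..."        # placeholder token
    make -f Makefile.do validators-start
    make -f Makefile.do validators-stop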
@@ -1,6 +1,8 @@ (existing Terraform & Ansible README)

 Terraform & Ansible
 ===================

+WARNING: The Digital Ocean scripts are obsolete. They are here because they might still be useful for developers.
+
 Automated deployments are done using `Terraform <https://www.terraform.io/>`__ to create servers on Digital Ocean then
 `Ansible <http://www.ansible.com/>`__ to create and manage testnets on those servers.
@@ -0,0 +1,78 @@ (new file: README for the AWS testnet scripts)

Terraform & Ansible
===================

Automated deployments are done using `Terraform <https://www.terraform.io/>`__ to create servers on AWS then
`Ansible <http://www.ansible.com/>`__ to create and manage testnets on those servers.

Prerequisites
-------------

- Install `Terraform <https://www.terraform.io/downloads.html>`__ and `Ansible <http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`__ on a Linux machine.
- Create an `AWS API token <https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html>`__ with EC2 create capability.
- Create SSH keys

::

   export AWS_ACCESS_KEY_ID="2345234jk2lh4234"
   export AWS_SECRET_ACCESS_KEY="234jhkg234h52kh4g5khg34"
   export TESTNET_NAME="remotenet"
   export CLUSTER_NAME="remotenetvalidators"
   export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"
   export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub"

These will be used by both ``terraform`` and ``ansible``.

Create a remote network
-----------------------

::

   SERVERS=1 REGION_LIMIT=1 make validators-start

The testnet name is what's going to be used in --chain-id, while the cluster name is the administrative tag in AWS for the servers. The code creates SERVERS servers in each availability zone of up to REGION_LIMIT regions, starting at us-east-2 (us-east-1 is excluded). The shell script below does the same and is sometimes more convenient to run.

::

   ./new-testnet.sh "$TESTNET_NAME" "$CLUSTER_NAME" 1 1

Quickly see the /status endpoint
--------------------------------

::

   make validators-status

Delete servers
--------------

::

   make validators-stop

Logging
-------

You can ship logs to Logz.io, a hosted Elastic Stack (Elasticsearch, Logstash and Kibana) provider, and set up your nodes to log there automatically. Create an account and get your API key from the notes on `this page <https://app.logz.io/#/dashboard/data-sources/Filebeat>`__, then:

::

   yum install systemd-devel || echo "This will only work on RHEL-based systems."
   apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."

   go get github.com/mheese/journalbeat
   ansible-playbook -i inventory/digital_ocean.py -l remotenet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345

Monitoring
----------

You can install the DataDog agent with:

::

   make install-datadog
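Putting the steps above together, an end-to-end session looks roughly like the following sketch; the access keys are placeholders and every command is one already defined in the Makefile and README above:

    # Sketch: create, inspect and tear down a one-node-per-AZ testnet in one region.
    cd networks
    export AWS_ACCESS_KEY_ID="AKIA..."            # placeholder
    export AWS_SECRET_ACCESS_KEY="..."            # placeholder
    export TESTNET_NAME="remotenet"
    export CLUSTER_NAME="remotenetvalidators"

    SERVERS=1 REGION_LIMIT=1 make validators-start   # terraform + ansible bring the validators up
    make validators-status                           # query each node's /status endpoint
    make validators-stop                             # destroy the terraform workspace again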
@@ -0,0 +1,25 @@ (new file: add-cluster.sh)

#!/bin/sh
# add-cluster - example make call to add a set of nodes to an existing testnet in AWS
# WARNING: Run it from the current directory - it uses relative paths to ship the binary and the genesis.json,config.toml files

if [ $# -ne 4 ]; then
  echo "Usage: ./add-cluster.sh <testnetname> <clustername> <regionlimit> <numberofnodesperavailabilityzone>"
  exit 1
fi
set -eux

# The testnet name is the same on all nodes
export TESTNET_NAME=$1
export CLUSTER_NAME=$2
export REGION_LIMIT=$3
export SERVERS=$4

# Build the AWS full nodes
rm -rf remote/ansible/keys
make fullnodes-start

# Save the private key seed words from the nodes
SEEDFOLDER="${TESTNET_NAME}-${CLUSTER_NAME}-seedwords"
mkdir -p "${SEEDFOLDER}"
test ! -f "${SEEDFOLDER}/node0" && mv remote/ansible/keys/* "${SEEDFOLDER}"
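For illustration only, a possible invocation of the script above, adding one full node per availability zone in a single region to an existing testnet (the cluster name is made up):

    cd networks
    ./add-cluster.sh remotenet fullnodes1 1 1
    ls remotenet-fullnodes1-seedwords/    # seed words the script saved, one file per node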
@@ -0,0 +1,14 @@ (new file: add-datadog.sh)

#!/bin/sh
# add-datadog - add datadog agent to a set of nodes

if [ $# -ne 2 ]; then
  echo "Usage: ./add-datadog.sh <testnetname> <clustername>"
  exit 1
fi
set -eux

export TESTNET_NAME=$1
export CLUSTER_NAME=$2

make install-datadog
@@ -0,0 +1,14 @@ (new file: del-cluster.sh)

#!/bin/sh
# del-cluster - example make call to delete a set of nodes on an existing testnet in AWS

if [ $# -ne 1 ]; then
  echo "Usage: ./del-cluster.sh <clustername>"
  exit 1
fi
set -eux

export CLUSTER_NAME=$1

# Delete the AWS nodes
make fullnodes-stop
@@ -0,0 +1,13 @@ (new file: del-datadog.sh)

#!/bin/sh
# del-datadog - remove datadog agent from a set of nodes

if [ $# -ne 1 ]; then
  echo "Usage: ./del-datadog.sh <clustername>"
  exit 1
fi
set -eux

export CLUSTER_NAME=$1

make remove-datadog
|
@ -0,0 +1,13 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# list - list the IPs of a set of nodes
|
||||||
|
|
||||||
|
if [ $# -ne 1 ]; then
|
||||||
|
echo "Usage: ./list.sh <clustername>"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
set -eux
|
||||||
|
|
||||||
|
export CLUSTER_NAME=$1
|
||||||
|
|
||||||
|
make list
|
||||||
|
|
|
@@ -0,0 +1,30 @@ (new file: new-testnet.sh)

#!/bin/sh
# new-testnet - example make call to create a new set of validator nodes in AWS
# WARNING: Run it from the current directory - it uses relative paths to ship the binary

if [ $# -ne 4 ]; then
  echo "Usage: ./new-testnet.sh <testnetname> <clustername> <regionlimit> <numberofnodesperavailabilityzone>"
  exit 1
fi
set -eux

if [ -z "`file ../build/gaiad | grep 'ELF 64-bit'`" ]; then
  # Build the linux binary we're going to ship to the nodes
  make -C .. build-linux
fi

# The testnet name is the same on all nodes
export TESTNET_NAME=$1
export CLUSTER_NAME=$2
export REGION_LIMIT=$3
export SERVERS=$4

# Build the AWS validator nodes and extract the genesis.json and config.toml from one of them
rm -rf remote/ansible/keys
make validators-start extract-config

# Save the private key seed words from the validators
SEEDFOLDER="${TESTNET_NAME}-${CLUSTER_NAME}-seedwords"
mkdir -p "${SEEDFOLDER}"
test ! -f "${SEEDFOLDER}/node0" && mv remote/ansible/keys/* "${SEEDFOLDER}"
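A possible run of the script above, matching the README example, and where the generated secrets end up (assuming the default paths):

    cd networks
    ./new-testnet.sh remotenet remotenetvalidators 1 1
    ls remotenet-remotenetvalidators-seedwords/    # node0, node1, ... one seed-word file per validator
    cat remotenet-remotenetvalidators-seedwords/node0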
@@ -1,2 +1,3 @@
 *.retry
 files/*
+keys/*
@ -1,7 +1,6 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
user: root
|
|
||||||
any_errors_fatal: true
|
any_errors_fatal: true
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
roles:
|
roles:
|
||||||
|
|
|
@ -0,0 +1,8 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- hosts: all
|
||||||
|
any_errors_fatal: true
|
||||||
|
gather_facts: no
|
||||||
|
roles:
|
||||||
|
- extract-config
|
||||||
|
|
|
@ -0,0 +1,10 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
#DD_API_KEY,TESTNET_NAME,CLUSTER_NAME required
|
||||||
|
|
||||||
|
- hosts: all
|
||||||
|
any_errors_fatal: true
|
||||||
|
gather_facts: no
|
||||||
|
roles:
|
||||||
|
- install-datadog-agent
|
||||||
|
|
|
@ -0,0 +1,209 @@
|
||||||
|
# Ansible EC2 external inventory script settings
|
||||||
|
#
|
||||||
|
|
||||||
|
[ec2]
|
||||||
|
|
||||||
|
# to talk to a private eucalyptus instance uncomment these lines
|
||||||
|
# and edit edit eucalyptus_host to be the host name of your cloud controller
|
||||||
|
#eucalyptus = True
|
||||||
|
#eucalyptus_host = clc.cloud.domain.org
|
||||||
|
|
||||||
|
# AWS regions to make calls to. Set this to 'all' to make request to all regions
|
||||||
|
# in AWS and merge the results together. Alternatively, set this to a comma
|
||||||
|
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
|
||||||
|
# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or
|
||||||
|
# AWS_DEFAULT_REGION environment variable will be read to determine the region.
|
||||||
|
regions = all
|
||||||
|
regions_exclude = us-gov-west-1, cn-north-1
|
||||||
|
|
||||||
|
# When generating inventory, Ansible needs to know how to address a server.
|
||||||
|
# Each EC2 instance has a lot of variables associated with it. Here is the list:
|
||||||
|
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
|
||||||
|
# Below are 2 variables that are used as the address of a server:
|
||||||
|
# - destination_variable
|
||||||
|
# - vpc_destination_variable
|
||||||
|
|
||||||
|
# This is the normal destination variable to use. If you are running Ansible
|
||||||
|
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
|
||||||
|
# running Ansible from within EC2, then perhaps you want to use the internal
|
||||||
|
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
|
||||||
|
# may optionally be used; however the boto instance variables hold precedence
|
||||||
|
# in the event of a collision.
|
||||||
|
destination_variable = public_dns_name
|
||||||
|
|
||||||
|
# This allows you to override the inventory_name with an ec2 variable, instead
|
||||||
|
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
|
||||||
|
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
|
||||||
|
#hostname_variable = tag_Name
|
||||||
|
|
||||||
|
# For server inside a VPC, using DNS names may not make sense. When an instance
|
||||||
|
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
|
||||||
|
# this to 'ip_address' will return the public IP address. For instances in a
|
||||||
|
# private subnet, this should be set to 'private_ip_address', and Ansible must
|
||||||
|
# be run from within EC2. The key of an EC2 tag may optionally be used; however
|
||||||
|
# the boto instance variables hold precedence in the event of a collision.
|
||||||
|
# WARNING: - instances that are in the private vpc, _without_ public ip address
|
||||||
|
# will not be listed in the inventory until You set:
|
||||||
|
# vpc_destination_variable = private_ip_address
|
||||||
|
vpc_destination_variable = ip_address
|
||||||
|
|
||||||
|
# The following two settings allow flexible ansible host naming based on a
|
||||||
|
# python format string and a comma-separated list of ec2 tags. Note that:
|
||||||
|
#
|
||||||
|
# 1) If the tags referenced are not present for some instances, empty strings
|
||||||
|
# will be substituted in the format string.
|
||||||
|
# 2) This overrides both destination_variable and vpc_destination_variable.
|
||||||
|
#
|
||||||
|
#destination_format = {0}.{1}.example.com
|
||||||
|
#destination_format_tags = Name,environment
|
||||||
|
|
||||||
|
# To tag instances on EC2 with the resource records that point to them from
|
||||||
|
# Route53, set 'route53' to True.
|
||||||
|
route53 = False
|
||||||
|
|
||||||
|
# To use Route53 records as the inventory hostnames, uncomment and set
|
||||||
|
# to equal the domain name you wish to use. You must also have 'route53' (above)
|
||||||
|
# set to True.
|
||||||
|
# route53_hostnames = .example.com
|
||||||
|
|
||||||
|
# To exclude RDS instances from the inventory, uncomment and set to False.
|
||||||
|
#rds = False
|
||||||
|
|
||||||
|
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
|
||||||
|
#elasticache = False
|
||||||
|
|
||||||
|
# Additionally, you can specify the list of zones to exclude looking up in
|
||||||
|
# 'route53_excluded_zones' as a comma-separated list.
|
||||||
|
# route53_excluded_zones = samplezone1.com, samplezone2.com
|
||||||
|
|
||||||
|
# By default, only EC2 instances in the 'running' state are returned. Set
|
||||||
|
# 'all_instances' to True to return all instances regardless of state.
|
||||||
|
all_instances = False
|
||||||
|
|
||||||
|
# By default, only EC2 instances in the 'running' state are returned. Specify
|
||||||
|
# EC2 instance states to return as a comma-separated list. This
|
||||||
|
# option is overridden when 'all_instances' is True.
|
||||||
|
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
|
||||||
|
|
||||||
|
# By default, only RDS instances in the 'available' state are returned. Set
|
||||||
|
# 'all_rds_instances' to True return all RDS instances regardless of state.
|
||||||
|
all_rds_instances = False
|
||||||
|
|
||||||
|
# Include RDS cluster information (Aurora etc.)
|
||||||
|
include_rds_clusters = False
|
||||||
|
|
||||||
|
# By default, only ElastiCache clusters and nodes in the 'available' state
|
||||||
|
# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes'
|
||||||
|
# to True return all ElastiCache clusters and nodes, regardless of state.
|
||||||
|
#
|
||||||
|
# Note that all_elasticache_nodes only applies to listed clusters. That means
|
||||||
|
# if you set all_elastic_clusters to false, no node will be return from
|
||||||
|
# unavailable clusters, regardless of the state and to what you set for
|
||||||
|
# all_elasticache_nodes.
|
||||||
|
all_elasticache_replication_groups = False
|
||||||
|
all_elasticache_clusters = False
|
||||||
|
all_elasticache_nodes = False
|
||||||
|
|
||||||
|
# API calls to EC2 are slow. For this reason, we cache the results of an API
|
||||||
|
# call. Set this to the path you want cache files to be written to. Two files
|
||||||
|
# will be written to this directory:
|
||||||
|
# - ansible-ec2.cache
|
||||||
|
# - ansible-ec2.index
|
||||||
|
cache_path = ~/.ansible/tmp
|
||||||
|
|
||||||
|
# The number of seconds a cache file is considered valid. After this many
|
||||||
|
# seconds, a new API call will be made, and the cache file will be updated.
|
||||||
|
# To disable the cache, set this value to 0
|
||||||
|
cache_max_age = 300
|
||||||
|
|
||||||
|
# Organize groups into a nested/hierarchy instead of a flat namespace.
|
||||||
|
nested_groups = False
|
||||||
|
|
||||||
|
# Replace - tags when creating groups to avoid issues with ansible
|
||||||
|
replace_dash_in_groups = True
|
||||||
|
|
||||||
|
# If set to true, any tag of the form "a,b,c" is expanded into a list
|
||||||
|
# and the results are used to create additional tag_* inventory groups.
|
||||||
|
expand_csv_tags = False
|
||||||
|
|
||||||
|
# The EC2 inventory output can become very large. To manage its size,
|
||||||
|
# configure which groups should be created.
|
||||||
|
group_by_instance_id = True
|
||||||
|
group_by_region = True
|
||||||
|
group_by_availability_zone = True
|
||||||
|
group_by_aws_account = False
|
||||||
|
group_by_ami_id = True
|
||||||
|
group_by_instance_type = True
|
||||||
|
group_by_instance_state = False
|
||||||
|
group_by_key_pair = True
|
||||||
|
group_by_vpc_id = True
|
||||||
|
group_by_security_group = True
|
||||||
|
group_by_tag_keys = True
|
||||||
|
group_by_tag_none = True
|
||||||
|
group_by_route53_names = True
|
||||||
|
group_by_rds_engine = True
|
||||||
|
group_by_rds_parameter_group = True
|
||||||
|
group_by_elasticache_engine = True
|
||||||
|
group_by_elasticache_cluster = True
|
||||||
|
group_by_elasticache_parameter_group = True
|
||||||
|
group_by_elasticache_replication_group = True
|
||||||
|
|
||||||
|
# If you only want to include hosts that match a certain regular expression
|
||||||
|
# pattern_include = staging-*
|
||||||
|
|
||||||
|
# If you want to exclude any hosts that match a certain regular expression
|
||||||
|
# pattern_exclude = staging-*
|
||||||
|
|
||||||
|
# Instance filters can be used to control which instances are retrieved for
|
||||||
|
# inventory. For the full list of possible filters, please read the EC2 API
|
||||||
|
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
|
||||||
|
# Filters are key/value pairs separated by '=', to list multiple filters use
|
||||||
|
# a list separated by commas. See examples below.
|
||||||
|
|
||||||
|
# If you want to apply multiple filters simultaneously, set stack_filters to
|
||||||
|
# True. Default behaviour is to combine the results of all filters. Stacking
|
||||||
|
# allows the use of multiple conditions to filter down, for example by
|
||||||
|
# environment and type of host.
|
||||||
|
stack_filters = False
|
||||||
|
|
||||||
|
# Retrieve only instances with (key=value) env=staging tag
|
||||||
|
# instance_filters = tag:env=staging
|
||||||
|
|
||||||
|
# Retrieve only instances with role=webservers OR role=dbservers tag
|
||||||
|
# instance_filters = tag:role=webservers,tag:role=dbservers
|
||||||
|
|
||||||
|
# Retrieve only t1.micro instances OR instances with tag env=staging
|
||||||
|
# instance_filters = instance-type=t1.micro,tag:env=staging
|
||||||
|
|
||||||
|
# You can use wildcards in filter values also. Below will list instances which
|
||||||
|
# tag Name value matches webservers1*
|
||||||
|
# (ex. webservers15, webservers1a, webservers123 etc)
|
||||||
|
# instance_filters = tag:Name=webservers1*
|
||||||
|
|
||||||
|
# An IAM role can be assumed, so all requests are run as that role.
|
||||||
|
# This can be useful for connecting across different accounts, or to limit user
|
||||||
|
# access
|
||||||
|
# iam_role = role-arn
|
||||||
|
|
||||||
|
# A boto configuration profile may be used to separate out credentials
|
||||||
|
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
|
||||||
|
# boto_profile = some-boto-profile-name
|
||||||
|
|
||||||
|
|
||||||
|
[credentials]
|
||||||
|
|
||||||
|
# The AWS credentials can optionally be specified here. Credentials specified
|
||||||
|
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
|
||||||
|
# AWS_PROFILE is set, or if the boto_profile property above is set.
|
||||||
|
#
|
||||||
|
# Supplying AWS credentials here is not recommended, as it introduces
|
||||||
|
# non-trivial security concerns. When going down this route, please make sure
|
||||||
|
# to set access permissions for this file correctly, e.g. handle it the same
|
||||||
|
# way as you would a private SSH key.
|
||||||
|
#
|
||||||
|
# Unlike the boto and AWS configure files, this section does not support
|
||||||
|
# profiles.
|
||||||
|
#
|
||||||
|
# aws_access_key_id = AXXXXXXXXXXXXXX
|
||||||
|
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
|
||||||
|
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
(One file's diff is suppressed because it is too large.)
|
@ -3,7 +3,6 @@
|
||||||
#Note: You need to add LOGZIO_TOKEN variable with your API key. Like this: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456
|
#Note: You need to add LOGZIO_TOKEN variable with your API key. Like this: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
user: root
|
|
||||||
any_errors_fatal: true
|
any_errors_fatal: true
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
vars:
|
vars:
|
||||||
|
|
|
@ -0,0 +1,8 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- hosts: all
|
||||||
|
any_errors_fatal: true
|
||||||
|
gather_facts: no
|
||||||
|
roles:
|
||||||
|
- remove-datadog-agent
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
TESTNET_NAME: remotenet
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: Fetch genesis.json
|
||||||
|
fetch: "src=/home/gaiad/.gaiad/config/genesis.json dest={{GENESISFILE}} flat=yes"
|
||||||
|
run_once: yes
|
||||||
|
become: yes
|
||||||
|
become_user: gaiad
|
||||||
|
|
||||||
|
- name: Fetch config.toml
|
||||||
|
fetch: "src=/home/gaiad/.gaiad/config/config.toml dest={{CONFIGFILE}} flat=yes"
|
||||||
|
run_once: yes
|
||||||
|
become: yes
|
||||||
|
become_user: gaiad
|
||||||
|
|
|
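These fetch tasks are what the Makefile's extract-config target drives. A sketch of calling it directly with explicit destination paths (the values shown are just the defaults spelled out):

    cd networks
    GENESISFILE=$PWD/../build/genesis.json \
    CONFIGFILE=$PWD/../build/config.toml \
    CLUSTER_NAME=remotenetvalidators make extract-config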
@ -0,0 +1,5 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: restart datadog-agent
|
||||||
|
service: name=datadog-agent state=restarted
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: Remove old datadog.yaml, if it exists
|
||||||
|
file: path=/etc/datadog-agent/datadog.yaml state=absent
|
||||||
|
notify: restart datadog-agent
|
||||||
|
|
||||||
|
- name: Download DataDog agent script
|
||||||
|
get_url: url=https://raw.githubusercontent.com/DataDog/datadog-agent/master/cmd/agent/install_script.sh dest=/tmp/datadog-agent-install.sh mode=0755
|
||||||
|
|
||||||
|
- name: Install DataDog agent
|
||||||
|
command: "/tmp/datadog-agent-install.sh"
|
||||||
|
environment:
|
||||||
|
DD_API_KEY: "{{DD_API_KEY}}"
|
||||||
|
DD_HOST_TAGS: "testnet:{{TESTNET_NAME}},cluster:{{CLUSTER_NAME}}"
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: Stop datadog service
|
||||||
|
failed_when: false
|
||||||
|
service: name=datadog-agent state=stopped
|
||||||
|
|
||||||
|
- name: Uninstall datadog-agent
|
||||||
|
yum: name=datadog-agent state=absent
|
||||||
|
|
||||||
|
- name: Remove datadog-agent folder
|
||||||
|
file: path=/etc/datadog-agent state=absent
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
TESTNET_NAME: remotenet
|
||||||
|
|
|
@ -0,0 +1,54 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: Ensure keys folder exists locally
|
||||||
|
file: path=keys state=directory
|
||||||
|
connection: local
|
||||||
|
run_once: true
|
||||||
|
become: no
|
||||||
|
|
||||||
|
- name: Copy binary
|
||||||
|
copy:
|
||||||
|
src: "{{BINARY}}"
|
||||||
|
dest: /usr/bin
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Get node ID
|
||||||
|
command: "cat /etc/gaiad-nodeid"
|
||||||
|
changed_when: false
|
||||||
|
register: nodeid
|
||||||
|
|
||||||
|
- name: gaiad init
|
||||||
|
command: "/usr/bin/gaiad init --chain-id={{TESTNET_NAME}} --name=fullnode{{nodeid.stdout_lines[0]}}"
|
||||||
|
become: yes
|
||||||
|
become_user: gaiad
|
||||||
|
register: initresult
|
||||||
|
args:
|
||||||
|
creates: /home/gaiad/.gaiad/config
|
||||||
|
|
||||||
|
- name: Get wallet word seed from result of initial transaction locally
|
||||||
|
when: initresult["changed"]
|
||||||
|
shell: "echo '{{initresult.stdout}}' | python -c 'import json,sys ; print json.loads(\"\".join(sys.stdin.readlines()))[\"app_message\"][\"secret\"]'"
|
||||||
|
changed_when: false
|
||||||
|
register: walletkey
|
||||||
|
connection: local
|
||||||
|
|
||||||
|
- name: Write wallet word seed to local files
|
||||||
|
when: initresult["changed"]
|
||||||
|
copy: "content={{walletkey.stdout}} dest=keys/node{{nodeid.stdout_lines[0]}}"
|
||||||
|
become: no
|
||||||
|
connection: local
|
||||||
|
|
||||||
|
- name: Copy genesis file
|
||||||
|
copy:
|
||||||
|
src: "{{GENESISFILE}}"
|
||||||
|
dest: /home/gaiad/.gaiad/config/genesis.json
|
||||||
|
become: yes
|
||||||
|
become_user: gaiad
|
||||||
|
|
||||||
|
- name: Copy config.toml file
|
||||||
|
copy:
|
||||||
|
src: "{{CONFIGFILE}}"
|
||||||
|
dest: /home/gaiad/.gaiad/config/config.toml
|
||||||
|
become: yes
|
||||||
|
become_user: gaiad
|
||||||
|
|
|
@ -1,5 +1,11 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
|
- name: Ensure keys folder exists locally
|
||||||
|
file: path=keys state=directory
|
||||||
|
connection: local
|
||||||
|
run_once: true
|
||||||
|
become: no
|
||||||
|
|
||||||
- name: Copy binary
|
- name: Copy binary
|
||||||
copy:
|
copy:
|
||||||
src: "{{BINARY}}"
|
src: "{{BINARY}}"
|
||||||
|
@ -12,12 +18,26 @@
|
||||||
register: nodeid
|
register: nodeid
|
||||||
|
|
||||||
- name: Create initial transaction
|
- name: Create initial transaction
|
||||||
command: "/usr/bin/gaiad init gen-tx --name=node{{nodeid.stdout_lines[0]}}"
|
command: "/usr/bin/gaiad init gen-tx --name=node{{nodeid.stdout_lines[0]}} --ip={{inventory_hostname}}"
|
||||||
|
register: gentxresult
|
||||||
become: yes
|
become: yes
|
||||||
become_user: gaiad
|
become_user: gaiad
|
||||||
args:
|
args:
|
||||||
creates: /home/gaiad/.gaiad/config/gentx
|
creates: /home/gaiad/.gaiad/config/gentx
|
||||||
|
|
||||||
|
- name: Get wallet word seed from result of initial transaction locally
|
||||||
|
when: gentxresult["changed"]
|
||||||
|
shell: "echo '{{gentxresult.stdout}}' | python -c 'import json,sys ; print json.loads(\"\".join(sys.stdin.readlines()))[\"app_message\"][\"secret\"]'"
|
||||||
|
changed_when: false
|
||||||
|
register: walletkey
|
||||||
|
connection: local
|
||||||
|
|
||||||
|
- name: Write wallet word seed to local files
|
||||||
|
when: gentxresult["changed"]
|
||||||
|
copy: "content={{walletkey.stdout}} dest=keys/node{{nodeid.stdout_lines[0]}}"
|
||||||
|
become: no
|
||||||
|
connection: local
|
||||||
|
|
||||||
- name: Find gentx file
|
- name: Find gentx file
|
||||||
command: "ls /home/gaiad/.gaiad/config/gentx"
|
command: "ls /home/gaiad/.gaiad/config/gentx"
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
@ -28,18 +48,19 @@
|
||||||
connection: local
|
connection: local
|
||||||
run_once: yes
|
run_once: yes
|
||||||
|
|
||||||
- name: Get gen-tx
|
- name: Get gen-tx file
|
||||||
fetch:
|
fetch:
|
||||||
dest: files/
|
dest: files/
|
||||||
src: "/home/gaiad/.gaiad/config/gentx/{{gentxfile.stdout_lines[0]}}"
|
src: "/home/gaiad/.gaiad/config/gentx/{{gentxfile.stdout_lines[0]}}"
|
||||||
flat: yes
|
flat: yes
|
||||||
|
|
||||||
- name: Copy generated transactions to all nodes
|
- name: Compress gathered gen-tx files locally
|
||||||
copy:
|
archive: path=files/ exclude_path=files/gen-tx.tgz dest=files/gen-tx.tgz
|
||||||
src: files/
|
run_once: yes
|
||||||
dest: /home/gaiad/.gaiad/config/gentx/
|
connection: local
|
||||||
become: yes
|
|
||||||
become_user: gaiad
|
- name: Unpack gen-tx archive
|
||||||
|
unarchive: src=files/gen-tx.tgz dest=/home/gaiad/.gaiad/config/gentx owner=gaiad
|
||||||
|
|
||||||
- name: Generate genesis.json
|
- name: Generate genesis.json
|
||||||
command: "/usr/bin/gaiad init --gen-txs --name=node{{nodeid.stdout_lines[0]}} --chain-id={{TESTNET_NAME}}"
|
command: "/usr/bin/gaiad init --gen-txs --name=node{{nodeid.stdout_lines[0]}} --chain-id={{TESTNET_NAME}}"
|
||||||
|
|
|
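The new tasks above recover the wallet seed by piping gaiad's JSON output through a Python one-liner. Purely as an illustration of what that one-liner parses, the same field could be read with jq (not used by the playbook); the variable name is a placeholder:

    # Sketch: the playbook's "app_message.secret" extraction, done with jq instead of python.
    echo "$GENTX_OUTPUT" | jq -r '.app_message.secret'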
@ -0,0 +1,5 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: restart gaiad
|
||||||
|
service: name=gaiad state=restarted
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- name: Copy binary
|
||||||
|
copy:
|
||||||
|
src: "{{BINARY}}"
|
||||||
|
dest: /usr/bin
|
||||||
|
mode: 0755
|
||||||
|
notify: restart gaiad
|
||||||
|
|
|
@ -0,0 +1,11 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
#GENESISFILE required
|
||||||
|
#CONFIGFILE required
|
||||||
|
|
||||||
|
- hosts: all
|
||||||
|
any_errors_fatal: true
|
||||||
|
gather_facts: no
|
||||||
|
roles:
|
||||||
|
- setup-fullnodes
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
user: root
|
|
||||||
any_errors_fatal: true
|
any_errors_fatal: true
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
roles:
|
roles:
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
user: root
|
|
||||||
any_errors_fatal: true
|
any_errors_fatal: true
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
vars:
|
vars:
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
user: root
|
|
||||||
any_errors_fatal: true
|
any_errors_fatal: true
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
vars:
|
vars:
|
||||||
|
|
|
@ -0,0 +1,8 @@
|
||||||
|
---
|
||||||
|
|
||||||
|
- hosts: all
|
||||||
|
any_errors_fatal: true
|
||||||
|
gather_facts: no
|
||||||
|
roles:
|
||||||
|
- upgrade-gaiad
|
||||||
|
|
|
@ -2,3 +2,4 @@
|
||||||
terraform.tfstate
|
terraform.tfstate
|
||||||
terraform.tfstate.backup
|
terraform.tfstate.backup
|
||||||
terraform.tfstate.d
|
terraform.tfstate.d
|
||||||
|
.terraform.tfstate.lock.info
|
|
@ -0,0 +1,16 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# Script to initialize testnet settings on a server
|
||||||
|
|
||||||
|
#Usage: terraform.sh <testnet_name> <testnet_node_number>
|
||||||
|
|
||||||
|
#Add gaiad node number for remote identification
|
||||||
|
echo "$2" > /etc/gaiad-nodeid
|
||||||
|
|
||||||
|
#Create gaiad user
|
||||||
|
useradd -m -s /bin/bash gaiad
|
||||||
|
|
||||||
|
#Reload services to enable the gaiad service (note that the gaiad binary is not available yet)
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl enable gaiad
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
# This is the reason why we can't separate nodes and load balancer creation into different modules.
|
||||||
|
# https://github.com/hashicorp/terraform/issues/10857
|
||||||
|
# In short: the list of instances coming from the nodes module is a generated variable
|
||||||
|
# and it should be the input for the load-balancer generation. However when attaching the instances
|
||||||
|
# to the load-balancer, aws_lb_target_group_attachment.count cannot be a generated value.
|
||||||
|
|
||||||
|
#Instance Attachment (autoscaling is the future)
|
||||||
|
resource "aws_lb_target_group_attachment" "lb_attach" {
|
||||||
|
count = "${var.SERVERS*length(data.aws_availability_zones.zones.names)}"
|
||||||
|
target_group_arn = "${aws_lb_target_group.lb_target_group.arn}"
|
||||||
|
target_id = "${element(aws_instance.node.*.id,count.index)}"
|
||||||
|
port = 80
|
||||||
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,64 @@
|
||||||
|
resource "aws_key_pair" "key" {
|
||||||
|
key_name = "${var.name}"
|
||||||
|
public_key = "${file(var.ssh_public_file)}"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "aws_ami" "linux" {
|
||||||
|
most_recent = true
|
||||||
|
filter {
|
||||||
|
name = "name"
|
||||||
|
values = ["${var.image_name}"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_instance" "node" {
|
||||||
|
# depends_on = ["${element(aws_route_table_association.route_table_association.*,count.index)}"]
|
||||||
|
count = "${var.SERVERS*length(data.aws_availability_zones.zones.names)}"
|
||||||
|
ami = "${data.aws_ami.linux.image_id}"
|
||||||
|
instance_type = "${var.instance_type}"
|
||||||
|
key_name = "${aws_key_pair.key.key_name}"
|
||||||
|
associate_public_ip_address = true
|
||||||
|
vpc_security_group_ids = [ "${aws_security_group.secgroup.id}" ]
|
||||||
|
subnet_id = "${element(aws_subnet.subnet.*.id,count.index)}"
|
||||||
|
availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}"
|
||||||
|
|
||||||
|
tags {
|
||||||
|
Environment = "${var.name}"
|
||||||
|
Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}"
|
||||||
|
}
|
||||||
|
|
||||||
|
volume_tags {
|
||||||
|
Environment = "${var.name}"
|
||||||
|
Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}-VOLUME"
|
||||||
|
}
|
||||||
|
|
||||||
|
root_block_device {
|
||||||
|
volume_size = 20
|
||||||
|
}
|
||||||
|
|
||||||
|
connection {
|
||||||
|
user = "centos"
|
||||||
|
private_key = "${file(var.ssh_private_file)}"
|
||||||
|
timeout = "600s"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "file" {
|
||||||
|
source = "files/terraform.sh"
|
||||||
|
destination = "/tmp/terraform.sh"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "file" {
|
||||||
|
source = "files/gaiad.service"
|
||||||
|
destination = "/tmp/gaiad.service"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "remote-exec" {
|
||||||
|
inline = [
|
||||||
|
"sudo cp /tmp/gaiad.service /etc/systemd/system/gaiad.service",
|
||||||
|
"chmod +x /tmp/terraform.sh",
|
||||||
|
"sudo /tmp/terraform.sh ${var.name} ${count.index}",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
|
@@ -0,0 +1,61 @@
resource "aws_lb" "lb" {
  name = "${var.name}"
  subnets = ["${aws_subnet.subnet.*.id}"]
  # security_groups = ["${split(",", var.lb_security_groups)}"]
  tags {
    Name = "${var.name}"
  }
  # access_logs {
  #   bucket = "${var.s3_bucket}"
  #   prefix = "ELB-logs"
  # }
}

resource "aws_lb_listener" "lb_listener" {
  load_balancer_arn = "${aws_lb.lb.arn}"
  port = "80"
  protocol = "HTTP"

  default_action {
    target_group_arn = "${aws_lb_target_group.lb_target_group.arn}"
    type = "forward"
  }
}

resource "aws_lb_listener_rule" "listener_rule" {
  # depends_on = ["aws_lb_target_group.lb_target_group"]
  listener_arn = "${aws_lb_listener.lb_listener.arn}"
  priority = "100"
  action {
    type = "forward"
    target_group_arn = "${aws_lb_target_group.lb_target_group.id}"
  }
  condition {
    field = "path-pattern"
    values = ["/"]
  }
}

resource "aws_lb_target_group" "lb_target_group" {
  name = "${var.name}"
  port = "80"
  protocol = "HTTP"
  vpc_id = "${aws_vpc.vpc.id}"
  tags {
    name = "${var.name}"
  }
  # stickiness {
  #   type = "lb_cookie"
  #   cookie_duration = 1800
  #   enabled = "true"
  # }
  # health_check {
  #   healthy_threshold = 3
  #   unhealthy_threshold = 10
  #   timeout = 5
  #   interval = 10
  #   path = "${var.target_group_path}"
  #   port = "${var.target_group_port}"
  # }
}
@@ -0,0 +1,24 @@
// The cluster name
output "name" {
  value = "${var.name}"
}

// The list of cluster instance IDs
output "instances" {
  value = ["${aws_instance.node.*.id}"]
}

output "instances_count" {
  value = "${length(aws_instance.node.*)}"
}

// The list of cluster instance public IPs
output "public_ips" {
  value = ["${aws_instance.node.*.public_ip}"]
}

// Name of the ALB
output "lb_name" {
  value = "${aws_lb.lb.dns_name}"
}
@@ -0,0 +1,29 @@
variable "name" {
  description = "The testnet name, e.g cdn"
}

variable "image_name" {
  description = "Image name"
  default = "CentOS Linux 7 x86_64 HVM EBS 1704_01"
}

variable "instance_type" {
  description = "The instance size to use"
  default = "t2.small"
}

variable "SERVERS" {
  description = "Number of servers in an availability zone"
  default = "1"
}

variable "ssh_private_file" {
  description = "SSH private key file to be used to connect to the nodes"
  type = "string"
}

variable "ssh_public_file" {
  description = "SSH public key file to be used on the nodes"
  type = "string"
}
@@ -0,0 +1,97 @@
resource "aws_vpc" "vpc" {
  cidr_block = "10.0.0.0/16"

  tags {
    Name = "${var.name}"
  }

}

resource "aws_internet_gateway" "internet_gateway" {
  vpc_id = "${aws_vpc.vpc.id}"

  tags {
    Name = "${var.name}"
  }
}

resource "aws_route_table" "route_table" {
  vpc_id = "${aws_vpc.vpc.id}"

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.internet_gateway.id}"
  }

  tags {
    Name = "${var.name}"
  }
}

data "aws_availability_zones" "zones" {
  state = "available"
}

resource "aws_subnet" "subnet" {
  count = "${length(data.aws_availability_zones.zones.names)}"
  vpc_id = "${aws_vpc.vpc.id}"
  availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}"
  cidr_block = "${cidrsubnet(aws_vpc.vpc.cidr_block, 8, count.index)}"
  map_public_ip_on_launch = "true"

  tags {
    Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}"
  }
}

resource "aws_route_table_association" "route_table_association" {
  count = "${length(data.aws_availability_zones.zones.names)}"
  subnet_id = "${element(aws_subnet.subnet.*.id,count.index)}"
  route_table_id = "${aws_route_table.route_table.id}"
}

resource "aws_security_group" "secgroup" {
  name = "${var.name}"
  vpc_id = "${aws_vpc.vpc.id}"
  description = "Automated security group for application instances"
  tags {
    Name = "${var.name}"
  }

  ingress {
    from_port = 22
    to_port = 22
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port = 26656
    to_port = 26657
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port = 26660
    to_port = 26660
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]

  }
}
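Note (context added for readability, not text from the diff): 22 is SSH, 80 serves the load balancer, 26656-26657 are Tendermint's P2P and RPC ports, and 26660 is the metrics port. A hedged smoke test against a provisioned node might look like this, where NODE_IP is a hypothetical placeholder for an address from the public_ips output:

# Illustrative only; NODE_IP is a placeholder, and gaiad must already be running on the node.
NODE_IP=203.0.113.10
curl -s "http://$NODE_IP:26657/status"   # Tendermint RPC
nc -vz "$NODE_IP" 26656                  # P2P port reachability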
@@ -0,0 +1,61 @@
#Terraform Configuration

variable "APP_NAME" {
  description = "Name of the application"
}

variable "SERVERS" {
  description = "Number of servers in an availability zone"
  default = "1"
}

#See https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region
#eu-west-3 does not contain CentOS images
variable "REGION" {
  description = "AWS Regions"
  default = "us-east-2"
}

variable "SSH_PRIVATE_FILE" {
  description = "SSH private key file to be used to connect to the nodes"
  type = "string"
}

variable "SSH_PUBLIC_FILE" {
  description = "SSH public key file to be used on the nodes"
  type = "string"
}

# ap-southeast-1 and ap-southeast-2 do not contain the newer CentOS 1704 image
variable "image" {
  description = "AWS image name"
  default = "CentOS Linux 7 x86_64 HVM EBS 1703_01"
}

variable "instance_type" {
  description = "AWS instance type"
  default = "t2.medium"
}

provider "aws" {
  region = "${var.REGION}"
}

module "nodes" {
  source = "infra"
  name = "${var.APP_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

output "public_ips" {
  value = "${module.nodes.public_ips}",
}

output "lb_name" {
  value = "${module.nodes.lb_name}"
}
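As a rough usage sketch (values below are illustrative, not taken from this PR), the application deployment can be driven directly with the Terraform CLI using the variables declared above:

# Illustrative only: run from the directory containing this configuration.
terraform init
terraform apply \
  -var APP_NAME="myapp" \
  -var SERVERS="1" \
  -var SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub" \
  -var SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"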
@@ -0,0 +1,5 @@
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
.terraform.tfstate.lock.info
@@ -0,0 +1,17 @@
[Unit]
Description=gaiad
Requires=network-online.target
After=network-online.target

[Service]
Restart=on-failure
User=gaiad
Group=gaiad
PermissionsStartOnly=true
ExecStart=/usr/bin/gaiad start
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,19 @@
#!/bin/bash
# Script to initialize testnet settings on a server

# Usage: terraform.sh <testnet_name> <testnet_region_number> <testnet_node_number>

# Add gaiad node number for remote identification
REGION="$(($2 + 1))"
RNODE="$(($3 + 1))"
ID="$((${REGION} * 100 + ${RNODE}))"
echo "$ID" > /etc/gaiad-nodeid

# Create gaiad user
useradd -m -s /bin/bash gaiad

# Reload services to enable the gaiad service (note that the gaiad binary is not available yet)
systemctl daemon-reload
systemctl enable gaiad
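To make the node numbering concrete, a worked example of the ID scheme in the script above (inputs are illustrative):

# Illustrative: terraform.sh mytestnet 1 2
# REGION = 1 + 1 = 2
# RNODE  = 2 + 1 = 3
# ID     = 2 * 100 + 3 = 203, written to /etc/gaiad-nodeid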
@@ -0,0 +1,249 @@
#Terraform Configuration

#See https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region
#eu-west-3 does not contain CentOS images
#us-east-1 usually contains other infrastructure and creating keys and security groups might conflict with that
variable "REGIONS" {
  description = "AWS Regions"
  type = "list"
  default = ["us-east-2", "us-west-1", "us-west-2", "ap-south-1", "ap-northeast-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ca-central-1", "eu-central-1", "eu-west-1", "eu-west-2", "sa-east-1"]
}

variable "TESTNET_NAME" {
  description = "Name of the testnet"
  default = "remotenet"
}

variable "REGION_LIMIT" {
  description = "Number of regions to populate"
  default = "1"
}

variable "SERVERS" {
  description = "Number of servers in an availability zone"
  default = "1"
}

variable "SSH_PRIVATE_FILE" {
  description = "SSH private key file to be used to connect to the nodes"
  type = "string"
}

variable "SSH_PUBLIC_FILE" {
  description = "SSH public key file to be used on the nodes"
  type = "string"
}

# ap-southeast-1 and ap-southeast-2 do not contain the newer CentOS 1704 image
variable "image" {
  description = "AWS image name"
  default = "CentOS Linux 7 x86_64 HVM EBS 1703_01"
}

variable "instance_type" {
  description = "AWS instance type"
  default = "t2.medium"
}

module "nodes-0" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,0)}"
  multiplier = "0"
  execute = "${var.REGION_LIMIT > 0}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-1" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,1)}"
  multiplier = "1"
  execute = "${var.REGION_LIMIT > 1}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-2" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,2)}"
  multiplier = "2"
  execute = "${var.REGION_LIMIT > 2}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-3" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,3)}"
  multiplier = "3"
  execute = "${var.REGION_LIMIT > 3}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-4" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,4)}"
  multiplier = "4"
  execute = "${var.REGION_LIMIT > 4}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-5" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,5)}"
  multiplier = "5"
  execute = "${var.REGION_LIMIT > 5}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-6" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,6)}"
  multiplier = "6"
  execute = "${var.REGION_LIMIT > 6}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-7" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,7)}"
  multiplier = "7"
  execute = "${var.REGION_LIMIT > 7}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-8" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,8)}"
  multiplier = "8"
  execute = "${var.REGION_LIMIT > 8}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-9" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,9)}"
  multiplier = "9"
  execute = "${var.REGION_LIMIT > 9}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-10" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,10)}"
  multiplier = "10"
  execute = "${var.REGION_LIMIT > 10}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-11" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,11)}"
  multiplier = "11"
  execute = "${var.REGION_LIMIT > 11}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-12" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,12)}"
  multiplier = "12"
  execute = "${var.REGION_LIMIT > 12}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

module "nodes-13" {
  source = "nodes"
  name = "${var.TESTNET_NAME}"
  image_name = "${var.image}"
  instance_type = "${var.instance_type}"
  region = "${element(var.REGIONS,13)}"
  multiplier = "13"
  execute = "${var.REGION_LIMIT > 13}"
  ssh_public_file = "${var.SSH_PUBLIC_FILE}"
  ssh_private_file = "${var.SSH_PRIVATE_FILE}"
  SERVERS = "${var.SERVERS}"
}

output "public_ips" {
  value = "${concat(
    module.nodes-0.public_ips,
    module.nodes-1.public_ips,
    module.nodes-2.public_ips,
    module.nodes-3.public_ips,
    module.nodes-4.public_ips,
    module.nodes-5.public_ips,
    module.nodes-6.public_ips,
    module.nodes-7.public_ips,
    module.nodes-8.public_ips,
    module.nodes-9.public_ips,
    module.nodes-10.public_ips,
    module.nodes-11.public_ips,
    module.nodes-12.public_ips,
    module.nodes-13.public_ips
  )}",
}
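Note (context, not text from the diff): Terraform of this era did not support count on modules, so the per-region execute flag above is what limits how many of the fourteen region modules actually create resources. A hedged example of driving this configuration from the CLI (values are illustrative):

# Illustrative only: populate the first three regions, two servers per availability zone.
terraform init
terraform apply \
  -var TESTNET_NAME="remotenet" \
  -var REGION_LIMIT="3" \
  -var SERVERS="2" \
  -var SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub" \
  -var SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"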
@@ -0,0 +1,110 @@

provider "aws" {
  region = "${var.region}"
}

resource "aws_key_pair" "testnets" {
  count = "${var.execute?1:0}"
  key_name = "testnets-${var.name}"
  public_key = "${file(var.ssh_public_file)}"
}

data "aws_ami" "linux" {
  most_recent = true
  filter {
    name = "name"
    values = ["${var.image_name}"]
  }
}

data "aws_availability_zones" "zones" {
  state = "available"
}

resource "aws_security_group" "secgroup" {
  count = "${var.execute?1:0}"
  name = "${var.name}"
  description = "Automated security group for performance testing testnets"
  tags {
    Name = "testnets-${var.name}"
  }

  ingress {
    from_port = 22
    to_port = 22
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port = 26656
    to_port = 26657
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port = 26660
    to_port = 26660
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]

  }
}

resource "aws_instance" "node" {
  count = "${var.execute?var.SERVERS*length(data.aws_availability_zones.zones.names):0}"
  ami = "${data.aws_ami.linux.image_id}"
  instance_type = "${var.instance_type}"
  key_name = "${aws_key_pair.testnets.key_name}"
  associate_public_ip_address = true
  security_groups = [ "${aws_security_group.secgroup.name}" ]
  availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}"

  tags {
    Environment = "${var.name}"
    Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}"
  }

  volume_tags {
    Environment = "${var.name}"
    Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}-VOLUME"
  }

  root_block_device {
    volume_size = 20
  }

  connection {
    user = "centos"
    private_key = "${file(var.ssh_private_file)}"
    timeout = "600s"
  }

  provisioner "file" {
    source = "files/terraform.sh"
    destination = "/tmp/terraform.sh"
  }

  provisioner "file" {
    source = "files/gaiad.service"
    destination = "/tmp/gaiad.service"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo cp /tmp/gaiad.service /etc/systemd/system/gaiad.service",
      "chmod +x /tmp/terraform.sh",
      "sudo /tmp/terraform.sh ${var.name} ${var.multiplier} ${count.index}",
    ]
  }

}
@@ -0,0 +1,15 @@
// The cluster name
output "name" {
  value = "${var.name}"
}

// The list of cluster instance IDs
output "instances" {
  value = ["${aws_instance.node.*.id}"]
}

// The list of cluster instance public IPs
output "public_ips" {
  value = ["${aws_instance.node.*.public_ip}"]
}
@@ -0,0 +1,42 @@
variable "name" {
  description = "The testnet name, e.g cdn"
}

variable "image_name" {
  description = "Image name"
  default = "CentOS Linux 7 x86_64 HVM EBS 1704_01"
}

variable "instance_type" {
  description = "The instance size to use"
  default = "t2.small"
}

variable "region" {
  description = "AWS region to use"
}

variable "multiplier" {
  description = "Multiplier for node identification"
}

variable "execute" {
  description = "Set to false to disable the module"
  default = true
}

variable "SERVERS" {
  description = "Number of servers in an availability zone"
  default = "1"
}

variable "ssh_private_file" {
  description = "SSH private key file to be used to connect to the nodes"
  type = "string"
}

variable "ssh_public_file" {
  description = "SSH public key file to be used on the nodes"
  type = "string"
}
@@ -0,0 +1,6 @@
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
.terraform.tfstate.lock.info
@@ -22,7 +22,6 @@ resource "digitalocean_droplet" "cluster" {
   connection {
     private_key = "${file(var.ssh_private_file)}"
-    timeout = "30s"
   }

   provisioner "file" {
@@ -0,0 +1,17 @@
[Unit]
Description=gaiad
Requires=network-online.target
After=network-online.target

[Service]
Restart=on-failure
User=gaiad
Group=gaiad
PermissionsStartOnly=true
ExecStart=/usr/bin/gaiad start
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,14 @@
#!/bin/sh
# upgrade-gaiad - example make call to upgrade gaiad on a set of nodes in AWS
# WARNING: Run it from the current directory - it uses relative paths to ship the binary and the genesis.json and config.toml files

if [ $# -ne 1 ]; then
  echo "Usage: ./upgrade-gaiad.sh <clustername>"
  exit 1
fi
set -eux

export CLUSTER_NAME=$1

make upgrade-gaiad
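A minimal invocation sketch for the wrapper above ("mytestnet" is an illustrative cluster name; the script exports it as CLUSTER_NAME and then calls the make target):

# Illustrative only
./upgrade-gaiad.sh mytestnet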