Merge branch 'develop' into rigel/piggy-bank-fee-spec

Rigel 2018-08-15 23:54:44 -04:00 committed by GitHub
commit 54cc27fde9
164 changed files with 4034 additions and 2498 deletions


@ -1,5 +1,34 @@
# Changelog
## 0.23.1
*July 27th, 2018*
BUG FIXES
* [tendermint] Update to v0.22.8
- [consensus, blockchain] Register the Evidence interface so it can be
marshalled/unmarshalled by the blockchain and consensus reactors
## 0.23.0
*July 25th, 2018*
BREAKING CHANGES
* [x/stake] Fixed the period check for the inflation calculation
IMPROVEMENTS
* [cli] Improve error messages for all txs when the account doesn't exist
* [tendermint] Update to v0.22.6
- Updates the crypto imports/API (#1966)
* [x/stake] Add revoked to human-readable validator
BUG FIXES
* [tendermint] Update to v0.22.6
- Fixes some security vulnerabilities reported in the [Bug Bounty](https://hackerone.com/tendermint)
* \#1797 Fix off-by-one error in slashing for downtime
* \#1787 Fixed bug where Tally fails due to revoked/unbonding validator
* \#1666 Add intra-tx counter to the genesis validators
## 0.22.0
*July 16th, 2018*

Gopkg.lock (generated, 74 changes)

@ -34,11 +34,11 @@
[[projects]]
branch = "master"
digest = "1:6aabc1566d6351115d561d038da82a4c19b46c3b6e17f4a0a2fa60260663dc79"
digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "cf05f92c3f815bbd5091ed6c73eff51f7b1945e8"
revision = "f899737d7f2764dc13e4d01ff00108ec58f766a9"
[[projects]]
digest = "1:386de157f7d19259a7f9c81f26ce011223ce0f090353c1152ffdf730d7d10ac2"
@ -71,7 +71,7 @@
version = "v1.4.7"
[[projects]]
digest = "1:fa30c0652956e159cdb97dcb2ef8b8db63ed668c02a5c3a40961c8f0641252fe"
digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
name = "github.com/go-kit/kit"
packages = [
"log",
@ -103,7 +103,7 @@
version = "v1.7.0"
[[projects]]
digest = "1:212285efb97b9ec2e20550d81f0446cb7897e57cbdfd7301b1363ab113d8be45"
digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
@ -118,7 +118,7 @@
version = "v1.1.1"
[[projects]]
digest = "1:cb22af0ed7c72d495d8be1106233ee553898950f15fd3f5404406d44c2e86888"
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
name = "github.com/golang/protobuf"
packages = [
"proto",
@ -165,7 +165,7 @@
[[projects]]
branch = "master"
digest = "1:ac64f01acc5eeea9dde40e326de6b6471e501392ec06524c3b51033aa50789bc"
digest = "1:a361611b8c8c75a1091f00027767f7779b29cb37c456a71b8f2604c88057ab40"
name = "github.com/hashicorp/hcl"
packages = [
".",
@ -263,7 +263,7 @@
version = "v1.0.0"
[[projects]]
digest = "1:98225904b7abff96c052b669b25788f18225a36673fba022fb93514bb9a2a64e"
digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
@ -274,7 +274,7 @@
[[projects]]
branch = "master"
digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
@ -282,7 +282,7 @@
[[projects]]
branch = "master"
digest = "1:dad2e5a2153ee7a6c9ab8fc13673a16ee4fb64434a7da980965a3741b0c981a3"
digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
name = "github.com/prometheus/common"
packages = [
"expfmt",
@ -294,7 +294,7 @@
[[projects]]
branch = "master"
digest = "1:a37c98f4b7a66bb5c539c0539f0915a74ef1c8e0b3b6f45735289d94cae92bfd"
digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
name = "github.com/prometheus/procfs"
packages = [
".",
@ -313,7 +313,7 @@
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
digest = "1:37ace7f35375adec11634126944bdc45a673415e2fcc07382d03b75ec76ea94c"
digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84"
name = "github.com/spf13/afero"
packages = [
".",
@ -332,7 +332,7 @@
version = "v1.2.0"
[[projects]]
digest = "1:627ab2f549a6a55c44f46fa24a4307f4d0da81bfc7934ed0473bf38b24051d26"
digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "UT"
@ -341,19 +341,19 @@
[[projects]]
branch = "master"
digest = "1:080e5f630945ad754f4b920e60b4d3095ba0237ebf88dc462eb28002932e3805"
digest = "1:8a020f916b23ff574845789daee6818daf8d25a4852419aae3f0b12378ba432a"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
pruneopts = "UT"
revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
revision = "14d3d4c518341bea657dd8a226f5121c0ff8c9f2"
[[projects]]
digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
[[projects]]
digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
@ -364,7 +364,7 @@
version = "v1.0.0"
[[projects]]
digest = "1:73697231b93fb74a73ebd8384b68b9a60c57ea6b13c56d2425414566a72c8e6d"
digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
name = "github.com/stretchr/testify"
packages = [
"assert",
@ -376,7 +376,7 @@
[[projects]]
branch = "master"
digest = "1:922191411ad8f61bcd8018ac127589bb489712c1d1a0ab2497aca4b16de417d2"
digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b"
name = "github.com/syndtr/goleveldb"
packages = [
"leveldb",
@ -397,7 +397,7 @@
[[projects]]
branch = "master"
digest = "1:203b409c21115233a576f99e8f13d8e07ad82b25500491f7e1cca12588fb3232"
digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722"
name = "github.com/tendermint/ed25519"
packages = [
".",
@ -424,7 +424,7 @@
version = "v0.9.2"
[[projects]]
digest = "1:049c779b867a182cea567c65d7c81e3b9e4e4a7eece4c35a19639f75d2aa7da9"
digest = "1:26146cdb2811ce481e72138439b9b1aa17a64d54364f96bb92f97a9ef8ba4f01"
name = "github.com/tendermint/tendermint"
packages = [
"abci/client",
@ -487,23 +487,26 @@
"version",
]
pruneopts = "UT"
revision = "5fdbcd70df57b71ffba71e1ff5f00d617852a9c0"
version = "v0.22.6"
revision = "013b9cef642f875634c614019ab13b17570778ad"
version = "v0.23.0"
[[projects]]
digest = "1:5bd938386bd1f61a581bf8cd6ff2b7b2f79c542929176db4ceb44965440dae07"
digest = "1:4dcb0dd65feecb068ce23a234d1a07c7868a1e39f52a6defcae0bb371d03abf6"
name = "github.com/zondax/ledger-goclient"
packages = ["."]
pruneopts = "UT"
revision = "39ba4728c137c75718a21f9b4b3280fa31b9139b"
revision = "4296ee5701e945f9b3a7dbe51f402e0b9be57259"
[[projects]]
branch = "master"
digest = "1:e8206c1653e050116ec8c9a823a86413fc9f9ee3c2f3ae977c96d6a1747f7325"
digest = "1:7a71fffde456d746c52f9cd09c50b034533a3180fb1f6320abb149f2ccc579e5"
name = "golang.org/x/crypto"
packages = [
"blowfish",
"chacha20poly1305",
"curve25519",
"hkdf",
"internal/chacha20",
"internal/subtle",
"nacl/box",
"nacl/secretbox",
@ -515,10 +518,10 @@
"salsa20/salsa",
]
pruneopts = "UT"
revision = "f027049dab0ad238e394a753dba2d14753473a04"
revision = "de0752318171da717af4ce24d0a2e8626afaeb11"
[[projects]]
digest = "1:04dda8391c3e2397daf254ac68003f30141c069b228d06baec8324a5f81dc1e9"
digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
name = "golang.org/x/net"
packages = [
"context",
@ -535,14 +538,17 @@
[[projects]]
branch = "master"
digest = "1:4d7a8265af700258feaff86722049eb5b787240d66dfaf45ff4962f09de6e0be"
digest = "1:4bd75b1a219bc590b05c976bbebf47f4e993314ebb5c7cbf2efe05a09a184d54"
name = "golang.org/x/sys"
packages = ["unix"]
packages = [
"cpu",
"unix",
]
pruneopts = "UT"
revision = "acbc56fc7007d2a01796d5bde54f39e3b3e95945"
revision = "4e1fef5609515ec7a2cee7b5de30ba6d9b438cbf"
[[projects]]
digest = "1:7509ba4347d1f8de6ae9be8818b0cd1abc3deeffe28aeaf4be6d4b6b5178d9ca"
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
@ -570,10 +576,10 @@
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
revision = "daca94659cb50e9f37c1b834680f2e46358f10b0"
revision = "383e8b2c3b9e36c4076b235b32537292176bae20"
[[projects]]
digest = "1:4515e3030c440845b046354fd5d57671238428b820deebce2e9dabb5cd3c51ac"
digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
name = "google.golang.org/grpc"
packages = [
".",


@ -57,7 +57,7 @@
[[override]]
name = "github.com/tendermint/tendermint"
version = "=v0.22.6"
version = "=v0.23.0"
[[constraint]]
name = "github.com/bartekn/go-bip39"
@ -65,7 +65,7 @@
[[constraint]]
name = "github.com/zondax/ledger-goclient"
revision = "39ba4728c137c75718a21f9b4b3280fa31b9139b"
revision = "4296ee5701e945f9b3a7dbe51f402e0b9be57259"
[prune]
go-tests = true


@ -5,6 +5,7 @@ BUILD_TAGS = netgo ledger
BUILD_FLAGS = -tags "${BUILD_TAGS}" -ldflags "-X github.com/cosmos/cosmos-sdk/version.GitCommit=${COMMIT_HASH}"
GCC := $(shell command -v gcc 2> /dev/null)
LEDGER_ENABLED ?= true
UNAME_S := $(shell uname -s)
all: get_tools get_vendor_deps install install_examples install_cosmos-sdk-cli test_lint test
########################################
@ -17,12 +18,18 @@ ci: get_tools get_vendor_deps install test_cover test_lint test
check-ledger:
ifeq ($(LEDGER_ENABLED),true)
ifndef GCC
$(error "gcc not installed for ledger support, please install")
endif
ifeq ($(UNAME_S),OpenBSD)
$(info "OpenBSD detected, disabling ledger support (https://github.com/cosmos/cosmos-sdk/issues/1988)")
TMP_BUILD_TAGS := $(BUILD_TAGS)
BUILD_TAGS = $(filter-out ledger, $(TMP_BUILD_TAGS))
else
ifndef GCC
$(error "gcc not installed for ledger support, please install or set LEDGER_ENABLED to false in the Makefile")
endif
endif
else
TMP_BUILD_TAGS := $(BUILD_TAGS)
BUILD_TAGS = $(filter-out ledger, $(TMP_BUILD_TAGS))
endif
build: check-ledger
@ -100,6 +107,7 @@ get_dev_tools:
get_vendor_deps:
@echo "--> Running dep ensure"
@rm -rf .vendor-new
@dep ensure -v
draw_deps:


@ -1,8 +1,15 @@
## v0.24.0 PENDING
^--- PENDING wasn't purged on sdk v0.23.0 release.
BREAKING CHANGES
* Update to tendermint v0.23.0. This involves removing crypto.PubKey,
maintaining a validator address-to-pubkey map, and using time.Time instead of int64 for time. [SDK PR](https://github.com/cosmos/cosmos-sdk/pull/1927)
## PENDING
BREAKING CHANGES
* API
- \#1880 [x/stake] changed the endpoints to be more REST-ful
- \#1880 and \#2000 [x/stake] changed the endpoints to be more REST-ful
* Update to tendermint v0.22.5. This involves changing all of the cryptography imports. [Ref](https://github.com/tendermint/tendermint/pull/1966)
* [baseapp] Msgs are no longer run on CheckTx, removed `ctx.IsCheckTx()`
* [x/gov] CLI flag changed from `proposalID` to `proposal-id`
@ -25,12 +32,14 @@ BREAKING CHANGES
* `gaiacli gov submit-proposal --proposer`
* `gaiacli gov deposit --depositer`
* `gaiacli gov vote --voter`
* [x/gov] Added tags sub-package, changed tags to use dash-case
* [x/gov] Governance parameters are now stored in globalparams store
* [core] \#1807 Switch from use of rational to decimal
* [lcd] \#1866 Updated lcd /slashing/signing_info endpoint to take cosmosvalpub instead of cosmosvaladdr
* [types] sdk.NewCoin now takes sdk.Int, sdk.NewInt64Coin takes int64
* [cli] #1551: Officially removed `--name` from CLI commands
* [cli] Genesis/key creation (`init`) now supports user-provided key passwords
* [cli] unsafe_reset_all, show_validator, and show_node_id have been renamed to unsafe-reset-all, show-validator, and show-node-id
FEATURES
* [lcd] Can now query governance proposals by ProposalStatus
@ -39,7 +48,7 @@ FEATURES
* Modules can test random combinations of their own operations
* Applications can integrate operations and invariants from modules together for an integrated simulation
* [baseapp] Initialize validator set on ResponseInitChain
* [cosmos-sdk-cli] Added support for cosmos-sdk-cli tool under cosmos-sdk/cmd
* This allows SDK users to initialize a new project repository.
* [tests] Remotenet commands for AWS (awsnet)
* [networks] Added ansible scripts to upgrade seed nodes on a network
@ -47,6 +56,7 @@ FEATURES
* [gov] Add slashing for validators who do not vote on a proposal
* [cli] added `gov query-proposals` command to CLI. Can filter by `depositer`, `voter`, and `status`
* [core] added BaseApp.Seal - ability to seal baseapp parameters once they've been set
* [scripts] added log output monitoring to DataDog using Ansible scripts
* [gov] added TallyResult type that gets stored in Proposal after tallying is finished
IMPROVEMENTS
@ -54,7 +64,7 @@ IMPROVEMENTS
* [cli] Improve error messages for all txs when the account doesn't exist
* [tools] Remove `rm -rf vendor/` from `make get_vendor_deps`
* [x/auth] Recover ErrorOutOfGas panic in order to set sdk.Result attributes correctly
* [x/stake] Add revoked to human-readable validator
* [spec] \#967 Inflation and distribution specs drastically improved
* [tests] Add tests to example apps in docs
* [x/gov] Votes on a proposal can now be queried
@ -62,10 +72,15 @@ IMPROVEMENTS
* [tests] Fixes ansible scripts to work with AWS too
* [tests] \#1806 CLI tests are now behind the build flag 'cli_test', so go test works on a new repo
* [x/gov] Initial governance parameters can now be set in the genesis file
* [x/stake] \#1815 Sped up the processing of `EditValidator` txs.
* [server] \#1930 Transactions indexer indexes all tags by default.
* [x/stake] \#2000 Added tests for new staking endpoints
* [x/stake] [#2023](https://github.com/cosmos/cosmos-sdk/pull/2023) Terminate iteration loop in `UpdateBondedValidators` and `UpdateBondedValidatorsFull` when the first revoked validator is encountered and perform a sanity check.
* [tools] Make get_vendor_deps delete `.vendor-new` directories in case scratch files are present.
* [spec] Added simple piggy bank distribution spec
BUG FIXES
* \#1988 Make us compile on OpenBSD (disable ledger) [#1988](https://github.com/cosmos/cosmos-sdk/issues/1988)
* \#1666 Add intra-tx counter to the genesis validators
* \#1797 Fix off-by-one error in slashing for downtime
* \#1787 Fixed bug where Tally fails due to revoked/unbonding validator
@ -74,16 +89,17 @@ BUG FIXES
* \#1799 Fix `gaiad export`
* \#1828 Force user to specify amount on create-validator command by removing default
* \#1839 Fixed bug where intra-tx counter wasn't set correctly for genesis validators
* [tests] \#1675 Fix non-deterministic `test_cover`
* [staking] [#1858](https://github.com/cosmos/cosmos-sdk/pull/1858) Fixed bug where the cliff validator was not being updated correctly
* [tests] \#1675 Fix non-deterministic `test_cover`
* [client] \#1551: Refactored `CoreContext`
* Renamed `CoreContext` to `QueryContext`
* Moved all tx-related fields and logic (building & signing) to a separate
structure `TxContext` in `x/auth/client/context`
* Cleaned up documentation and API of what used to be `CoreContext`
* Implemented `KeyType` enum for key info
BUG FIXES
* \#1666 Add intra-tx counter to the genesis validators
* [tests] \#1551: Fixed invalid LCD test JSON payload in `doIBCTransfer`
* \#1787 Fixed bug where Tally fails due to revoked/unbonding validator
* [basecoin] Fixes coin transaction failure and account query [discussion](https://forum.cosmos.network/t/unmarshalbinarybare-expected-to-read-prefix-bytes-75fbfab8-since-it-is-registered-concrete-but-got-0a141dfa/664/6)
* [cli] \#1997 Handle panics gracefully when `gaiacli stake {delegation,unbond}` fail to unmarshal delegation.


@ -12,37 +12,33 @@
[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#cosmos-sdk:matrix.org)
The Cosmos-SDK is a framework for building blockchain applications in Golang.
It is being used to build `Gaia`, the first implementation of the [Cosmos Hub](https://cosmos.network),
It is being used to build `Gaia`, the first implementation of the [Cosmos Hub](https://cosmos.network/docs/),
**WARNING**: The SDK has mostly stabilized, but we are still making some
breaking changes.
**Note**: The `master` branch is an active development branch. For the latest
release, see the [release page](https://github.com/cosmos/cosmos-sdk/releases).
**Note**: Requires [Go 1.10+](https://golang.org/dl/)
## Gaia Testnet
For more information on connecting to the testnet, see
[cmd/gaia/testnets](/cmd/gaia/testnets)
To join the latest testnet, follow
[the guide](https://cosmos.network/docs/getting-started/full-node.html#setting-up-a-new-node).
For the latest status of the testnet, see the [status
file](/cmd/gaia/testnets/STATUS.md).
For status updates and genesis files, see the
[testnets repo](https://github.com/cosmos/testnets).
## Install
See the [install instructions](/docs/install.md)
See the
[install instructions](https://cosmos.network/docs/getting-started/installation.html).
## Quick Start
- [Documentation](https://cosmos.network/docs/)
- [Examples](/examples)
- [Cosmos Hub Specification](https://cosmos.network/docs/spec/)
<!---
uncomment once the godocs improve
- [Godocs for API reference](https://godoc.org/github.com/cosmos/cosmos-sdk)
-->
See the [Cosmos Docs](https://cosmos.network/docs/)
- [Getting started with the SDK](https://cosmos.network/docs/sdk/core/intro.html)
- [SDK Examples](/examples)
- [Join the testnet](https://cosmos.network/docs/getting-started/full-node.html#run-a-full-node)
## Disambiguation


@ -387,7 +387,8 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
}
// set the signed validators for addition to context in deliverTx
app.signedValidators = req.Validators
// TODO: communicate this result to the address to pubkey map in slashing
app.signedValidators = req.LastCommitInfo.GetValidators()
return
}
@ -412,11 +413,7 @@ func (app *BaseApp) CheckTx(txBytes []byte) (res abci.ResponseCheckTx) {
Log: result.Log,
GasWanted: result.GasWanted,
GasUsed: result.GasUsed,
Fee: cmn.KI64Pair{
[]byte(result.FeeDenom),
result.FeeAmount,
},
Tags: result.Tags,
}
}


@ -6,6 +6,7 @@ import (
"net/http"
"regexp"
"testing"
"time"
"github.com/cosmos/cosmos-sdk/client/tx"
@ -375,73 +376,76 @@ func TestValidatorQuery(t *testing.T) {
require.Equal(t, 1, len(pks))
validator1Owner := sdk.AccAddress(pks[0].Address())
validator := getValidator(t, port, validator1Owner)
bech32ValAddress, err := sdk.Bech32ifyValPub(pks[0])
require.NoError(t, err)
assert.Equal(t, validator.PubKey, bech32ValAddress, "The returned validator does not hold the correct data")
assert.Equal(t, validator.Owner, validator1Owner, "The returned validator does not hold the correct data")
}
func TestBonding(t *testing.T) {
name, password, denom := "test", "1234567890", "steak"
addr, seed := CreateAddr(t, "test", password, GetKeyBase(t))
addr, seed := CreateAddr(t, name, password, GetKeyBase(t))
cleanup, pks, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr})
defer cleanup()
validator1Owner := sdk.AccAddress(pks[0].Address())
validator := getValidator(t, port, validator1Owner)
// create bond TX
resultTx := doDelegate(t, port, seed, name, password, addr, validator1Owner)
resultTx := doDelegate(t, port, seed, name, password, addr, validator1Owner, 60)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.CheckTx.Code)
require.Equal(t, uint32(0), resultTx.DeliverTx.Code)
// query sender
acc := getAccount(t, port, addr)
coins := acc.GetCoins()
require.Equal(t, int64(40), coins.AmountOf(denom).Int64())
// query validator
bond := getDelegation(t, port, addr, validator1Owner)
require.Equal(t, "60.0000000000", bond.Shares)
summary := getDelegationSummary(t, port, addr)
require.Len(t, summary.Delegations, 1, "Delegation summary holds all delegations")
require.Equal(t, "60.0000000000", summary.Delegations[0].Shares)
require.Len(t, summary.UnbondingDelegations, 0, "Delegation summary holds all unbonding-delegations")
bondedValidators := getDelegatorValidators(t, port, addr)
require.Len(t, bondedValidators, 1)
require.Equal(t, validator1Owner, bondedValidators[0].Owner)
require.Equal(t, validator.DelegatorShares.Add(sdk.NewDec(60)).String(), bondedValidators[0].DelegatorShares.String())
bondedValidator := getDelegatorValidator(t, port, addr, validator1Owner)
require.Equal(t, validator1Owner, bondedValidator.Owner)
//////////////////////
// testing unbonding
// create unbond TX
resultTx = doBeginUnbonding(t, port, seed, name, password, addr, validator1Owner)
resultTx = doBeginUnbonding(t, port, seed, name, password, addr, validator1Owner, 60)
tests.WaitForHeight(resultTx.Height+1, port)
// query validator
bond = getDelegation(t, port, addr, validator1Owner)
require.Equal(t, "30.0000000000", bond.Shares)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.CheckTx.Code)
require.Equal(t, uint32(0), resultTx.DeliverTx.Code)
// should the sender should have not received any coins as the unbonding has only just begun
// query sender
// sender should have not received any coins as the unbonding has only just begun
acc = getAccount(t, port, addr)
coins = acc.GetCoins()
require.Equal(t, int64(40), coins.AmountOf("steak").Int64())
// query unbonding delegation
validatorAddr := sdk.AccAddress(pks[0].Address())
unbondings := getUndelegations(t, port, addr, validatorAddr)
assert.Len(t, unbondings, 1, "Unbondings holds all unbonding-delegations")
assert.Equal(t, "30", unbondings[0].Balance.Amount.String())
unbondings := getUndelegations(t, port, addr, validator1Owner)
require.Len(t, unbondings, 1, "Unbondings holds all unbonding-delegations")
require.Equal(t, "60", unbondings[0].Balance.Amount.String())
// query summary
summary := getDelegationSummary(t, port, addr)
summary = getDelegationSummary(t, port, addr)
assert.Len(t, summary.Delegations, 1, "Delegation summary holds all delegations")
assert.Equal(t, "30.0000000000", summary.Delegations[0].Shares)
assert.Len(t, summary.UnbondingDelegations, 1, "Delegation summary holds all unbonding-delegations")
assert.Equal(t, "30", summary.UnbondingDelegations[0].Balance.Amount.String())
require.Len(t, summary.Delegations, 0, "Delegation summary holds all delegations")
require.Len(t, summary.UnbondingDelegations, 1, "Delegation summary holds all unbonding-delegations")
require.Equal(t, "60", summary.UnbondingDelegations[0].Balance.Amount.String())
bondedValidators = getDelegatorValidators(t, port, addr)
require.Len(t, bondedValidators, 0, "There's no delegation as the user withdrew all funds")
// TODO Unbonding status not currently implemented
// require.Equal(t, sdk.Unbonding, bondedValidators[0].Status)
// TODO add redelegation, need more complex capabilities such to mock context and
// TODO check summary for redelegation
@ -564,7 +568,7 @@ func TestUnrevoke(t *testing.T) {
signingInfo := getSigningInfo(t, port, pkString)
tests.WaitForHeight(4, port)
require.Equal(t, true, signingInfo.IndexOffset > 0)
require.Equal(t, int64(0), signingInfo.JailedUntil)
require.Equal(t, time.Unix(0, 0).UTC(), signingInfo.JailedUntil)
require.Equal(t, true, signingInfo.SignedBlocksCounter > 0)
}
@ -756,64 +760,89 @@ func getSigningInfo(t *testing.T, port string, validatorPubKey string) slashing.
// ============= Stake Module ================
func getDelegation(t *testing.T, port string, delegatorAddr, validatorAddr sdk.AccAddress) rest.DelegationWithoutRat {
// get the account to get the sequence
res, body := Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s/delegations/%s", delegatorAddr, validatorAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var bond rest.DelegationWithoutRat
err := cdc.UnmarshalJSON([]byte(body), &bond)
require.Nil(t, err)
return bond
}
func getUndelegations(t *testing.T, port string, delegatorAddr, validatorAddr sdk.AccAddress) []stake.UnbondingDelegation {
// get the account to get the sequence
res, body := Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s/unbonding_delegations/%s", delegatorAddr, validatorAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var unbondings []stake.UnbondingDelegation
err := cdc.UnmarshalJSON([]byte(body), &unbondings)
require.Nil(t, err)
return unbondings
}
func getDelegationSummary(t *testing.T, port string, delegatorAddr sdk.AccAddress) rest.DelegationSummary {
// get the account to get the sequence
res, body := Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s", delegatorAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var summary rest.DelegationSummary
err := cdc.UnmarshalJSON([]byte(body), &summary)
require.Nil(t, err)
return summary
}
func getBondingTxs(t *testing.T, port string, delegatorAddr sdk.AccAddress, query string) []tx.Info {
// get the account to get the sequence
var res *http.Response
var body string
if len(query) > 0 {
res, body = Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s/txs?type=%s", delegatorAddr, query), nil)
} else {
res, body = Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s/txs", delegatorAddr), nil)
}
require.Equal(t, http.StatusOK, res.StatusCode, body)
var txs []tx.Info
err := cdc.UnmarshalJSON([]byte(body), &txs)
require.Nil(t, err)
return txs
}
func doDelegate(t *testing.T, port, seed, name, password string, delegatorAddr, validatorAddr sdk.AccAddress) (resultTx ctypes.ResultBroadcastTxCommit) {
// get the account to get the sequence
func getDelegatorValidators(t *testing.T, port string, delegatorAddr sdk.AccAddress) []stake.BechValidator {
res, body := Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s/validators", delegatorAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var bondedValidators []stake.BechValidator
err := cdc.UnmarshalJSON([]byte(body), &bondedValidators)
require.Nil(t, err)
return bondedValidators
}
func getDelegatorValidator(t *testing.T, port string, delegatorAddr sdk.AccAddress, validatorAddr sdk.AccAddress) stake.BechValidator {
res, body := Request(t, port, "GET", fmt.Sprintf("/stake/delegators/%s/validators/%s", delegatorAddr, validatorAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var bondedValidator stake.BechValidator
err := cdc.UnmarshalJSON([]byte(body), &bondedValidator)
require.Nil(t, err)
return bondedValidator
}
func doDelegate(t *testing.T, port, seed, name, password string, delegatorAddr, validatorAddr sdk.AccAddress, amount int64) (resultTx ctypes.ResultBroadcastTxCommit) {
acc := getAccount(t, port, delegatorAddr)
accnum := acc.GetAccountNumber()
sequence := acc.GetSequence()
chainID := viper.GetString(client.FlagChainID)
// send
jsonStr := []byte(fmt.Sprintf(`{
"name": "%s",
"password": "%s",
@ -825,14 +854,15 @@ func doDelegate(t *testing.T, port, seed, name, password string, delegatorAddr,
{
"delegator_addr": "%s",
"validator_addr": "%s",
"delegation": { "denom": "%s", "amount": "60" }
"delegation": { "denom": "%s", "amount": "%d" }
}
],
"begin_unbondings": [],
"complete_unbondings": [],
"begin_redelegates": [],
"complete_redelegates": []
}`, name, password, accnum, sequence, chainID, delegatorAddr, validatorAddr, "steak"))
}`, name, password, accnum, sequence, chainID, delegatorAddr, validatorAddr, "steak", amount))
res, body := Request(t, port, "POST", fmt.Sprintf("/stake/delegators/%s/delegations", delegatorAddr), jsonStr)
require.Equal(t, http.StatusOK, res.StatusCode, body)
@ -844,16 +874,13 @@ func doDelegate(t *testing.T, port, seed, name, password string, delegatorAddr,
}
func doBeginUnbonding(t *testing.T, port, seed, name, password string,
delegatorAddr, validatorAddr sdk.AccAddress) (resultTx ctypes.ResultBroadcastTxCommit) {
delegatorAddr, validatorAddr sdk.AccAddress, amount int64) (resultTx ctypes.ResultBroadcastTxCommit) {
// get the account to get the sequence
acc := getAccount(t, port, delegatorAddr)
accnum := acc.GetAccountNumber()
sequence := acc.GetSequence()
chainID := viper.GetString(client.FlagChainID)
// send
jsonStr := []byte(fmt.Sprintf(`{
"name": "%s",
"password": "%s",
@ -866,13 +893,14 @@ func doBeginUnbonding(t *testing.T, port, seed, name, password string,
{
"delegator_addr": "%s",
"validator_addr": "%s",
"shares": "30"
"shares": "%d"
}
],
"complete_unbondings": [],
"begin_redelegates": [],
"complete_redelegates": []
}`, name, password, accnum, sequence, chainID, delegatorAddr, validatorAddr))
}`, name, password, accnum, sequence, chainID, delegatorAddr, validatorAddr, amount))
res, body := Request(t, port, "POST", fmt.Sprintf("/stake/delegators/%s/delegations", delegatorAddr), jsonStr)
require.Equal(t, http.StatusOK, res.StatusCode, body)
@ -886,14 +914,12 @@ func doBeginUnbonding(t *testing.T, port, seed, name, password string,
func doBeginRedelegation(t *testing.T, port, seed, name, password string,
delegatorAddr, validatorSrcAddr, validatorDstAddr sdk.AccAddress) (resultTx ctypes.ResultBroadcastTxCommit) {
// get the account to get the sequence
acc := getAccount(t, port, delegatorAddr)
accnum := acc.GetAccountNumber()
sequence := acc.GetSequence()
chainID := viper.GetString(client.FlagChainID)
// send
jsonStr := []byte(fmt.Sprintf(`{
"name": "%s",
"password": "%s",
@ -914,6 +940,7 @@ func doBeginRedelegation(t *testing.T, port, seed, name, password string,
],
"complete_redelegates": []
}`, name, password, accnum, sequence, chainID, delegatorAddr, validatorSrcAddr, validatorDstAddr))
res, body := Request(t, port, "POST", fmt.Sprintf("/stake/delegators/%s/delegations", delegatorAddr), jsonStr)
require.Equal(t, http.StatusOK, res.StatusCode, body)
@ -925,7 +952,6 @@ func doBeginRedelegation(t *testing.T, port, seed, name, password string,
}
func getValidators(t *testing.T, port string) []stake.BechValidator {
// get the account to get the sequence
res, body := Request(t, port, "GET", "/stake/validators", nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var validators []stake.BechValidator
@ -935,7 +961,6 @@ func getValidators(t *testing.T, port string) []stake.BechValidator {
}
func getValidator(t *testing.T, port string, validatorAddr sdk.AccAddress) stake.BechValidator {
// get the account to get the sequence
res, body := Request(t, port, "GET", fmt.Sprintf("/stake/validators/%s", validatorAddr.String()), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var validator stake.BechValidator
@ -1033,7 +1058,7 @@ func getProposalsFilterStatus(t *testing.T, port string, status gov.ProposalStat
}
func doSubmitProposal(t *testing.T, port, seed, name, password string, proposerAddr sdk.AccAddress) (resultTx ctypes.ResultBroadcastTxCommit) {
// get the account to get the sequence
acc := getAccount(t, port, proposerAddr)
accnum := acc.GetAccountNumber()
sequence := acc.GetSequence()
@ -1067,7 +1092,7 @@ func doSubmitProposal(t *testing.T, port, seed, name, password string, proposerA
}
func doDeposit(t *testing.T, port, seed, name, password string, proposerAddr sdk.AccAddress, proposalID int64) (resultTx ctypes.ResultBroadcastTxCommit) {
// get the account to get the sequence
acc := getAccount(t, port, proposerAddr)
accnum := acc.GetAccountNumber()
sequence := acc.GetSequence()


@ -173,7 +173,7 @@ func InitializeTestLCD(t *testing.T, nValidators int, initAddrs []sdk.AccAddress
accAuth.Coins = sdk.Coins{sdk.NewInt64Coin("steak", 100)}
acc := gapp.NewGenesisAccount(&accAuth)
genesisState.Accounts = append(genesisState.Accounts, acc)
genesisState.StakeData.Pool.LooseTokens = genesisState.StakeData.Pool.LooseTokens.Add(sdk.NewRat(100))
genesisState.StakeData.Pool.LooseTokens = genesisState.StakeData.Pool.LooseTokens.Add(sdk.NewDec(100))
}
appState, err := wire.MarshalJSONIndent(cdc, genesisState)
@ -223,7 +223,7 @@ func startTM(
proxy.NewLocalClientCreator(app),
genDocProvider,
dbProvider,
nm.DefaultMetricsProvider,
nm.DefaultMetricsProvider(tmcfg.Instrumentation),
logger.With("module", "node"),
)
if err != nil {

View File

@ -44,5 +44,5 @@ func SignTxRequstHandler(w http.ResponseWriter, r *http.Request) {
return
}
w.Write(sig.Bytes())
w.Write(sig)
}


@ -148,7 +148,8 @@ func (app *GaiaApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) ab
func (app *GaiaApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock {
tags := gov.EndBlocker(ctx, app.govKeeper)
validatorUpdates := stake.EndBlocker(ctx, app.stakeKeeper)
// Add these new validators to the addr -> pubkey map.
app.slashingKeeper.AddValidators(ctx, validatorUpdates)
return abci.ResponseEndBlock{
ValidatorUpdates: validatorUpdates,
Tags: tags,
@ -181,6 +182,9 @@ func (app *GaiaApp) initChainer(ctx sdk.Context, req abci.RequestInitChain) abci
// return sdk.ErrGenesisParse("").TraceCause(err, "")
}
// load the address to pubkey map
slashing.InitGenesis(ctx, app.slashingKeeper, genesisState.StakeData)
gov.InitGenesis(ctx, app.govKeeper, gov.DefaultGenesisState())
return abci.ResponseInitChain{


@ -185,7 +185,7 @@ func GaiaAppGenState(cdc *wire.Codec, appGenTxs []json.RawMessage) (genesisState
}
acc := NewGenesisAccount(&accAuth)
genaccs[i] = acc
stakeData.Pool.LooseTokens = stakeData.Pool.LooseTokens.Add(sdk.NewRat(freeFermionsAcc)) // increase the supply
stakeData.Pool.LooseTokens = stakeData.Pool.LooseTokens.Add(sdk.NewDec(freeFermionsAcc)) // increase the supply
// add the validator
if len(genTx.Name) > 0 {
@ -193,10 +193,10 @@ func GaiaAppGenState(cdc *wire.Codec, appGenTxs []json.RawMessage) (genesisState
validator := stake.NewValidator(genTx.Address,
sdk.MustGetAccPubKeyBech32(genTx.PubKey), desc)
stakeData.Pool.LooseTokens = stakeData.Pool.LooseTokens.Add(sdk.NewRat(freeFermionVal)) // increase the supply
stakeData.Pool.LooseTokens = stakeData.Pool.LooseTokens.Add(sdk.NewDec(freeFermionVal)) // increase the supply
// add some new shares to the validator
var issuedDelShares sdk.Rat
var issuedDelShares sdk.Dec
validator, stakeData.Pool, issuedDelShares = validator.AddTokensFromDel(stakeData.Pool, freeFermionVal)
stakeData.Validators = append(stakeData.Validators, validator)


@ -48,7 +48,7 @@ func appStateFn(r *rand.Rand, accs []sdk.AccAddress) json.RawMessage {
// Default genesis state
stakeGenesis := stake.DefaultGenesisState()
stakeGenesis.Pool.LooseTokens = sdk.NewRat(1000)
stakeGenesis.Pool.LooseTokens = sdk.NewDec(1000)
genesis := GenesisState{
Accounts: genesisAccounts,
StakeData: stakeGenesis,


@ -34,7 +34,7 @@ func init() {
}
func TestGaiaCLISend(t *testing.T) {
tests.ExecuteT(t, fmt.Sprintf("gaiad --home=%s unsafe_reset_all", gaiadHome), "")
tests.ExecuteT(t, fmt.Sprintf("gaiad --home=%s unsafe-reset-all", gaiadHome), "")
executeWrite(t, fmt.Sprintf("gaiacli keys delete --home=%s foo", gaiacliHome), app.DefaultKeyPass)
executeWrite(t, fmt.Sprintf("gaiacli keys delete --home=%s bar", gaiacliHome), app.DefaultKeyPass)
@ -87,7 +87,7 @@ func TestGaiaCLISend(t *testing.T) {
}
func TestGaiaCLICreateValidator(t *testing.T) {
tests.ExecuteT(t, fmt.Sprintf("gaiad --home=%s unsafe_reset_all", gaiadHome), "")
tests.ExecuteT(t, fmt.Sprintf("gaiad --home=%s unsafe-reset-all", gaiadHome), "")
executeWrite(t, fmt.Sprintf("gaiacli keys delete --home=%s foo", gaiacliHome), app.DefaultKeyPass)
executeWrite(t, fmt.Sprintf("gaiacli keys delete --home=%s bar", gaiacliHome), app.DefaultKeyPass)
chainID := executeInit(t, fmt.Sprintf("gaiad init -o --name=foo --home=%s --home-client=%s", gaiadHome, gaiacliHome))
@ -132,7 +132,7 @@ func TestGaiaCLICreateValidator(t *testing.T) {
validator := executeGetValidator(t, fmt.Sprintf("gaiacli stake validator %s --output=json %v", barAddr, flags))
require.Equal(t, validator.Owner, barAddr)
require.True(sdk.RatEq(t, sdk.NewRat(2), validator.Tokens))
require.True(sdk.DecEq(t, sdk.NewDec(2), validator.Tokens))
// unbond a single share
unbondStr := fmt.Sprintf("gaiacli stake unbond begin %v", flags)
@ -149,11 +149,11 @@ func TestGaiaCLICreateValidator(t *testing.T) {
require.Equal(t, int64(9), barAcc.GetCoins().AmountOf("steak").Int64(), "%v", barAcc)
*/
validator = executeGetValidator(t, fmt.Sprintf("gaiacli stake validator %s --output=json %v", barAddr, flags))
require.Equal(t, "1/1", validator.Tokens.String())
require.Equal(t, "1.0000000000", validator.Tokens.String())
}
func TestGaiaCLISubmitProposal(t *testing.T) {
tests.ExecuteT(t, fmt.Sprintf("gaiad --home=%s unsafe_reset_all", gaiadHome), "")
tests.ExecuteT(t, fmt.Sprintf("gaiad --home=%s unsafe-reset-all", gaiadHome), "")
executeWrite(t, fmt.Sprintf("gaiacli keys delete --home=%s foo", gaiacliHome), app.DefaultKeyPass)
executeWrite(t, fmt.Sprintf("gaiacli keys delete --home=%s bar", gaiacliHome), app.DefaultKeyPass)
chainID := executeInit(t, fmt.Sprintf("gaiad init -o --name=foo --home=%s --home-client=%s", gaiadHome, gaiacliHome))

Binary file not shown.


@ -1,498 +1,7 @@
# Connect to the `gaia-7001` Testnet
# DEPRECATED
_**NOTE:**_ We are aware this documentation is a work in progress. We are actively
working to improve the tooling and the documentation to make this process as painless as
possible. In the meantime, join the [Validator Chat](https://riot.im/app/#/room/#cosmos_validators:matrix.org)
for technical support, and [open issues](https://github.com/cosmos/cosmos-sdk) if you run into any! Thanks very much for your patience and support. :)
The content of this file was moved to the `/docs` folder and is hosted on the
[website](https://cosmos.network/docs/getting-started/full-node.html#run-a-full-node).
## Setting Up a New Node
These instructions are for setting up a brand new full node from scratch. If you ran a full node on a previous testnet you will need to start from scratch due to some breaking changes in key format.
### Install Go
Install `go` by following the [official docs](https://golang.org/doc/install).
**Go 1.10+** is required for the Cosmos SDK. Remember to properly set up your `$GOPATH`, `$GOBIN`, and `$PATH` variables, for example:
```bash
mkdir -p $HOME/go/bin
echo "export GOPATH=$HOME/go" >> ~/.bash_profile
echo "export GOBIN=$GOPATH/bin" >> ~/.bash_profile
echo "export PATH=$PATH:$GOBIN" >> ~/.bash_profile
```
### Install Cosmos SDK
Next, let's install the testnet's version of the Cosmos SDK.
```bash
mkdir -p $GOPATH/src/github.com/cosmos
cd $GOPATH/src/github.com/cosmos
git clone https://github.com/cosmos/cosmos-sdk
cd cosmos-sdk && git checkout v0.22.0
make get_tools && make get_vendor_deps && make install
```
That will install the `gaiad` and `gaiacli` binaries. Verify that everything is OK:
```bash
$ gaiad version
0.22.0
$ gaiacli version
0.22.0
```
### Node Setup
Create the required configuration files, and initialize the node:
```bash
gaiad init --name <your_custom_moniker>
```
> *NOTE:* Only ASCII characters are supported for `--name`. Using Unicode renders your node unreachable.
You can also edit this `moniker` in the `~/.gaiad/config/config.toml` file:
```toml
# A custom human readable name for this node
moniker = "<your_custom_moniker>"
```
Your full node has been initialized!
## Upgrading From Previous Testnet
These instructions are for full nodes that have run on previous testnets and
would like to upgrade to the latest testnet.
### Reset Data
First, remove the outdated files and reset the data.
```bash
rm $HOME/.gaiad/config/addrbook.json $HOME/.gaiad/config/genesis.json
gaiad unsafe_reset_all
```
Your node is now in a pristine state while keeping the original `priv_validator.json` and `config.toml`.
If you had any sentry nodes or full nodes set up before, your node will still try to connect to them,
but may fail if they haven't also been upgraded.
**WARNING:** Make sure that every node has a unique `priv_validator.json`.
Do not copy the `priv_validator.json` from an old node to multiple new nodes.
Running two nodes with the same `priv_validator.json` will cause you to double sign.
NOTE: key formats changed between gaia-6002 and gaia-7000. If you're trying to upgrade from gaia-6002,
you will also need to delete your `priv_validator.json`:
```
rm $HOME/.gaiad/config/priv_validator.json
```
### Software Upgrade
Now it is time to upgrade the software:
```bash
cd $GOPATH/src/github.com/cosmos/cosmos-sdk
git fetch --all && git checkout v0.22.0
make update_tools && make get_vendor_deps && make install
```
Your full node has been cleanly upgraded!
## Genesis & Seeds
### Copy the Genesis File
Fetch the testnet's `genesis.json` file and place it in `gaiad`'s config directory.
```bash
mkdir -p $HOME/.gaiad/config
curl https://gist.githubusercontent.com/cwgoes/311da6ba05be6e113185a716538a44c3/raw/7b6e784cf29761b5781488006313bd69d164aa6c/chris-final.json > $HOME/.gaiad/config/genesis.json
```
### Add Seed Nodes
Your node needs to know how to find peers. You'll need to add healthy seed nodes to `$HOME/.gaiad/config/config.toml`. Here are some seed nodes you can use:
```toml
# Comma separated list of seed nodes to connect to
seeds = "718145d422a823fd2a4e1e36e91b92bb0c4ddf8e@gaia-7000.coinculture.net:26656,5922bf29b48a18c2300b85cc53f424fce23927ab@67.207.73.206:26656,7c8b8fd03577cd4817f5be1f03d506f879df98d8@gaia-7000-seed1.interblock.io:26656,a28737ff02391a6e00a1d3b79befd57e68e8264c@gaia-7000-seed2.interblock.io:26656,987ffd26640cd03d08ed7e53b24dfaa7956e612d@gaia-7000-seed3.interblock.io:26656"
```
If those seeds aren't working, you can find more seeds and persistent peers on the [Cosmos Explorer](https://explorecosmos.network/nodes). Open the `Full Nodes` pane and select nodes that do not have private (`10.x.x.x`) or [local IP addresses](https://en.wikipedia.org/wiki/Private_network). The `Persistent Peer` field contains the connection string. For best results, use 4-6.
For more information on seeds and peers, [read this](https://github.com/tendermint/tendermint/blob/develop/docs/using-tendermint.md#peers).
## Run a Full Node
Start the full node with this command:
```bash
gaiad start
```
Check that everything is running smoothly:
```bash
gaiacli status
```
View the status of the network with the [Cosmos Explorer](https://explorecosmos.network). Once your full node syncs up to the current block height, you should see it appear on the [list of full nodes](https://explorecosmos.network/validators). If it doesn't show up, that's ok--the Explorer does not connect to every node.
## Generating Keys
### A Note on Keys in Cosmos:
There are three types of key representations that are used in this tutorial:
- `cosmosaccaddr`
* Derived from account keys generated by `gaiacli keys add`
* Used to receive funds
* e.g. `cosmosaccaddr15h6vd5f0wqps26zjlwrc6chah08ryu4hzzdwhc`
- `cosmosaccpub`
* Derived from account keys generated by `gaiacli keys add`
* e.g. `cosmosaccpub1zcjduc3q7fu03jnlu2xpl75s2nkt7krm6grh4cc5aqth73v0zwmea25wj2hsqhlqzm`
- `cosmosvalpub`
* Generated when the node is created with `gaiad init`.
* Get this value with `gaiad tendermint show_validator`
* e.g. `cosmosvalpub1zcjduc3qcyj09qc03elte23zwshdx92jm6ce88fgc90rtqhjx8v0608qh5ssp0w94c`
### Key Generation
You'll need an account private and public key pair \(a.k.a. `sk, pk` respectively\) to be able to receive funds, send txs, bond txs, etc.
To generate a new key \(default _ed25519_ elliptic curve\):
```bash
gaiacli keys add <account_name>
```
Next, you will have to create a passphrase to protect the key on disk. The output of the above command will contain a _seed phrase_. Save the _seed phrase_ in a safe place in case you forget the password!
If you check your private keys, you'll now see `<account_name>`:
```bash
gaiacli keys show <account_name>
```
You can see all your available keys by typing:
```bash
gaiacli keys list
```
View the validator pubkey for your node by typing:
```bash
gaiad tendermint show_validator
```
**WARNING:** We strongly recommend NOT using the same passphrase for multiple keys. The Tendermint team and the Interchain Foundation will not be responsible for the loss of funds. This is not as important on the testnets, but is good security practice and should be followed.
## Fund your account
The best way to get tokens is from the [Cosmos Testnet Faucet](https://faucetcosmos.network). If the faucet is not working for you, try asking [#cosmos-validators](https://riot.im/app/#/room/#cosmos-validators:matrix.org). The faucet needs the `cosmosaccaddr` from the account you wish to use for staking.
After receiving tokens to your address, you can view your account's balance by typing:
```bash
gaiacli account <account_cosmosaccaddr>
```
> _*Note:*_ When you query an account balance with zero tokens, you will get this error: `No account with address <account_cosmosaccaddr> was found in the state.` This can also happen if you fund the account before your node has fully synced with the chain. These are both normal. Also, we're working on improving our error messages!
## Run a Validator Node
[Validators](https://cosmos.network/validators) are responsible for committing new blocks to the blockchain through voting. A validator's stake is slashed if they become unavailable, double sign a transaction, or don't cast their votes. If you only want to run a full node, a VM in the cloud is fine. However, if you want to become a validator for the Hub's `mainnet`, you should research hardened setups. Please read [Sentry Node Architecture](https://forum.cosmos.network/t/sentry-node-architecture-overview/454) to protect your node from DDOS and ensure high availability. Also see the [technical requirements](https://github.com/cosmos/cosmos/blob/master/VALIDATORS_FAQ.md#technical-requirements). There's also more info on our [website](https://cosmos.network/validators).
### Create Your Validator
Your `cosmosvalpub` can be used to create a new validator by staking tokens. You can find your validator pubkey by running:
```bash
gaiad tendermint show_validator
```
Next, craft your `gaiacli stake create-validator` command:
> _*NOTE:*_ Don't use more `steak` than you have! You can always get more by using the [Faucet](https://faucetcosmos.network/)!
```bash
gaiacli stake create-validator \
--amount=5steak \
--pubkey=$(gaiad tendermint show_validator) \
--address-validator=<account_cosmosaccaddr>
--moniker="choose a moniker" \
--chain-id=gaia-7001 \
--from=<key_name>
```
### Edit Validator Description
You can edit your validator's public description. This info is to identify your validator, and will be relied on by delegators to decide which validators to stake to. Make sure to provide input for every flag below, otherwise the field will default to empty (`--moniker` defaults to the machine name).
The `--identity` flag can be used to verify identity with systems like Keybase or UPort. When used with Keybase, `--identity` should be populated with a 16-digit string that is generated with a [keybase.io](https://keybase.io) account. It's a cryptographically secure method of verifying your identity across multiple online networks. The Keybase API allows us to retrieve your Keybase avatar. This is how you can add a logo to your validator profile.
```bash
gaiacli stake edit-validator
--address-validator=<account_cosmosaccaddr>
--moniker="choose a moniker" \
--website="https://cosmos.network" \
--identity=6A0D65E29A4CBC8E
--details="To infinity and beyond!"
--chain-id=gaia-7001 \
--from=<key_name>
```
### View Validator Description
View the validator's information with this command:
```bash
gaiacli stake validator \
--address-validator=<account_cosmosaccaddr> \
--chain-id=gaia-7001
```
Your validator is active if the following command returns anything:
```bash
gaiacli advanced tendermint validator-set | grep "$(gaiad tendermint show_validator)"
```
You should also be able to see your validator on the [Explorer](https://explorecosmos.network/validators). You are looking for the `bech32` encoded `address` in the `~/.gaiad/config/priv_validator.json` file.
> _*Note:*_ To be in the validator set, you need to have more total voting power than the 100th validator. This is not normally an issue.
### Problem #1: My validator has `voting_power: 0`
Your validator has become auto-unbonded. In `gaia-7001`, we unbond validators if they do not vote on `50` of the last `100` blocks. Since blocks are proposed every ~2 seconds, a validator unresponsive for ~100 seconds will become unbonded. This usually happens when your `gaiad` process crashes.
Here's how you can return the voting power back to your validator. First, if `gaiad` is not running, start it up again:
```bash
gaiad start
```
Wait for your full node to catch up to the latest block. Next, run the following command. Note that `<cosmosaccaddr>` is the address of your validator account, and `<name>` is the name of the validator account. You can find this info by running `gaiacli keys list`.
```bash
gaiacli stake unrevoke <cosmosaccaddr> --chain-id=gaia-7001 --from=<name>
```
**WARNING:** If you don't wait for `gaiad` to sync before running `unrevoke`, you will receive an error message telling you your validator is still jailed.
Lastly, check your validator again to see if your voting power is back.
```bash
gaiacli status
```
You may notice that your voting power is less than it used to be. That's because you got slashed for downtime!
### Problem #2: My `gaiad` crashes because of `too many open files`
The default number of files Linux can open (per-process) is `1024`. `gaiad` is known to open more than `1024` files. This causes the process to crash. A quick fix is to run `ulimit -n 4096` (increase the number of open files allowed) and then restart the process with `gaiad start`. If you are using `systemd` or another process manager to launch `gaiad` this may require some configuration at that level. A sample `systemd` file to fix this issue is below:
```toml
# /etc/systemd/system/gaiad.service
[Unit]
Description=Cosmos Gaia Node
After=network.target
[Service]
Type=simple
User=ubuntu
WorkingDirectory=/home/ubuntu
ExecStart=/home/ubuntu/go/bin/gaiad start
Restart=on-failure
RestartSec=3
LimitNOFILE=4096
[Install]
WantedBy=multi-user.target
```
## Delegating to a Validator
On the upcoming mainnet, you can delegate `Atom` to a validator. These [delegators](https://cosmos.network/resources/delegators) can receive part of the validator's fee revenue. Read more about the [Cosmos Token Model](https://github.com/cosmos/cosmos/raw/master/Cosmos_Token_Model.pdf).
### Bond Tokens
On the testnet, we delegate `steak` instead of `Atom`. Here's how you can bond tokens to a testnet validator:
```bash
gaiacli stake delegate \
--amount=10steak \
--address-delegator=<account_cosmosaccaddr> \
--address-validator=<validator_cosmosaccaddr> \
--from=<key_name> \
--chain-id=gaia-7001
```
While tokens are bonded, they are pooled with all the other bonded tokens in the network. Validators and delegators obtain a percentage of shares that equal their stake in this pool.
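To make the proportional-shares idea concrete, here is a small Go sketch of a toy bonded pool. This is a simplified model with assumed names, not the SDK's actual `x/stake` logic:

```go
package main

import "fmt"

// pool is a toy model of the bonded-token pool described above: delegations
// receive shares in proportion to the tokens they add, so each participant's
// share of the shares equals their share of the bonded tokens.
// Illustrative only, not the SDK's staking implementation.
type pool struct {
	bondedTokens float64
	totalShares  float64
}

// delegate adds tokens to the pool and returns the shares issued for them.
func (p *pool) delegate(tokens float64) float64 {
	issued := tokens // bootstrap case: 1 share per token
	if p.totalShares > 0 {
		issued = p.totalShares * tokens / p.bondedTokens
	}
	p.bondedTokens += tokens
	p.totalShares += issued
	return issued
}

func main() {
	p := &pool{bondedTokens: 90, totalShares: 90}
	shares := p.delegate(10) // bond 10 steak into a 90-steak pool
	fmt.Printf("issued %.0f shares = %.0f%% of the pool\n", shares, 100*shares/p.totalShares)
	// Output: issued 10 shares = 10% of the pool
}
```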
> _*NOTE:*_ Don't use more `steak` than you have! You can always get more by using the [Faucet](https://gaia.faucetcosmos.network/)!
### Unbond Tokens
If for any reason the validator misbehaves, or you want to unbond a certain amount of tokens, use the following command. You can unbond a specific amount of `shares` \(e.g. `12.1`\) or all of them \(`MAX`\).
```bash
gaiacli stake unbond \
--address-delegator=<account_cosmosaccaddr> \
--address-validator=<validator_cosmosaccaddr> \
--shares=MAX \
--from=<key_name> \
--chain-id=gaia-7001
```
You can check your balance and your stake delegation to see that the unbonding went through successfully.
```bash
gaiacli account <account_cosmosaccaddr>
gaiacli stake delegation \
--address-delegator=<account_cosmosaccaddr> \
--address-validator=<validator_cosmosaccaddr> \
--chain-id=gaia-7001
```
## Governance
Governance is the process through which users of the Cosmos Hub can come to consensus on software upgrades, parameters of the mainnet, or custom text proposals. This is done through voting on proposals, which will be submitted by `Atom` holders on the mainnet.
Some considerations about the voting process:
- Voting is done by bonded `Atom` holders on a 1 bonded `Atom` 1 vote basis
- Delegators inherit the vote of their validator if they don't vote
- **Validators MUST vote on every proposal**. If a validator does not vote on a proposal, they will be **partially slashed**
- Votes are tallied at the end of the voting period (2 weeks on mainnet). Each address can vote multiple times to update its `Option` value (paying the transaction fee each time); only the last vote cast counts as valid
- Voters can choose between options `Yes`, `No`, `NoWithVeto` and `Abstain`
At the end of the voting period, a proposal is accepted if `(YesVotes/(YesVotes+NoVotes+NoWithVetoVotes)) > 1/2` and `(NoWithVetoVotes/(YesVotes+NoVotes+NoWithVetoVotes)) < 1/3`. It is rejected otherwise.
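For concreteness, here is a minimal Go sketch of that acceptance rule. It is illustrative only, not SDK code; the function name and plain float inputs are assumptions:

```go
package main

import "fmt"

// proposalPasses applies the two thresholds quoted above to raw vote counts.
// Abstain votes are excluded from both ratios, matching the denominators in
// the formulas in this section.
func proposalPasses(yes, no, noWithVeto float64) bool {
	total := yes + no + noWithVeto
	if total == 0 {
		return false
	}
	return yes/total > 0.5 && noWithVeto/total < 1.0/3.0
}

func main() {
	fmt.Println(proposalPasses(60, 30, 10)) // true: Yes ratio 0.6 > 1/2, veto ratio 0.1 < 1/3
	fmt.Println(proposalPasses(40, 25, 35)) // false: Yes ratio 0.4 <= 1/2 (and veto ratio 0.35 >= 1/3)
}
```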
For more information about the governance process and how it works, please check out the Governance module [specification](https://github.com/cosmos/cosmos-sdk/tree/develop/docs/spec/governance).
### Create a Governance proposal
In order to create a governance proposal, you must submit an initial deposit along with the proposal details:
- `title`: Title of the proposal
- `description`: Description of the proposal
- `type`: Type of proposal. Must be of value _Text_ (types _SoftwareUpgrade_ and _ParameterChange_ not supported yet).
```bash
gaiacli gov submit-proposal \
--title=<title> \
--description=<description> \
--type=<Text/ParameterChange/SoftwareUpgrade> \
--proposer=<account_cosmosaccaddr> \
--deposit=<40steak> \
--from=<name> \
--chain-id=gaia-7001
```
### Increase deposit
In order for a proposal to be broadcast to the network, the amount deposited must be above a `minDeposit` value (default: `10 steak`). If the proposal you previously created didn't meet this requirement, you can still increase the total amount deposited to activate it. Once the minimum deposit is reached, the proposal enters the voting period:
```bash
gaiacli gov deposit \
--proposalID=<proposal_id> \
--depositer=<account_cosmosaccaddr> \
--deposit=<200steak> \
--from=<name> \
--chain-id=gaia-7001
```
> _NOTE_: Proposals that don't meet this requirement will be deleted after `MaxDepositPeriod` is reached.
#### Query proposal
Once created, you can query information about the proposal:
```bash
gaiacli gov query-proposal \
--proposalID=<proposal_id> \
--chain-id=gaia-7001
```
### Vote on a proposal
After a proposal's deposit reaches the `MinDeposit` value, the voting period opens. Bonded `Atom` holders can then cast their vote on it:
```bash
gaiacli gov vote \
--proposalID=<proposal_id> \
--voter=<account_cosmosaccaddr> \
--option=<Yes/No/NoWithVeto/Abstain> \
--from=<name> \
--chain-id=gaia-7001
```
#### Query vote
Check the vote with the option you just submitted:
```bash
gaiacli gov query-vote \
--proposalID=<proposal_id> \
--voter=<account_cosmosaccaddr> \
--chain-id=gaia-7001
```
## Other Operations
### Send Tokens
```bash
gaiacli send \
--amount=10faucetToken \
--chain-id=gaia-7001 \
--from=<key_name> \
--to=<destination_cosmosaccaddr>
```
> _*NOTE:*_ The `--amount` flag accepts the format `--amount=<value|coin_name>`.
Now, view the updated balances of the origin and destination accounts:
```bash
gaiacli account <account_cosmosaccaddr>
gaiacli account <destination_cosmosaccaddr>
```
You can also check your balance at a given block by using the `--block` flag:
```bash
gaiacli account <account_cosmosaccaddr> --block=<block_height>
```
## Create your Own Testnet
To create your own testnet, each validator will first need to install `gaiad` and
run `gen-tx`:
```bash
gaiad init gen-tx --name <account_name>
```
The validator will be prompted to enter a password for their new account.
This populates `$HOME/.gaiad/gen-tx/` with a json file.
Now these JSON files need to be aggregated via GitHub, a Google form, pastebin, or other methods.
Place all files on one computer in `$HOME/.gaiad/gen-tx/`
```bash
gaiad init --gen-txs -o --chain=<chain-name>
```
This will generate a `genesis.json` in `$HOME/.gaiad/config/genesis.json`; distribute this file to all validators on your testnet.
The rest of this folder was moved to the [testnets
repo](https://github.com/cosmos/testnets).


@ -1,4 +1,6 @@
# TESTNET STATUS
# DEPRECATED
See [testnets repo](https://github.com/cosmos/testnets).
## *July 22, 2018, 5:30 EST* - Gaia-7001 Consensus Failure

View File

@ -15,12 +15,14 @@ type byter interface {
Bytes() []byte
}
func checkAminoBinary(t *testing.T, src byter, dst interface{}, size int) {
func checkAminoBinary(t *testing.T, src, dst interface{}, size int) {
// Marshal to binary bytes.
bz, err := cdc.MarshalBinaryBare(src)
require.Nil(t, err, "%+v", err)
// Make sure this is compatible with current (Bytes()) encoding.
require.Equal(t, src.Bytes(), bz, "Amino binary vs Bytes() mismatch")
if byterSrc, ok := src.(byter); ok {
// Make sure this is compatible with current (Bytes()) encoding.
require.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch")
}
// Make sure we have the expected length.
if size != -1 {
require.Equal(t, size, len(bz), "Amino binary size mismatch")
@ -55,8 +57,6 @@ func ExamplePrintRegisteredTypes() {
//| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
//| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
//| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
//| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
//| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | |
}
func TestKeyEncodings(t *testing.T) {
@ -86,13 +86,11 @@ func TestKeyEncodings(t *testing.T) {
require.EqualValues(t, tc.privKey, priv3)
// Check (de/en)codings of Signatures.
var sig1, sig2, sig3 tcrypto.Signature
var sig1, sig2 []byte
sig1, err := tc.privKey.Sign([]byte("something"))
require.NoError(t, err)
checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways.
require.EqualValues(t, sig1, sig2)
checkAminoJSON(t, sig1, &sig3, false) // TODO also check Prefix bytes.
require.EqualValues(t, sig1, sig3)
// Check (de/en)codings of PubKeys.
pubKey := tc.privKey.PubKey()
@ -107,7 +105,7 @@ func TestKeyEncodings(t *testing.T) {
func TestNilEncodings(t *testing.T) {
// Check nil Signature.
var a, b tcrypto.Signature
var a, b []byte
checkAminoJSON(t, &a, &b, true)
require.EqualValues(t, a, b)

View File

@ -199,7 +199,7 @@ func (kb dbKeybase) Get(name string) (Info, error) {
// Sign signs the msg with the named key.
// It returns an error if the key doesn't exist or the decryption fails.
func (kb dbKeybase) Sign(name, passphrase string, msg []byte) (sig tmcrypto.Signature, pub tmcrypto.PubKey, err error) {
func (kb dbKeybase) Sign(name, passphrase string, msg []byte) (sig []byte, pub tmcrypto.PubKey, err error) {
info, err := kb.Get(name)
if err != nil {
return

View File

@ -146,7 +146,7 @@ func TestSignVerify(t *testing.T) {
cases := []struct {
key crypto.PubKey
data []byte
sig crypto.Signature
sig []byte
valid bool
}{
// proper matches

View File

@ -16,7 +16,7 @@ type Keybase interface {
Delete(name, passphrase string) error
// Sign some bytes, looking up the private key to use
Sign(name, passphrase string, msg []byte) (crypto.Signature, crypto.PubKey, error)
Sign(name, passphrase string, msg []byte) ([]byte, crypto.PubKey, error)
// CreateMnemonic creates a new mnemonic, and derives a hierarchical deterministic
// key from that.

View File

@ -114,7 +114,7 @@ func (pkl PrivKeyLedgerSecp256k1) Equals(other tmcrypto.PrivKey) bool {
// Communication is checked on NewPrivKeyLedger and PrivKeyFromBytes, returning
// an error, so this should only trigger if the private key is held in memory
// for a while before use.
func (pkl PrivKeyLedgerSecp256k1) Sign(msg []byte) (tmcrypto.Signature, error) {
func (pkl PrivKeyLedgerSecp256k1) Sign(msg []byte) ([]byte, error) {
sig, err := pkl.signLedgerSecp256k1(msg)
if err != nil {
return nil, err
@ -135,13 +135,8 @@ func (pkl PrivKeyLedgerSecp256k1) getPubKey() (key tmcrypto.PubKey, err error) {
return key, err
}
func (pkl PrivKeyLedgerSecp256k1) signLedgerSecp256k1(msg []byte) (tmcrypto.Signature, error) {
sigBytes, err := pkl.ledger.SignSECP256K1(pkl.Path, msg)
if err != nil {
return nil, err
}
return tmsecp256k1.SignatureSecp256k1FromBytes(sigBytes), nil
func (pkl PrivKeyLedgerSecp256k1) signLedgerSecp256k1(msg []byte) ([]byte, error) {
return pkl.ledger.SignSECP256K1(pkl.Path, msg)
}
func (pkl PrivKeyLedgerSecp256k1) pubkeyLedgerSecp256k1() (pub tmcrypto.PubKey, err error) {

View File

@ -1,8 +1,8 @@
## Fees
- Collection
- Gas price based on parameter
- (which gets changed automatically)
- https://github.com/cosmos/cosmos-sdk/issues/1921
- Per block gas usage as %
- Windowing function
- Block N,
@ -22,7 +22,8 @@
- Only use text proposals
- On-chain mechanism for agreeing on when to "flip" to new functionality
## Slashing/Stability
## Staking/Slashing/Stability
- Unbonding state for validators https://github.com/cosmos/cosmos-sdk/issues/1676
- current: downtime, double signing during unbonding
- who gets slashed when -- needs review about edge cases
- need to communicate to everyone that lite has this edge case
@ -68,3 +69,10 @@
## Slashing/Stability
- tendermint evidence: we don't yet slash byzantine signatures (signing at all) when not bonded.
# Other priority
## gaiad // gaiacli
- Documentation // language
## gaialite
- Documentation // language

View File

@ -493,6 +493,49 @@ paths:
500:
description: Internal Server Error
/stake/delegators/{delegatorAddr}/validators:
parameters:
- in: path
name: delegatorAddr
description: Bech32 AccAddress of Delegator
required: true
type: string
get:
summary: Query all validators that a delegator is bonded to
tags:
- stake
produces:
- application/json
responses:
200:
description: OK
404:
description: Not Found
/stake/delegators/{delegatorAddr}/validators/{validatorAddr}:
parameters:
- in: path
name: delegatorAddr
description: Bech32 AccAddress of Delegator
required: true
type: string
- in: path
name: validatorAddr
description: Bech32 ValAddress of Validator
required: true
type: string
get:
summary: Query a validator that a delegator is bonded to
tags:
- stake
produces:
- application/json
responses:
200:
description: OK
404:
description: Not Found
/stake/delegators/{delegatorAddr}/txs:
parameters:
- in: path

View File

@ -1,5 +1,11 @@
# Join the Testnet
::: tip Current Testnet
See the [testnet repo](https://github.com/cosmos/testnets) for
information on the latest testnet, including the correct version
of the Cosmos-SDK to use and details about the genesis file.
:::
Please ensure you have the [Cosmos SDK](/getting-started/installation.md) installed. If you ran a full node on a previous testnet, please skip to [Upgrading From Previous Testnet](#upgrading-from-previous-testnet).
## Setting Up a New Node
@ -35,7 +41,7 @@ First, remove the outdated files and reset the data.
```bash
rm $HOME/.gaiad/config/addrbook.json $HOME/.gaiad/config/genesis.json
gaiad unsafe_reset_all
gaiad unsafe-reset-all
```
Your node is now in a pristine state while keeping the original `priv_validator.json` and `config.toml`. If you had any sentry nodes or full nodes setup before,
@ -52,30 +58,40 @@ Now it is time to upgrade the software:
```bash
cd $GOPATH/src/github.com/cosmos/cosmos-sdk
git fetch --all && git checkout v0.19.0
git fetch --all && git checkout master
make update_tools && make get_vendor_deps && make install
```
Note we use `master` here since it contains the latest stable release.
See the [testnet repo](https://github.com/cosmos/testnets)
for details on which version is needed for which testnet,
and the [SDK release page](https://github.com/cosmos/cosmos-sdk/releases)
for details on each release.
Your full node has been cleanly upgraded!
## Genesis & Seeds
### Copy the Genesis File
Copy the testnet's `genesis.json` file and place it in `gaiad`'s config directory.
Fetch the testnet's `genesis.json` file into `gaiad`'s config directory.
```bash
mkdir -p $HOME/.gaiad/config
cp -a $GOPATH/src/github.com/cosmos/cosmos-sdk/cmd/gaia/testnets/gaia-6002/genesis.json $HOME/.gaiad/config/genesis.json
curl https://raw.githubusercontent.com/cosmos/testnets/master/latest/genesis.json > $HOME/.gaiad/config/genesis.json
```
Note we use the `latest` directory in the [testnets repo](https://github.com/cosmos/testnets)
which contains details for the latest testnet. If you are connecting to a different testnet, ensure you
get the right files.
### Add Seed Nodes
Your node needs to know how to find peers. You'll need to add healthy seed nodes to `$HOME/.gaiad/config/config.toml`. Here are some seed nodes you can use:
```toml
# Comma separated list of seed nodes to connect to
seeds = "38aa9bec3998f12ae9088b21a2d910d19d565c27@gaia-6002.coinculture.net:46656,80a35a46ce09cfb31ee220c8141a25e73e0b239b@seed.cosmos.cryptium.ch:46656,80a35a46ce09cfb31ee220c8141a25e73e0b239b@35.198.166.171:46656,032fa56301de335d835057fb6ad9f7ce2242a66d@165.227.236.213:46656"
seeds = "718145d422a823fd2a4e1e36e91b92bb0c4ddf8e@gaia-testnet.coinculture.net:26656,5922bf29b48a18c2300b85cc53f424fce23927ab@67.207.73.206:26656,7c8b8fd03577cd4817f5be1f03d506f879df98d8@gaia-7000-seed1.interblock.io:26656,a28737ff02391a6e00a1d3b79befd57e68e8264c@gaia-7000-seed2.interblock.io:26656,987ffd26640cd03d08ed7e53b24dfaa7956e612d@gaia-7000-seed3.interblock.io:26656"
```
If those seeds aren't working, you can find more seeds and persistent peers on the [Cosmos Explorer](https://explorecosmos.network/nodes). Open the `Full Nodes` pane and select nodes that do not have private (`10.x.x.x`) or [local IP addresses](https://en.wikipedia.org/wiki/Private_network). The `Persistent Peer` field contains the connection string. For best results use 4-6.

View File

@ -20,12 +20,18 @@ echo "export PATH=$PATH:$GOBIN" >> ~/.bash_profile
## Install Cosmos SDK
Next, let's install the testnet's version of the Cosmos SDK.
You can find information about the latest testnet and the right
version of the Cosmos-SDK for it in the [testnets
repo](https://github.com/cosmos/testnets#testnet-status).
Here we'll use the `master` branch, which contains the latest stable release.
If necessary, make sure you `git checkout` the correct
[released version](https://github.com/cosmos/cosmos-sdk/releases).
```bash
mkdir -p $GOPATH/src/github.com/cosmos
cd $GOPATH/src/github.com/cosmos
git clone https://github.com/cosmos/cosmos-sdk
cd cosmos-sdk && git checkout v0.19.0
cd cosmos-sdk && git checkout master
make get_tools && make get_vendor_deps && make install
```
@ -33,10 +39,7 @@ That will install the `gaiad` and `gaiacli` binaries. Verify that everything is
```bash
$ gaiad version
0.19.0-c6711810
$ gaiacli version
0.19.0-c6711810
```
## Run a Full Node

View File

@ -1,59 +0,0 @@
# Install
The fastest and easiest way to install the Cosmos SDK binaries
is to run [this script](https://github.com/cosmos/cosmos-sdk/blob/develop/scripts/install_sdk_ubuntu.sh) on a fresh Ubuntu instance. Similarly, you can run [this script](https://github.com/cosmos/cosmos-sdk/blob/develop/scripts/install_sdk_bsd.sh) on a fresh FreeBSD instance. Read the scripts before running them to ensure no untrusted connection is being made, for example we're making curl requests to download golang. Also read the comments / instructions carefully (i.e., reset your terminal after running the script).
Cosmos SDK can be installed to
`$GOPATH/src/github.com/cosmos/cosmos-sdk` like a normal Go program:
```
go get github.com/cosmos/cosmos-sdk
```
If the dependencies have been updated with breaking changes, or if
another branch is required, `dep` is used for dependency management.
Thus, assuming you've already run `go get` or otherwise cloned the repo,
the correct way to install is:
```
cd $GOPATH/src/github.com/cosmos/cosmos-sdk
make get_tools
make get_vendor_deps
make install
make install_examples
```
This will install `gaiad` and `gaiacli` and four example binaries:
`basecoind`, `basecli`, `democoind`, and `democli`.
Verify that everything is OK by running:
```
gaiad version
```
you should see:
```
0.17.3-a5a78eb
```
then with:
```
gaiacli version
```
you should see the same version (or a later one for both).
## Update
Get latest code (you can also `git fetch` only the version desired),
ensure the dependencies are up to date, then recompile.
```
cd $GOPATH/src/github.com/cosmos/cosmos-sdk
git fetch -a origin
git checkout VERSION
make get_vendor_deps
make install
```

View File

@ -0,0 +1,27 @@
## Tendermint and Cosmos
Blockchains can be divided into three conceptual layers:
- **Networking:** Responsible for propagating transactions.
- **Consensus:** Enables validator nodes to agree on the next set of transactions to process (i.e. add blocks of transactions to the blockchain).
- **Application:** Responsible for updating the state given a set of transactions, i.e. processing transactions.
The *networking* layer makes sure that each node receives transactions. The *consensus* layer makes sure that each node agrees on the same transactions to modify their local state. As for the *application* layer, it processes transactions. Given a transaction and a state, the application will return a new state. In Bitcoin, for example, the application state is a ledger or list of balances for each account (in reality, it's a list of UTXO, short for Unspent Transaction Output, but let's call them balances for the sake of simplicity), and the transactions modify the application's state by changing this list of balances. In the case of Ethereum, the application is a virtual machine. Each transaction goes through this virtual machine and modifies the application state according to the smart contract that is called within it.
Before Tendermint, building a blockchain required building all three layers from the ground up. It was such a tedious task that most developers preferred to fork or replicate the Bitcoin codebase, but were constrained by the limitations of Bitcoin's protocol. The Ethereum Virtual Machine (EVM) was designed to solve this problem and simplify decentralized application development by allowing customizable logic to be executed through smart contracts. But it did not resolve the limitations (interoperability, scalability and customization) of blockchains themselves. Go-Ethereum remains a very monolithic tech stack that is difficult to hard-fork, much like Bitcoin's codebase.
Tendermint was designed to address these issues and provide developers with an alternative. The goal of Tendermint is to provide the *networking* and *consensus* layers of a blockchain as a generic engine to power any application developers want to build. With Tendermint, developers only have to focus on the *application* layer, thereby saving hundreds of hours of work and costly development set-ups. For reference, Tendermint also designates the name of the byzantine fault tolerant consensus algorithm used within the Tendermint Core engine.
Tendermint connects the blockchain engine, Tendermint Core (*networking* and *consensus* layers), to the *application* layer via a socket protocol called the [ABCI](https://github.com/tendermint/abci), short for Application-BlockChain Interface. Developers only have to implement a few messages to build an ABCI-enabled application that runs on top of the Tendermint Core engine. ABCI is language agnostic, meaning that developers can build the *application* part of their blockchain in any programming language. Building on top of the Tendermint Core engine also provides the following benefits:
- **Public or private blockchain capable.** Developers can deploy any blockchain application, permissioned (private) and permissionless (public), on top of Tendermint Core.
- **Performance.** Tendermint Core is a state-of-the-art blockchain consensus engine able to handle a large number of transactions in a short timespan. Block times on Tendermint Core can be as low as one second, and thousands of transactions can be processed in that time.
- **Instant finality.** A property of the Tendermint consensus algorithm is instant finality, meaning that forks are never created, as long as less than a third of the validators are malicious (byzantine). Users can be sure their transactions are finalized as soon as a block is created.
- **Security.** Tendermint Core's consensus is not only fault tolerant, it is optimally Byzantine fault-tolerant (BFT), with accountability. If the blockchain forks, there is a way to determine liability.
- **Light-client support**. Tendermint provides built-in light-clients.
But most importantly, Tendermint is natively compatible with the [Inter-Blockchain Communication Protocol](https://github.com/cosmos/cosmos-sdk/tree/develop/docs/spec/ibc) (IBC). This means that any Tendermint-based blockchain, whether public or private, can be natively connected to the Cosmos ecosystem and securely exchange tokens with other blockchains in the ecosystem. Note that benefiting from interoperability via IBC and Cosmos preserves the sovereignty of your Tendermint chain. Non-Tendermint chains can also be connected to Cosmos via IBC adapters or Peg-Zones, but this is out of scope for this document.
For a more detailed overview of the Cosmos ecosystem, you can read [this article](https://blog.cosmos.network/understanding-the-value-proposition-of-cosmos-ecaef63350d).
For more on Tendermint, go [here](tendermint.md).

View File

@ -5,9 +5,43 @@ Tendermint is software for securely and consistently replicating an application
Tendermint is designed to be easy-to-use, simple-to-understand, highly performant, and useful for a wide variety of distributed applications.
## Byzantine Fault Tolerance
The ability to tolerate machines failing in arbitrary ways, including becoming malicious, is known as Byzantine fault tolerance (BFT). The theory of BFT is decades old, but software implementations have only become popular recently, due largely to the success of “blockchain technology” like Bitcoin and Ethereum. Blockchain technology is just a re-formalization of BFT in a more modern setting, with emphasis on peer-to-peer networking and cryptographic authentication. The name derives from the way transactions are batched in blocks, where each block contains a cryptographic hash of the previous one, forming a chain. In practice, the blockchain data structure actually optimizes BFT design.
## Application Blockchain Interface
Tendermint consists of two chief technical components: a blockchain consensus engine and a generic application interface. The consensus engine, called Tendermint Core, ensures that the same transactions are recorded on every machine in the same order. The application interface, called the Application Blockchain Interface (ABCI), enables the transactions to be processed in any programming language. Unlike other blockchain and consensus solutions, Tendermint allows developers to use BFT state machine replication in any programming language or development environment. Visit the [Tendermint docs](https://tendermint.readthedocs.io/projects/tools/en/master/introduction.html#abci-overview) for a deep dive into the ABCI.
The [Cosmos SDK](/sdk/overview.md) is an ABCI framework written in Go. [Lotion JS](/lotion/overview.md) is an ABCI framework written in JavaScript.
## Understanding the roles of the different layers
It is important to have a good understanding of the respective responsibilities of both the *Application* and the *Consensus Engine*.
Responsibilities of the *Consensus Engine*:
- Propagate transactions
- Agree on the order of valid transactions
Responsibilities of the *Application*:
- Generate Transactions
- Check if transactions are valid
- Process Transactions (includes state changes)
It is worth underlining that the *Consensus Engine* has knowledge of a given validator set for each block, but that it is the responsibility of the *Application* to trigger validator set changes. This is the reason why it is possible to build both **public and private chains** with the Cosmos-SDK and Tendermint. A chain will be public or private depending on the rules, defined at the application level, that govern validator set changes.
The ABCI establishes the connection between the *Consensus Engine* and the *Application*. Essentially, it boils down to two messages:
- `CheckTx`: Ask the application if the transaction is valid. When a validator's node receives a transaction, it will run `CheckTx` on it. If the transaction is valid, it is added to the mempool.
- `DeliverTx`: Ask the application to process the transaction and update the state.
Let us give a high-level overview of how the *Consensus Engine* and the *Application* interact with each other.
- At all times, when the consensus engine (Tendermint Core) of a validator node receives a transaction, it passes it to the application via `CheckTx` to check its validity. If it is valid, the transaction is added to the mempool.
- Let us say we are at block N. There is a validator set V. A proposer of the next block is selected from V by the *Consensus Engine*. The proposer gathers valid transactions from its mempool to form a new block. Then, the block is gossiped to other validators to be signed/committed. The block becomes block N+1 once 2/3+ of V have signed a *precommit* on it (For a more detailed explanation of the consensus algorithm, click [here](https://github.com/tendermint/tendermint/wiki/Byzantine-Consensus-Algorithm)).
- When block N+1 is signed by 2/3+ of V, it is gossiped to full-nodes. When full-nodes receive the block, they confirm its validity. A block is valid if it holds valid signatures from more than 2/3 of V and if all the transactions in the block are valid. To check the validity of transactions, the *Consensus Engine* transfers them to the application via `DeliverTx`. After each transaction, `DeliverTx` returns a new state if the transaction was valid. At the end of the block, a final state is committed. Of course, this means that the order of transactions within a block matters.
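To make this interaction concrete, here is a small, self-contained Go sketch of the `CheckTx`/`DeliverTx` flow described above. It does not use the real ABCI types; the `App` and `Engine` types below are simplified stand-ins for the *Application* and the *Consensus Engine*.
```go
package main

import "fmt"

// Tx is a toy transaction type.
type Tx string

// App plays the role of the *Application*: it validates and processes transactions.
type App struct {
	state []Tx
}

// CheckTx performs a cheap validity check (here: non-empty) without changing state.
func (a *App) CheckTx(tx Tx) bool { return len(tx) > 0 }

// DeliverTx processes a transaction and updates the application state.
func (a *App) DeliverTx(tx Tx) { a.state = append(a.state, tx) }

// Engine plays the role of the *Consensus Engine*: it keeps a mempool and commits blocks.
type Engine struct {
	app     *App
	mempool []Tx
}

// ReceiveTx adds a transaction to the mempool only if the application accepts it.
func (e *Engine) ReceiveTx(tx Tx) {
	if e.app.CheckTx(tx) {
		e.mempool = append(e.mempool, tx)
	}
}

// CommitBlock delivers the pooled transactions to the application, in order.
func (e *Engine) CommitBlock() {
	for _, tx := range e.mempool {
		e.app.DeliverTx(tx)
	}
	e.mempool = nil
}

func main() {
	eng := &Engine{app: &App{}}
	eng.ReceiveTx("send 10 atoms")
	eng.ReceiveTx("") // rejected by CheckTx, never enters the mempool
	eng.CommitBlock()
	fmt.Println(eng.app.state) // [send 10 atoms]
}
```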
## Application frameworks
Even if Tendermint makes it easy for developers to build their own blockchain by enabling them to focus on the *Application* layer, building an *Application* can be a challenging task in itself. This is why *Application Frameworks* exist. They provide developers with a secure and feature-rich environment to develop Tendermint-based applications. Here are some examples of *Application Frameworks*:
- The [Cosmos SDK](/sdk/overview.md) is an ABCI framework written in Go.
- [Lotion JS](/lotion/overview.md) is an ABCI framework written in JavaScript.

View File

@ -4,4 +4,4 @@ Cosmos is a decentralized network of independent parallel blockchains, each powe
The first blockchain in the Cosmos Network is the [Cosmos Hub](), whose native token is the Atom. Cosmos is a permission-less network, meaning that anybody can build a blockchain on it.
Cosmos can interoperate with multiple other applications and cryptocurrencies. By creating a new zone, you can plug any blockchain system into the Cosmos hub and pass tokens back and forth between those zones, without the need for an intermediary.
Cosmos can interoperate with multiple other applications and cryptocurrencies. By creating a new zone, you can plug any blockchain system into the Cosmos hub and pass tokens back and forth between those zones, without the need for an intermediary.

View File

@ -479,6 +479,62 @@ Returns on error:
}
```
### /stake/delegators/{delegatorAddr}/validators - GET
url: /stake/delegators/{delegatorAddr}/validators
Functionality: Query all validators that a delegator is bonded to.
Returns on success:
```json
{
"rest api":"2.0",
"code":200,
"error":"",
"result":{}
}
```
Returns on failure:
```json
{
"rest api":"2.0",
"code":500,
"error":"TODO",
"result":{}
}
```
### /stake/delegators/{delegatorAddr}/validators/{validatorAddr} - GET
url: /stake/delegators/{delegatorAddr}/validators/{validatorAddr}
Functionality: Query a validator that a delegator is bonded to
Returns on success:
```json
{
"rest api":"2.0",
"code":200,
"error":"",
"result":{}
}
```
Returns on failure:
```json
{
"rest api":"2.0",
"code":500,
"error":"TODO",
"result":{}
}
```
### /stake/delegators/{delegatorAddr}/txs - GET
url: /stake/delegators/{delegatorAddr}/txs

View File

@ -323,6 +323,14 @@ return KeyOutput{
TODO
### [/stake/delegators/{delegatorAddr}/validators](api.md#stakedelegatorsdelegatorAddrvalidators---get)
TODO
### [/stake/delegators/{delegatorAddr}/validators/{validatorAddr}](api.md#stakedelegatorsdelegatorAddrvalidatorsvalidatorAddr---get)
TODO
### [/stake/delegators/{delegatorAddr}/txs](api.md#stakedelegatorsdelegatorAddrtxs---get)
TODO

View File

@ -10,7 +10,6 @@
🚧 We are actively working on improving documentation for Gaiacli and Gaiad.
:::
`gaiacli` is the command line interface to manage accounts and transactions on Cosmos testnets. Here is a list of useful `gaiacli` commands, including usage examples.
### Key Types
@ -18,18 +17,20 @@
There are three types of key representations that are used:
- `cosmosaccaddr`
* Derived from account keys generated by `gaiacli keys add`
* Used to receive funds
* e.g. `cosmosaccaddr15h6vd5f0wqps26zjlwrc6chah08ryu4hzzdwhc`
- Derived from account keys generated by `gaiacli keys add`
- Used to receive funds
- e.g. `cosmosaccaddr15h6vd5f0wqps26zjlwrc6chah08ryu4hzzdwhc`
- `cosmosaccpub`
* Derived from account keys generated by `gaiacli keys add`
* e.g. `cosmosaccpub1zcjduc3q7fu03jnlu2xpl75s2nkt7krm6grh4cc5aqth73v0zwmea25wj2hsqhlqzm`
- Derived from account keys generated by `gaiacli keys add`
- e.g. `cosmosaccpub1zcjduc3q7fu03jnlu2xpl75s2nkt7krm6grh4cc5aqth73v0zwmea25wj2hsqhlqzm`
- `cosmosvalpub`
* Generated when the node is created with `gaiad init`.
* Get this value with `gaiad tendermint show_validator`
* e.g. `cosmosvalpub1zcjduc3qcyj09qc03elte23zwshdx92jm6ce88fgc90rtqhjx8v0608qh5ssp0w94c`
- Generated when the node is created with `gaiad init`.
- Get this value with `gaiad tendermint show-validator`
- e.g. `cosmosvalpub1zcjduc3qcyj09qc03elte23zwshdx92jm6ce88fgc90rtqhjx8v0608qh5ssp0w94c`
### Generate Keys
@ -58,11 +59,11 @@ gaiacli keys list
View the validator pubkey for your node by typing:
```bash
gaiad tendermint show_validator
gaiad tendermint show-validator
```
::: danger Warning
We strongly recommend *NOT* using the same passphrase for multiple keys. The Tendermint team and the Interchain Foundation will not be responsible for the loss of funds.
We strongly recommend _NOT_ using the same passphrase for multiple keys. The Tendermint team and the Interchain Foundation will not be responsible for the loss of funds.
:::
### Get Tokens
@ -86,7 +87,7 @@ We're working on improving our error messages!
```bash
gaiacli send \
--amount=10faucetToken \
--chain-id=gaia-6002 \
--chain-id=gaia-7005 \
--name=<key_name> \
--to=<destination_cosmosaccaddr>
```
@ -119,15 +120,15 @@ On the testnet, we delegate `steak` instead of `atom`. Here's how you can bond t
```bash
gaiacli stake delegate \
--amount=10steak \
--address-validator=$(gaiad tendermint show_validator) \
--from=<key_name> \
--chain-id=gaia-6002
--address-validator=$(gaiad tendermint show-validator) \
--name=<key_name> \
--chain-id=gaia-7005
```
While tokens are bonded, they are pooled with all the other bonded tokens in the network. Validators and delegators obtain a percentage of shares that equal their stake in this pool.
::: tip Note
Don't use more `steak` than you have! You can always get more by using the [Faucet](https://faucetcosmos.network/)!
Don't use more `steak` than you have! You can always get more by using the [Faucet](https://faucetcosmos.network/)!
:::
### Unbond Tokens
@ -136,10 +137,10 @@ If for any reason the validator misbehaves, or you want to unbond a certain amou
```bash
gaiacli stake unbond begin \
--address-validator=$(gaiad tendermint show_validator) \
--shares-percent=1 \
--from=<key_name> \
--chain-id=gaia-6002
--address-validator=$(gaiad tendermint show-validator) \
--shares=MAX \
--name=<key_name> \
--chain-id=gaia-7005
```
Later you must use the `gaiacli stake unbond complete` command to finish
@ -151,8 +152,8 @@ gaiacli account <account_cosmosaccaddr>
gaiacli stake delegation \
--address-delegator=<account_cosmosaccaddr> \
--address-validator=$(gaiad tendermint show_validator) \
--chain-id=gaia-6002
--address-validator=$(gaiad tendermint show-validator) \
--chain-id=gaia-7005
```
## Light Client Daemon

View File

@ -150,7 +150,7 @@ func NewCodec() *wire.Codec {
```
Note: We also register the types in the `tendermint/tendermint/crypto` module so that `crypto.PubKey`
and `crypto.Signature` are encoded/decoded correctly.
is encoded/decoded correctly.
Amino supports encoding and decoding in both a binary and JSON format.
See the [codec API docs](https://godoc.org/github.com/tendermint/go-amino#Codec) for more details.
@ -166,7 +166,7 @@ type app2Tx struct {
sdk.Msg
PubKey crypto.PubKey
Signature crypto.Signature
Signature []byte
}
// This tx only has one Msg.

View File

@ -160,7 +160,7 @@ The standard form for signatures is `StdSignature`:
// the first transaction made by the account.
type StdSignature struct {
crypto.PubKey `json:"pub_key"` // optional
crypto.Signature `json:"signature"`
[]byte `json:"signature"`
AccountNumber int64 `json:"account_number"`
Sequence int64 `json:"sequence"`
}

View File

@ -183,7 +183,7 @@ type app2Tx struct {
sdk.Msg
PubKey crypto.PubKey
Signature crypto.Signature
Signature []byte
}
// This tx only has one Msg.
@ -191,7 +191,7 @@ func (tx app2Tx) GetMsgs() []sdk.Msg {
return []sdk.Msg{tx.Msg}
}
func (tx app2Tx) GetSignature() crypto.Signature {
func (tx app2Tx) GetSignature() []byte {
return tx.Signature
}

View File

@ -1,14 +1,16 @@
# Cosmos SDK Overview
The Cosmos-SDK is a framework for building Tendermint ABCI applications in
Golang. It is designed to allow developers to easily create custom interoperable
blockchain applications within the Cosmos Network.
The [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk) is a framework for building multi-asset Proof-of-Stake (PoS) blockchains, like the Cosmos Hub, as well as Proof-Of-Authority (PoA) blockchains.
To achieve its goals of flexibility and security, the SDK makes extensive use of
the [object-capability
model](https://en.wikipedia.org/wiki/Object-capability_model)
and the [principle of least
privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege).
The goal of the Cosmos-SDK is to allow developers to easily create custom interoperable blockchain applications within the Cosmos Network without having to recreate common blockchain functionality, thus removing the complexity of building a Tendermint ABCI application. We envision the SDK as the npm-like framework to build secure blockchain applications on top of Tendermint.
In terms of its design, the SDK optimizes flexibility and security. The framework is designed around a modular execution stack which allows applications to mix and match elements as desired. In addition, all modules are sandboxed for greater application security.
It is based on two major principles:
- **Composability:** Anyone can create a module for the Cosmos-SDK, and integrating already-built modules is as simple as importing them into your blockchain application.
- **Capabilities:** The SDK is inspired by capabilities-based security, and informed by years of wrestling with blockchain state-machines. Most developers will need to access other 3rd party modules when building their own modules. Given that the Cosmos-SDK is an open framework and that we assume that some of those modules may be malicious, we designed the SDK using object-capabilities (ocaps) based principles. In practice, this means that instead of having each module keep an access control list for other modules, each module implements special objects called keepers that can be passed to other modules to grant a pre-defined set of capabilities. For example, if an instance of module A's keepers is passed to module B, the latter will be able to call a restricted set of module A's functions. The capabilities of each keeper are defined by the module's developer, and it's the developer's job to understand and audit the safety of foreign code from 3rd party modules based on the capabilities they are passing into each 3rd party module. For a deeper look at capabilities, you can read this [article](http://habitatchronicles.com/2017/05/what-are-capabilities/).
For an introduction to object-capabilities, see this [article](http://habitatchronicles.com/2017/05/what-are-capabilities/).
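As a hedged illustration of this keeper pattern, here is a minimal sketch of a module exposing a read-only keeper alongside its full keeper. The module name, types and method names below are hypothetical, not actual SDK APIs.
```go
package moduleA

import sdk "github.com/cosmos/cosmos-sdk/types"

// ReadKeeper only grants read access to module A's store.
type ReadKeeper struct {
	storeKey sdk.StoreKey
}

// GetBalance reads a value from module A's store (the actual store access is omitted).
func (k ReadKeeper) GetBalance(ctx sdk.Context, addr sdk.Address) int64 {
	// a real keeper would read via ctx.KVStore(k.storeKey)
	return 0
}

// Keeper embeds ReadKeeper and adds write access; only module A's own handler
// should receive this unrestricted flavour.
type Keeper struct {
	ReadKeeper
}

// SetBalance writes a value to module A's store (the actual store access is omitted).
func (k Keeper) SetBalance(ctx sdk.Context, addr sdk.Address, amount int64) {
	// a real keeper would write via ctx.KVStore(k.storeKey)
}
```
A module B that only needs to read module A's data would be handed a `ReadKeeper`, never the full `Keeper`.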

View File

@ -0,0 +1,7 @@
**SDK by Examples** offers an alternative and complementary way to learn about the Cosmos-SDK. It contains several examples that showcase how to build a decentralised application on top of the Cosmos-SDK from start to finish.
Without further ado, let us get into it!
- [Simple governance example](./simple-governance/intro.md)
If you have an example you would like to add to the list, feel free to open a PR [here](https://github.com/cosmos/cosmos-sdk/pulls).

View File

@ -0,0 +1,23 @@
## Application CLI
**File: [`cmd/simplegovcli/main.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/cmd/simplegovcli/main.go)**
To interact with our application, let us add the commands from the `simple_governance` module to our `simpleGov` application, as well as the pre-built SDK commands:
```go
// cmd/simplegovcli/main.go
...
rootCmd.AddCommand(
client.GetCommands(
simplegovcmd.GetCmdQueryProposal("proposals", cdc),
simplegovcmd.GetCmdQueryProposals("proposals", cdc),
simplegovcmd.GetCmdQueryProposalVotes("proposals", cdc),
simplegovcmd.GetCmdQueryProposalVote("proposals", cdc),
)...)
rootCmd.AddCommand(
client.PostCommands(
simplegovcmd.PostCmdPropose(cdc),
simplegovcmd.PostCmdVote(cdc),
)...)
...
```

View File

@ -0,0 +1,21 @@
## Application codec
**File: [`app/app.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/app/app.go)**
Finally, we need to define the `MakeCodec()` function and register the concrete types and interfaces from the various modules.
```go
func MakeCodec() *wire.Codec {
var cdc = wire.NewCodec()
wire.RegisterCrypto(cdc) // Register crypto.
sdk.RegisterWire(cdc) // Register Msgs
bank.RegisterWire(cdc)
simplestake.RegisterWire(cdc)
simpleGov.RegisterWire(cdc)
// Register AppAccount
cdc.RegisterInterface((*auth.Account)(nil), nil)
cdc.RegisterConcrete(&types.AppAccount{}, "simpleGov/Account", nil)
return cdc
}
```

View File

@ -0,0 +1,9 @@
## App commands
We will need to add the newly created commands to our application. To do so, go to the `cmd` folder inside your root directory:
```bash
// At root level of directory
cd cmd
```
`simplegovd` is the folder that stores the command for running the server daemon, whereas `simplegovcli` defines the commands of your application.

View File

@ -0,0 +1,61 @@
## Application constructor
**File: [`app/app.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/app/app.go)**
Now, we need to define the constructor for our application.
```go
func NewSimpleGovApp(logger log.Logger, db dbm.DB) *SimpleGovApp
```
In this function, we will:
- Create the codec
```go
var cdc = MakeCodec()
```
- Instantiate our application. This includes creating the keys to access each of the substores.
```go
// Create your application object.
var app = &SimpleGovApp{
BaseApp: bam.NewBaseApp(appName, cdc, logger, db),
cdc: cdc,
capKeyMainStore: sdk.NewKVStoreKey("main"),
capKeyAccountStore: sdk.NewKVStoreKey("acc"),
capKeyStakingStore: sdk.NewKVStoreKey("stake"),
capKeySimpleGovStore: sdk.NewKVStoreKey("simpleGov"),
}
```
- Instantiate the keepers. Note that keepers generally need access to other modules' keepers. In this case, make sure you only pass an instance of the keeper for the functionality that is needed. If a keeper only needs to read from another module's store, a read-only keeper should be passed to it.
```go
app.coinKeeper = bank.NewKeeper(app.accountMapper)
app.stakeKeeper = simplestake.NewKeeper(app.capKeyStakingStore, app.coinKeeper,app.RegisterCodespace(simplestake.DefaultCodespace))
app.simpleGovKeeper = simpleGov.NewKeeper(app.capKeySimpleGovStore, app.coinKeeper, app.stakeKeeper, app.RegisterCodespace(simpleGov.DefaultCodespace))
```
- Declare the handlers.
```go
app.Router().
AddRoute("bank", bank.NewHandler(app.coinKeeper)).
AddRoute("simplestake", simplestake.NewHandler(app.stakeKeeper)).
AddRoute("simpleGov", simpleGov.NewHandler(app.simpleGovKeeper))
```
- Initialize the application.
```go
// Initialize BaseApp.
app.MountStoresIAVL(app.capKeyMainStore, app.capKeyAccountStore, app.capKeySimpleGovStore, app.capKeyStakingStore)
app.SetAnteHandler(auth.NewAnteHandler(app.accountMapper, app.feeCollectionKeeper))
err := app.LoadLatestVersion(app.capKeyMainStore)
if err != nil {
cmn.Exit(err.Error())
}
return app
```

View File

@ -0,0 +1,78 @@
## Application design
### Application description
For this tutorial, we will code a **simple governance application**, accompanied by a **simple governance module**. It will allow us to explain most of the basic notions required to build a functioning application on the Cosmos-SDK. Note that this is not the governance module used for the Cosmos Hub, which uses a much more [advanced governance module](https://github.com/cosmos/cosmos-sdk/tree/develop/x/gov).
All the code for the `simple_governance` application can be found [here](https://github.com/gamarin2/cosmos-sdk/tree/module_tutorial/examples/simpleGov/x/simple_governance). You'll notice that the module and app aren't located at the root level of the repo but in the examples directory. This is just for convenience; you can code your module and application directly in the root directory.
Without further talk, let's get into it!
### Requirements
We will start by writing down our module's requirements. We are designing a simple governance module, in which we want:
- Simple text proposals that any coin holder can submit.
- Proposals must be submitted with a deposit in Atoms. If the deposit is larger than the `MinDeposit`, the associated proposal enters the voting period. Otherwise it is rejected.
- Bonded Atom holders can vote on proposals on a 1 bonded Atom, 1 vote basis.
- Bonded Atom holders can choose between 3 options when casting a vote: `Yes`, `No` and `Abstain`.
- If, at the end of the voting period, there are more `Yes` votes than `No` votes, the proposal is accepted. Otherwise, it is rejected.
- The voting period is 2 weeks.
When designing a module, it is good to adopt a certain methodology. Remember that a blockchain application is just a replicated state-machine. The state is the representation of the application at a given time. It is up to the application developer to define what the state represents, depending on the goal of the application. For example, the state of a simple cryptocurrency application will be a mapping of addresses to balances.
The state can be updated according to predefined rules. Given a state and a transaction, the state-machine (i.e. the application) will return a new state. In a blockchain application, transactions are bundled in blocks, but the logic is the same. Given a state and a set of transactions (a block), the application returns a new state. An SDK module is just a subset of the application, but it is based on the same principles. As a result, module developers only have to define a subset of the state and a subset of the transaction types, which trigger state transitions.
In summary, we have to define:
- A `State`, which represents a subset of the current state of the application.
- `Transactions`, which contain messages that trigger state transitions.
### State
Here, we will define the types we need (excluding transaction types), as well as the stores in the multistore.
Our voting module is very simple: we only need a single type, `Proposal`. `Proposals` are items to be voted upon. They can be submitted by any user. A deposit has to be provided.
```go
type Proposal struct {
Title string // Title of the proposal
Description string // Description of the proposal
Submitter sdk.Address // Address of the submitter. Needed to refund deposit if proposal is accepted.
SubmitBlock int64 // Block at which proposal is submitted. Also the block at which voting period begins.
State string // State can be either "Open", "Accepted" or "Rejected"
YesVotes int64 // Total number of Yes votes
NoVotes int64 // Total number of No votes
AbstainVotes int64 // Total number of Abstain votes
}
```
In terms of store, we will just create one [KVStore](#kvstore) in the multistore to store `Proposals`. We will also store the `Vote` (`Yes`, `No` or `Abstain`) chosen by each voter on each proposal.
### Messages
As a module developer, what you have to define are not `Transactions`, but `Messages`. Both transactions and messages exist in the Cosmos-SDK, but a transaction differs from a message in that a message is contained in a transaction. Transactions wrap around messages and add standard information like signatures and fees. As a module developer, you do not have to worry about transactions, only messages.
Let us define the messages we need in order to modify the state. Based on the requirements above, we need to define two types of messages:
- `SubmitProposalMsg`: to submit proposals
- `VoteMsg`: to vote on proposals
```go
type SubmitProposalMsg struct {
Title string // Title of the proposal
Description string // Description of the proposal
Deposit sdk.Coins // Deposit paid by submitter. Must be > MinDeposit to enter voting period
Submitter sdk.Address // Address of the submitter
}
```
```go
type VoteMsg struct {
ProposalID int64 // ID of the proposal
Option string // Option chosen by voter
Voter sdk.Address // Address of the voter
}
```
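Beyond their fields, messages are also expected to satisfy the SDK's `Msg` interface (stateless validation, sign bytes, signers). As a hedged sketch only, here is the kind of stateless check a `ValidateBasic` method might perform for `SubmitProposalMsg`; the exact interface and error helpers for this SDK version should be checked against the example code linked above.
```go
// Hedged sketch: the exact sdk.Msg interface and error constructors may differ
// slightly in this SDK version; see the linked example repo for the real code.
func (msg SubmitProposalMsg) ValidateBasic() sdk.Error {
	if len(msg.Title) == 0 || len(msg.Description) == 0 {
		return sdk.ErrUnknownRequest("title and description cannot be empty")
	}
	if len(msg.Submitter) == 0 {
		return sdk.ErrInvalidAddress("submitter address cannot be empty")
	}
	if !msg.Deposit.IsValid() || !msg.Deposit.IsPositive() {
		return sdk.ErrInvalidCoins("deposit must be a valid, positive amount")
	}
	return nil
}
```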

View File

@ -0,0 +1,11 @@
## Application initialization
In the root of your fork of the SDK, create `app` and `cmd` folders. In them, we will create the main file for our application, `app.go`, and the entry points that handle the CLI and daemon commands for our app.
```bash
mkdir app cmd
mkdir -p cmd/simplegovcli cmd/simplegovd
touch app/app.go cmd/simplegovcli/main.go cmd/simplegovd/main.go
```
We will take care of these files later in the tutorial. The first step is to take care of our simple governance module.

View File

@ -0,0 +1,22 @@
## Makefile
The [Makefile](https://en.wikipedia.org/wiki/Makefile) compiles the Go program by defining a set of rules with targets and recipes. We'll need to add our application commands to it:
```
// Makefile
build_examples:
ifeq ($(OS),Windows_NT)
...
go build $(BUILD_FLAGS) -o build/simplegovd.exe ./examples/simpleGov/cmd/simplegovd
go build $(BUILD_FLAGS) -o build/simplegovcli.exe ./examples/simpleGov/cmd/simplegovcli
else
...
go build $(BUILD_FLAGS) -o build/simplegovd ./examples/simpleGov/cmd/simplegovd
go build $(BUILD_FLAGS) -o build/simplegovcli ./examples/simpleGov/cmd/simplegovcli
endif
...
install_examples:
...
go install $(BUILD_FLAGS) ./examples/simpleGov/cmd/simplegovd
go install $(BUILD_FLAGS) ./examples/simpleGov/cmd/simplegovcli
```

View File

@ -0,0 +1,57 @@
##### Rest server
**File: [`cmd/simplegovd/main.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/cmd/simplegovd/main.go)**
The `simplegovd` command will run the daemon server as a background process. First, let us create some `utils` functions:
```go
// cmd/simplegovd/main.go
// SimpleGovAppInit initial parameters
var SimpleGovAppInit = server.AppInit{
AppGenState: SimpleGovAppGenState,
AppGenTx: server.SimpleAppGenTx,
}
// SimpleGovAppGenState sets up the app_state and appends the simpleGov app state
func SimpleGovAppGenState(cdc *wire.Codec, appGenTxs []json.RawMessage) (appState json.RawMessage, err error) {
appState, err = server.SimpleAppGenState(cdc, appGenTxs)
if err != nil {
return
}
return
}
func newApp(logger log.Logger, db dbm.DB) abci.Application {
return app.NewSimpleGovApp(logger, db)
}
func exportAppState(logger log.Logger, db dbm.DB) (json.RawMessage, error) {
dapp := app.NewSimpleGovApp(logger, db)
return dapp.ExportAppStateJSON()
}
```
Now, let us define the command for the daemon server within the `main()` function:
```go
// cmd/simplegovd/main.go
func main() {
cdc := app.MakeCodec()
ctx := server.NewDefaultContext()
rootCmd := &cobra.Command{
Use: "simplegovd",
Short: "Simple Governance Daemon (server)",
PersistentPreRunE: server.PersistentPreRunEFn(ctx),
}
server.AddCommands(ctx, cdc, rootCmd, SimpleGovAppInit,
server.ConstructAppCreator(newApp, "simplegov"),
server.ConstructAppExporter(exportAppState, "simplegov"))
// prepare and add flags
rootDir := os.ExpandEnv("$HOME/.simplegovd")
executor := cli.PrepareBaseCmd(rootCmd, "BC", rootDir)
executor.Execute()
}
```

View File

@ -0,0 +1,55 @@
## Application structure
Now that we have built all the pieces we need, it is time to integrate them into the application. Let us exit the `/x` directory and go back to the root of the SDK directory.
```bash
// At root level of directory
cd app
```
We are ready to create our simple governance application!
*Note: You can check the full file (with comments!) [here](link)*
The `app.go` file is the main file that defines your application. In it, you will declare all the modules you need, their keepers, handlers, stores, etc. Let us take a look at each section of this file to see how the application is constructed.
First, we need to define the name of our application.
```go
const (
appName = "SimpleGovApp"
)
```
Then, let us define the structure of our application.
```go
// Extended ABCI application
type SimpleGovApp struct {
*bam.BaseApp
cdc *wire.Codec
// keys to access the substores
capKeyMainStore *sdk.KVStoreKey
capKeyAccountStore *sdk.KVStoreKey
capKeyStakingStore *sdk.KVStoreKey
capKeySimpleGovStore *sdk.KVStoreKey
// keepers
feeCollectionKeeper auth.FeeCollectionKeeper
coinKeeper bank.Keeper
stakeKeeper simplestake.Keeper
simpleGovKeeper simpleGov.Keeper
// Manage getting and setting accounts
accountMapper auth.AccountMapper
}
```
- Each application builds on top of the `BaseApp` template, hence the pointer.
- `cdc` is the codec used in our application.
- Then come the keys to the stores we need in our application. For our simple governance app, we need 3 stores + the main store.
- Then come the keepers and mappers.
Let us do a quick reminder so that it is clear why we need these stores and keepers. Our application is primarily based on the `simple_governance` module. However, we have established in section [Keepers for our app](module-keeper.md) that our module needs access to two other modules: the `bank` module and the `stake` module. We also need the `auth` module for basic account functionalities. Finally, we need access to the main multistore to declare the stores of each of the modules we use.

View File

@ -0,0 +1,19 @@
## Cast a vote to an existing proposal
Let's cast a vote on the created proposal:
```bash
simplegovcli vote --proposal-id=1 --option="No"
```
Get the value of the option from the vote you just cast:
```bash
simplegovcli proposal-vote 1 <your_address>
```
You can also check all the votes cast on a proposal:
```bash
simplegovcli proposals-votes 1
```

View File

@ -0,0 +1,58 @@
# SDK By Examples - Simple Governance Application
In this tutorial, you will learn the basics of coding an application with the Cosmos-SDK. Applications built on top of the SDK are called *Application-specific blockchains*. They are decentralised applications running on their own blockchains. The application we will build in this tutorial is a simple governance application.
Before getting into the bulk of the code, we will start with some introductory content on Tendermint, Cosmos and the programming philosophy of the SDK. Let us get started!
## Table of contents:
### Introduction - Prerequisite reading
- [Intro to Tendermint and Cosmos](../../../introduction/tendermint-cosmos.md)
- [Tendermint Core and ABCI](../../../introduction/tendermint.md)
- [Intro to Cosmos-SDK](../../overview.md)
- [Starting your own project](start.md)
### Setup and design phase
- [Setup](setup.md)
- [Application design](app-design.md)
### Implementation of the application
**Important note: All the code for this application can be found [here](https://github.com/cosmos/cosmos-sdk/tree/fedekunze/module_tutorial/examples/simpleGov). Snippets will be provided throughout the tutorial, but please refer to the provided link for the full implementation details**
- [Application initialization](app-init.md)
- Simple Governance module
+ [Module initialization](module-init.md)
+ [Types](module-types.md)
+ [Keeper](module-keeper.md)
+ [Handler](module-handler.md)
+ [Wire](module-wire.md)
+ [Errors](module-errors.md)
+ Command-Line Interface and Rest API
* [Command-Line Interface](module-cli.md)
* [Rest API](module-rest.md)
- Bridging it all together
+ [Application structure](app-structure.md)
+ [Application CLI and Rest Server](app-commands.md)
* [Application CLI](app-cli.md)
* [Rest Server](app-rest.md)
+ [Makefile](app-makefile.md)
+ [Application constructor](app-constructor.md)
+ [Application codec](app-codec.md)
- Running the application
+ [Installation](run-install.md)
+ [Submit a proposal](submit-proposal.md)
+ [Cast a vote](cast-vote.md)
## Useful links
If you have any questions regarding this tutorial or about development on the SDK, please reach out to us through our official communication channels:
- [Cosmos-SDK Riot Channel](https://riot.im/app/#/room/#cosmos-sdk:matrix.org)
- [Telegram](https://t.me/cosmosproject)
Or open an issue on the SDK repo:
- [Cosmos-SDK repo](https://github.com/cosmos/cosmos-sdk/)

View File

@ -0,0 +1,33 @@
## Command-Line Interface (CLI)
**File: [`x/simple_governance/client/cli/simple_governance.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/client/cli/simple_governance.go)**
Go into the `cli` folder and create a `simple_governance.go` file. This is where we will define the commands for our module.
The CLI builds on top of [Cobra](https://github.com/spf13/cobra). Here is the schema to build a command on top of Cobra:
```go
// Declare flags
const(
Flag = "flag"
...
)
// Main command function. One function for each command.
func Command(codec *wire.Codec) *cobra.Command {
// Create the command to return
command := &cobra.Command{
Use: "actual command",
Short: "Short description",
Run: func(cmd *cobra.Command, args []string) error {
// Actual function to run when command is used
},
}
// Add flags to the command
command.Flags().<Type>(FlagNameConstant, <example_value>, "<Description>")
return command
}
```

View File

@ -0,0 +1,7 @@
## Errors
**File: [`x/simple_governance/errors.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/errors.go)**
The `errors.go` file allows us to define custom error messages for our module. Declaring errors should be relatively similar in all modules. You can look in the `errors.go` file directly for a concrete example. The code is self-explanatory.
Note that the errors of our module inherit from the `sdk.Error` interface and therefore possess the method `Result()`. This method is useful when there is an error in the `handler` and an error has to be returned in place of an actual result.
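As a hedged sketch only (the codespace and code values, and the `sdk.NewError(codespace, code, msg)` constructor, are assumptions to be checked against the actual `errors.go`), a custom error helper might look like this:
```go
// Hedged sketch: codespace/code values and the sdk.NewError signature are
// assumptions; refer to errors.go in the example repo for the real definitions.
const (
	DefaultCodespace sdk.CodespaceType = 9

	CodeProposalNotFound sdk.CodeType = 1
)

// ErrProposalNotFound would be returned by the handler when a vote references an
// unknown proposal; its Result() can be returned directly from the handler.
func ErrProposalNotFound(codespace sdk.CodespaceType) sdk.Error {
	return sdk.NewError(codespace, CodeProposalNotFound, "proposal not found")
}
```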

View File

@ -0,0 +1,73 @@
## Handler
**File: [`x/simple_governance/handler.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/handler.go)**
### Constructor and core handlers
Handlers implement the core logic of the state-machine. When a transaction is routed from the app to the module, it is run by the `handler` function.
In practice, one `handler` will be implemented for each message of the module. In our case, we have two message types. We will therefore need two `handler` functions. We will also need a constructor function to route the message to the correct `handler`:
```go
func NewHandler(k Keeper) sdk.Handler {
return func(ctx sdk.Context, msg sdk.Msg) sdk.Result {
switch msg := msg.(type) {
case SubmitProposalMsg:
return handleSubmitProposalMsg(ctx, k, msg)
case VoteMsg:
return handleVoteMsg(ctx, k, msg)
default:
errMsg := "Unrecognized gov Msg type: " + reflect.TypeOf(msg).Name()
return sdk.ErrUnknownRequest(errMsg).Result()
}
}
}
```
The messages are routed to the appropriate `handler` depending on their type. For our simple governance module, we only have two `handlers`, which correspond to our two message types. They have similar signatures:
```go
func handleSubmitProposalMsg(ctx sdk.Context, k Keeper, msg SubmitProposalMsg) sdk.Result
```
Let us take a look at the parameters of this function:
- The context `ctx` to access the stores.
- The keeper `k` allows the handler to read and write from the different stores, including the module's store (`SimpleGovernance` in our case) and all the stores from other modules that the keeper `k` has been granted an access to (`stake` and `bank` in our case).
- The message `msg` that holds all the information provided by the sender of the transaction.
The function returns a `Result` that is returned to the application. It contains useful information, such as the amount of `Gas` used for this transaction and whether the message was successfully processed. At this point, we exit the boundaries of our simple governance module and go back to the root application level. The `Result` will differ from application to application. You can check the `sdk.Result` type directly [here](https://github.com/cosmos/cosmos-sdk/blob/develop/types/result.go) for more info.
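As a hedged sketch (the keeper helpers `NewProposalID` and `SetProposal` are hypothetical names, not the actual API of the example repo), `handleSubmitProposalMsg` might look roughly like this:
```go
// Hedged sketch only: NewProposalID and SetProposal are hypothetical keeper
// helpers used for illustration, not the example repo's exact API.
func handleSubmitProposalMsg(ctx sdk.Context, k Keeper, msg SubmitProposalMsg) sdk.Result {
	// Create and store the new proposal; voting starts at the current block.
	proposalID := k.NewProposalID(ctx)
	proposal := Proposal{
		Title:       msg.Title,
		Description: msg.Description,
		Submitter:   msg.Submitter,
		SubmitBlock: ctx.BlockHeight(),
		State:       "Open",
	}
	k.SetProposal(ctx, proposalID, proposal)

	// Deposit handling (locking msg.Deposit via the bank keeper) is omitted here.
	return sdk.Result{}
}
```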
### BeginBlocker and EndBlocker
In contrast to most smart-contract platforms, it is possible to perform automatic (i.e. not triggered by a transaction sent by an end-user) execution of logic in Cosmos-SDK applications.
This automatic execution of code takes place in the `BeginBlock` and `EndBlock` functions that are called at the beginning and at the end of every block. They are powerful tools, but it is important for application developers to be careful with them. For example, it is crucial that developers control the amount of computation that happens in these functions, as expensive computation could delay the block time, and a never-ending loop could freeze the chain altogether.
`BeginBlock` and `EndBlock` are composable functions, meaning that each module can implement its own `BeginBlock` and `EndBlock` logic. When needed, `BeginBlock` and `EndBlock` logic is implemented in the module's `handler`. Here is the standard way to proceed for `EndBlock` (`BeginBlock` follows the exact same pattern):
```go
func NewEndBlocker(k Keeper) sdk.EndBlocker {
return func(ctx sdk.Context, req abci.RequestEndBlock) (res abci.ResponseEndBlock) {
err := checkProposal(ctx, k)
if err != nil {
panic(err)
}
return
}
}
```
Do not forget that each module needs to declare its `BeginBlock` and `EndBlock` constructors at the application level. See [Application - Bridging it all together](app-structure.md).
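For instance, the application constructor could wire the module's `EndBlocker` into `BaseApp` roughly like this (a hypothetical sketch; check the exact setter name against your SDK version):
```go
// app/app.go (hypothetical sketch): register the module's EndBlocker with BaseApp.
app.SetEndBlocker(simpleGov.NewEndBlocker(app.simpleGovKeeper))
```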
For the purpose of our simple governance application, we will use `EndBlock` to automatically tally the results of the vote. Here are the different steps that will be performed:
1. Get the oldest proposal from the `ProposalProcessingQueue`
2. Check if the `CurrentBlock` is the block at which the voting period for this proposal ends. If yes, go to step 3. If no, exit.
3. Check if proposal is accepted or rejected. Update the proposal status.
4. Pop the proposal from the `ProposalProcessingQueue` and go back to 1.
Let us perform a quick safety analysis on this process.
- The loop will not run forever because the number of proposals in `ProposalProcessingQueue` is finite
- The computation should not be too expensive because tallying of individual proposals is not expensive and the number of proposals is expected to be relatively low. That is because proposals require a `Deposit` to be accepted. `MinDeposit` should be high enough so that we don't have too many `Proposals` in the queue.
- In the eventuality that the application becomes so successful that the `ProposalProcessingQueue` ends up containing so many proposals that the blockchain starts slowing down, the module should be modified to mitigate the situation. One clever way of doing it is to cap the number of iterations per individual `EndBlock` at `MaxIteration`. This way, tallying will be spread over many blocks if the number of proposals is too large, and block time should remain stable. This would require modifying the current check `if (CurrentBlock == Proposal.SubmitBlock + VotingPeriod)` to `if (CurrentBlock > Proposal.SubmitBlock + VotingPeriod) AND (Proposal.Status == ProposalStatusActive)`.
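Putting these steps together, here is a hedged sketch of what `checkProposal` could look like with the `MaxIteration` cap applied; the queue helpers and constants below are illustrative names, not the example repo's exact code.
```go
// Hedged sketch: ProposalQueueHead, ProposalQueuePop, SetProposal, MaxIteration and
// VotingPeriod are illustrative names, not the example repo's exact API.
func checkProposal(ctx sdk.Context, k Keeper) error {
	for i := 0; i < MaxIteration; i++ {
		// 1. Peek at the oldest proposal in the queue.
		proposalID, proposal, found := k.ProposalQueueHead(ctx)
		if !found {
			return nil // nothing left to tally
		}
		// 2. Stop if its voting period is not over yet, or it was already processed.
		if ctx.BlockHeight() <= proposal.SubmitBlock+VotingPeriod || proposal.State != "Open" {
			return nil
		}
		// 3. Tally the votes and update the proposal status.
		if proposal.YesVotes > proposal.NoVotes {
			proposal.State = "Accepted"
		} else {
			proposal.State = "Rejected"
		}
		k.SetProposal(ctx, proposalID, proposal)
		// 4. Pop it from the queue and continue with the next one.
		k.ProposalQueuePop(ctx)
	}
	return nil
}
```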

View File

@ -0,0 +1,31 @@
## Module initialization
First, let us go into the modules folder (`x/`) and create a folder for our module.
```bash
cd x/
mkdir simple_governance
cd simple_governance
mkdir -p client/cli client/rest
touch client/cli/simple_governance.go client/rest/simple_governance.go errors.go handler.go handler_test.go keeper_keys.go keeper_test.go keeper.go test_common.go test_types.go types.go wire.go
```
We have just added the files we will need. Your module's folder should now look something like this:
```
x
└─── simple_governance
├─── client
│ ├─── cli
│ │ └─── simple_governance.go
│ └─── rest
│ └─── simple_governance.go
├─── errors.go
├─── handler.go
├─── keeper_keys.go
├─── keeper.go
├─── types.go
└─── wire.go
```
Let us go through each of these files in detail.

View File

@ -0,0 +1,96 @@
## Keeper
**File: [`x/simple_governance/keeper.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/keeper.go)**
### Short intro to keepers
`Keepers` are a module abstraction that handle reading/writing to the module store. This is a practical implementation of the **Object Capability Model** for Cosmos.
As module developers, we have to define keepers to interact with our module's store(s) not only from within our module, but also from other modules. When another module wants to access one of our module's store(s), a keeper for this store has to be passed to it at the application level. In practice, it will look like this:
```go
// in app.go
// instantiate keepers
keeperA = moduleA.newKeeper(app.moduleAStoreKey)
keeperB = moduleB.newKeeper(app.moduleBStoreKey)
// pass instance of keeperA to handler of module B
app.Router().
AddRoute("moduleA", moduleA.NewHandler(keeperA)).
AddRoute("moduleB", moduleB.NewHandler(keeperB, keeperA)) // Here module B can access one of module A's store via the keeperA instance
```
`KeeperA` grants a set of capabilities to the handler of module B. When developing a module, it is good practice to think about the sensitivity of the different capabilities that can be granted through keepers. For example, some modules may need to read and write to module A's main store, while others only need to read it. If a module has multiple stores, then some keepers could grant access to all of them, while others would only grant access to specific sub-stores. It is the job of the module developer to make sure it is easy for application developers to instantiate a keeper with the right capabilities. Of course, the handler of a module will most likely get an unrestricted instance of that module's keeper.
### Store for our app
Before we delve into the keeper itself, let us see what objects we need to store in our governance sub-store, and how to index them.
- `Proposals` will be indexed by `'proposals'|<proposalID>`.
- `Votes` (`Yes`, `No`, `Abstain`) will be indexed by `'proposals'|<proposalID>|'votes'|<voterAddress>`.
Notice the quote marks around `'proposals'` and `'votes'`. They indicate that these are constant keywords. So, for example, the option cast by the voter with address `0x01` on proposal `0101` will be stored at index `'proposals'|0101|'votes'|0x01`.
These keywords are used to facilitate range queries. Range queries (TODO: Link to formal spec) allow developers to query a subspace of the store and return an iterator. They are made possible by the nice properties of the [IAVL+ tree](https://github.com/tendermint/iavl) that is used in the background. In practice, this means that it is possible to store and query a Key-Value pair in O(1), while still being able to iterate over a given subspace of Key-Value pairs. For example, we can query all the addresses that voted on a given proposal, along with their votes, by calling `rangeQuery(SimpleGovStore, <'proposals'|proposalID|'votes'>)`.
### Keepers for our app
In our case, we only have one store to access, the `SimpleGov` store. We will need to set and get values inside this store via our keeper. However, these two actions do not have the same impact in terms of security. While there should be no problem in granting read access to our store to other modules, write access is far more sensitive. So ideally application developers should be able to create either a governance mapper that can only get values from the store, or one that can both get and set values. To this end, we will introduce two keepers: `Keeper` and `KeeperRead`. When application developers create their application, they will be able to decide which of our module's keepers to use.
Now, let us try to think about which keeper from **external** modules our module's keepers need access to.
Each proposal requires a deposit. This means our module needs to be able to both read and write to the module that handles tokens, which is the `bank` module. We also need to be able to determine the voting power of each voter based on their stake. To this end, we need read access to the store of the `staking` module. However, we don't need write access to this store. We should therefore indicate that in our module, and the application developer should be careful to only pass a read-only keeper of the `staking` module to our module's handler.
With all that in mind, we can define the structure of our `Keeper`:
```go
type Keeper struct {
    SimpleGov sdk.StoreKey      // Key to our module's store
    cdc       *wire.Codec       // Codec to encode/decode structs
    ck        bank.Keeper       // Needed to handle deposits. This module only requires read/write access to Atom balances
    sm        stake.Keeper      // Needed to compute voting power. This module only needs read access to the staking store
    codespace sdk.CodespaceType // Reserves space for error codes
}
```
And the structure of our `KeeperRead`:
```go
type KeeperRead struct {
Keeper
}
```
`KeeperRead` will inherit all methods from `Keeper`, except those that we override. These will be the methods that perform writes to the store.
### Functions and Methods
The first function we have to create is the constructor.
```go
func NewKeeper(SimpleGov sdk.StoreKey, ck bank.Keeper, sm stake.Keeper, codespace sdk.CodespaceType) Keeper
```
This function is called from the main [`app.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/app/app.go) file to instantiate a new `Keeper`. A similar function exists for `KeeperRead`.
```go
func NewKeeperRead(SimpleGov sdk.StoreKey, ck bank.Keeper, sm stake.Keeper, codespace sdk.CodespaceType) KeeperRead
```
Depending on the needs of the application and its modules, either `Keeper`, `KeeperRead`, or both will be instantiated at the application level.
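For reference, the constructor body mostly wires its arguments into the struct. Here is a minimal sketch, assuming the codec is created and the module's concrete types are registered inside the constructor (the actual tutorial code may wire the codec differently):
```go
func NewKeeper(SimpleGov sdk.StoreKey, ck bank.Keeper, sm stake.Keeper, codespace sdk.CodespaceType) Keeper {
	cdc := wire.NewCodec()
	RegisterWire(cdc) // register this module's concrete message types on the keeper's codec

	return Keeper{
		SimpleGov: SimpleGov,
		cdc:       cdc,
		ck:        ck,
		sm:        sm,
		codespace: codespace,
	}
}
```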
*Note: Both the `Keeper` type name and the `NewKeeper()` function name are standard names used in every module. It is not a requirement to follow this convention, but doing so makes life easier for application developers.*
Now, let us describe the methods we need for our module's `Keeper`. For the full implementation, please refer to `keeper.go`. A short sketch of the proposal getter and setter follows the list.
- `GetProposal`: Get a `Proposal` given a `proposalID`. Proposals need to be decoded from bytes before they can be read.
- `SetProposal`: Set a `Proposal` at index `'proposals'|<proposalID>`. Proposals need to be encoded to bytes before they can be stored.
- `NewProposalID`: A function to generate a new unique `proposalID`.
- `GetVote`: Get a vote `Option` given a `proposalID` and a `voterAddress`.
- `SetVote`: Set a vote `Option` given a `proposalID` and a `voterAddress`.
- Proposal Queue methods: These methods implement a standard proposal queue to store `Proposals` on a First-In First-Out basis. It is used to tally the votes at the end of the voting period.
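As announced above, here is a minimal sketch of the proposal getter and setter. The `GenerateProposalKey` helper and the `ErrProposalNotFound` constructor are assumptions (they would live in `keeper_keys.go` and `errors.go`), and the exact codec calls may differ from the tutorial's code.
```go
func (k Keeper) GetProposal(ctx sdk.Context, proposalID int64) (Proposal, sdk.Error) {
	store := ctx.KVStore(k.SimpleGov)
	bz := store.Get(GenerateProposalKey(proposalID)) // key: 'proposals'|<proposalID>
	if bz == nil {
		return Proposal{}, ErrProposalNotFound(k.codespace)
	}
	var proposal Proposal
	k.cdc.MustUnmarshalBinary(bz, &proposal) // decode the stored bytes back into a Proposal
	return proposal, nil
}

func (k Keeper) SetProposal(ctx sdk.Context, proposalID int64, proposal Proposal) sdk.Error {
	store := ctx.KVStore(k.SimpleGov)
	bz := k.cdc.MustMarshalBinary(proposal) // encode the Proposal to bytes before storing it
	store.Set(GenerateProposalKey(proposalID), bz)
	return nil
}
```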
The last thing that needs to be done is to override certain methods for the `KeeperRead` type. `KeeperRead` should not have write access to the stores. Therefore, we will override the methods `SetProposal()`, `SetVote()` and `NewProposalID()`, as well as `setProposalQueue()` from the Proposal Queue's methods. For `KeeperRead`, these methods will simply return an error.
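For example, the override of `SetProposal` on `KeeperRead` could look like the following sketch, where the store is never touched (the specific SDK error used here is an assumption):
```go
func (k KeeperRead) SetProposal(ctx sdk.Context, proposalID int64, proposal Proposal) sdk.Error {
	return sdk.ErrUnauthorized("KeeperRead cannot write to the store") // read-only keeper: reject writes
}
```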
*Note: If you look at the code, you'll notice that the context `ctx` is a parameter of many of the methods. The context `ctx` provides useful information on the current state such as the current block height and allows the keeper `k` to access the `KVStore`. You can check all the methods of `ctx` [here](https://github.com/cosmos/cosmos-sdk/blob/develop/types/context.go#L144-L168)*.

View File

@ -0,0 +1,32 @@
## Rest API
**File: [`x/simple_governance/client/rest/simple_governance.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/client/rest/simple_governance.go)**
The Rest Server, also called [Light-Client Daemon (LCD)](https://github.com/cosmos/cosmos-sdk/tree/master/client/lcd), provides support for **HTTP queries**.
________________________________________________________
USER INTERFACE <=======> REST SERVER <=======> FULL-NODE
________________________________________________________
It allows end-users that do not want to run full-nodes themselves to interact with the chain. The LCD can be configured to perform **Light-Client verification** via the flag `--trust-node`, which can be set to `true` or `false`.
- If *light-client verification* is enabled, the Rest Server acts as a light-client and needs to be run on the end-user's machine. It allows them to interact with the chain in a trustless way without having to store the whole chain locally.
- If *light-client verification* is disabled, the Rest Server acts as a simple relayer for HTTP calls. In this setting, the Rest Server need not be run on the end-user's machine. Instead, it will probably be run by the same entity that operates the full-node the server connects to. This mode is useful if end-users trust the full-node operator and do not want to store anything locally.
Now, let us define endpoints that will be available for users to query through HTTP requests. These endpoints will be defined in a `simple_governance.go` file stored in the `rest` folder.
| Method | URL | Description |
|--------|---------------------------------|-------------------------------------------------------------|
| GET | /proposals | Range query to get all submitted proposals |
| POST | /proposals | Submit a new proposal |
| GET | /proposals/{id} | Returns a proposal given its ID |
| GET | /proposals/{id}/votes | Range query to get all the votes casted on a given proposal |
| POST | /proposals/{id}/votes | Cast a vote on a given proposal |
| GET | /proposals/{id}/votes/{address} | Returns the vote of a given address on a given proposal |
It is the job of module developers to provide sensible endpoints so that front-end developers and service providers can properly interact with them.
Additionally, here is a [link](https://hackernoon.com/restful-api-designing-guidelines-the-best-practices-60e1d954e7c9) to REST API best practices.
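As a rough illustration, wiring these endpoints into the LCD could look like the sketch below. It assumes the `gorilla/mux` router used by the Rest Server; the handler constructor names are placeholders for functions that would be defined in the same file to decode the request, query or broadcast through the node, and write a JSON response.
```go
func RegisterRoutes(r *mux.Router, cdc *wire.Codec, k Keeper) {
	r.HandleFunc("/proposals", submitProposalHandlerFn(cdc, k)).Methods("POST")
	r.HandleFunc("/proposals", queryProposalsHandlerFn(cdc, k)).Methods("GET")
	r.HandleFunc("/proposals/{id}", queryProposalHandlerFn(cdc, k)).Methods("GET")
	r.HandleFunc("/proposals/{id}/votes", queryProposalVotesHandlerFn(cdc, k)).Methods("GET")
	r.HandleFunc("/proposals/{id}/votes", postVoteHandlerFn(cdc, k)).Methods("POST")
	r.HandleFunc("/proposals/{id}/votes/{address}", queryVoteHandlerFn(cdc, k)).Methods("GET")
}
```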

View File

@ -0,0 +1,23 @@
## Types
**File: [`x/simple_governance/types.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/types.go)**
In this file, we define the custom types for our module. This includes the types from the [State](app-design.md#State) section and the custom message types defined in the [Messages](app-design.md#Messages) section.
For each new type that is not a message, it is possible to add methods that make sense in the context of the application. In our case, we will implement an `updateTally` function to easily update the tally of a given proposal as vote messages come in.
Messages are a bit different. They implement the `Message` interface defined in the SDK's `types` folder. Here are the methods you need to implement when you define a custom message type:
- `Type()`: This function returns the name of our module's route. When messages are processed by the application, they are routed using the string returned by the `Type()` method.
- `GetSignBytes()`: Returns the byte representation of the message. It is used to sign the message.
- `GetSigners()`: Returns address(es) of the signer(s).
- `ValidateBasic()`: This function is used to discard obviously invalid messages. It is called at the beginning of `runTx()` in the baseapp file. If `ValidateBasic()` does not return `nil`, the app stops running the transaction.
- `Get()`: A basic getter, returns some property of the message.
- `String()`: Returns a human-readable version of the message
For our simple governance messages, this means:
- `Type()` will return `"simpleGov"`
- For `SubmitProposalMsg`, we need to make sure that the attributes are not empty and that the deposit is both valid and positive. Note that this is only basic validation; we therefore do not check in this method that the sender has sufficient funds to pay for the deposit
- For `VoteMsg`, we check that the address and option are valid and that the proposalID is not negative.
- As for the other methods, less customization is required. You can check the code to see a standard way of implementing these; a short sketch of `VoteMsg` is shown below.
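Here is the sketch announced above: a partial `VoteMsg` implementing a few of these methods. The field names and the specific SDK error constructors are assumptions chosen to match the description, not the verbatim tutorial code.
```go
type VoteMsg struct {
	ProposalID int64
	Option     string
	Voter      sdk.AccAddress
}

// Type routes the message to our module's handler
func (msg VoteMsg) Type() string { return "simpleGov" }

// GetSigners declares who must sign the message
func (msg VoteMsg) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{msg.Voter} }

// ValidateBasic performs stateless sanity checks on the message
func (msg VoteMsg) ValidateBasic() sdk.Error {
	if len(msg.Voter) == 0 {
		return sdk.ErrInvalidAddress(msg.Voter.String())
	}
	if msg.ProposalID < 0 {
		return sdk.ErrUnknownRequest("proposalID cannot be negative")
	}
	if msg.Option != "Yes" && msg.Option != "No" && msg.Option != "Abstain" {
		return sdk.ErrUnknownRequest("invalid vote option")
	}
	return nil
}
```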

View File

@ -0,0 +1,13 @@
## Wire
**File: [`x/simple_governance/wire.go`](https://github.com/cosmos/cosmos-sdk/blob/fedekunze/module_tutorial/examples/simpleGov/x/simple_governance/wire.go)**
The `wire.go` file allows developers to register the concrete message types of their module into the codec. In our case, we have two messages to declare:
```go
func RegisterWire(cdc *wire.Codec) {
cdc.RegisterConcrete(SubmitProposalMsg{}, "simple_governance/SubmitProposalMsg", nil)
cdc.RegisterConcrete(VoteMsg{}, "simple_governance/VoteMsg", nil)
}
```
Don't forget to call this function in `app.go` (see [Application - Bridging it all together](app-structure.md) for more).
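At the application level, the call typically lives next to the other modules' registrations in the function that builds the app-wide codec. A minimal sketch, with an illustrative package alias for our module:
```go
func MakeCodec() *wire.Codec {
	cdc := wire.NewCodec()
	simpleGov.RegisterWire(cdc) // registers SubmitProposalMsg and VoteMsg
	// other modules (auth, bank, stake, ...) register their concrete types here as well
	return cdc
}
```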

View File

@ -0,0 +1,18 @@
## Installation
Once you have finalized your application, install it using `go get`. The following commands will install the pre-built modules and examples of the SDK as well as your `simpleGov` application:
```bash
go get github.com/<your_username>/cosmos-sdk
cd $GOPATH/src/github.com/<your_username>/cosmos-sdk
make get_vendor_deps
make install
make install_examples
```
Check that the app is correctly installed by typing:
```bash
simplegovcli -h
simplegovd -h
```

View File

@ -0,0 +1,32 @@
## Setup
### Prerequisites
- Have [go](https://golang.org/dl/) and [git](https://git-scm.com/downloads) installed
- Don't forget to set your `PATH` and `GOPATH`
### Setup work environment
Go to the [Cosmos-SDK repo](https://github.com/cosmos/cosmos-sdk) and fork it. Then open a terminal and:
```bash
cd $GOPATH/src/github.com/your_username
git clone https://github.com/your_username/cosmos-sdk
cd cosmos-sdk
```
Now we'll add the original Cosmos-SDK repo as `upstream`, in case some cool feature or module gets merged there:
```bash
git remote add upstream https://github.com/cosmos/cosmos-sdk
git fetch upstream
git rebase upstream/master
```
We will also create a branch dedicated to our module:
```bash
git checkout -b my_new_application
```
We are all set!

View File

@ -0,0 +1,10 @@
## Starting your own project
To get started, you just have to follow these simple steps:
1. Clone the [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/tree/develop) repo
2. Code the modules needed by your application that do not already exist.
3. Create your app directory. In the app main file, import the module you need and instantiate the different stores.
4. Launch your blockchain.
Easy as pie! With the introduction over, let us delve into practice and learn how to code an SDK application with an example.

View File

@ -0,0 +1,19 @@
## Submit a proposal
Use the CLI to create a new proposal:
```bash
simplegovcli propose --title="Voting Period update" --description="Should we change the proposal voting period to 3 weeks?" --deposit=300Atoms
```
Get the details of your newly created proposal:
```bash
simplegovcli proposal 1
```
You can also check all the existing open proposals:
```bash
simplegovcli proposals --active=true
```

View File

@ -25,9 +25,9 @@ type VotingProcedure struct {
```go
type TallyingProcedure struct {
Threshold rational.Rational // Minimum propotion of Yes votes for proposal to pass. Initial value: 0.5
Veto rational.Rational // Minimum proportion of Veto votes to Total votes ratio for proposal to be vetoed. Initial value: 1/3
GovernancePenalty sdk.Rat // Penalty if validator does not vote
Threshold sdk.Dec // Minimum proportion of Yes votes for proposal to pass. Initial value: 0.5
Veto sdk.Dec // Minimum proportion of Veto votes to Total votes ratio for proposal to be vetoed. Initial value: 1/3
GovernancePenalty sdk.Dec // Penalty if validator does not vote
GracePeriod int64 // If validator entered validator set in this period of blocks before vote ended, governance penalty does not apply
}
```
@ -81,7 +81,7 @@ This type is used in a temp map when tallying
```go
type ValidatorGovInfo struct {
Minus sdk.Rat
Minus sdk.Dec
Vote Vote
}
```
@ -103,17 +103,17 @@ type Proposal struct {
VotingStartBlock int64 // Height of the block where MinDeposit was reached. -1 if MinDeposit is not reached
CurrentStatus ProposalStatus // Current status of the proposal
YesVotes sdk.Rat
NoVotes sdk.Rat
NoWithVetoVotes sdk.Rat
AbstainVotes sdk.Rat
YesVotes sdk.Dec
NoVotes sdk.Dec
NoWithVetoVotes sdk.Dec
AbstainVotes sdk.Dec
}
```
We also mention a method to update the tally for a given proposal:
```go
func (proposal Proposal) updateTally(vote byte, amount sdk.Rat)
func (proposal Proposal) updateTally(vote byte, amount sdk.Dec)
```
### Stores

View File

@ -7,7 +7,7 @@
The current annual inflation rate.
```golang
type Inflation sdk.Rat
type Inflation sdk.Dec
```
### InflationLastTime

View File

@ -16,4 +16,3 @@ EndBlock() ValidatorSetChanges
ClearTendermintUpdates()
return vsc
```

View File

@ -13,7 +13,7 @@ type Pool struct {
LooseTokens int64 // tokens not associated with any bonded validator
BondedTokens int64 // reserve of bonded tokens
InflationLastTime int64 // block which the last inflation was processed // TODO make time
Inflation sdk.Rat // current annual inflation rate
Inflation sdk.Dec // current annual inflation rate
DateLastCommissionReset int64 // unix timestamp for last commission accounting reset (daily)
}
@ -28,10 +28,10 @@ overall functioning of the stake module.
```golang
type Params struct {
InflationRateChange sdk.Rat // maximum annual change in inflation rate
InflationMax sdk.Rat // maximum inflation rate
InflationMin sdk.Rat // minimum inflation rate
GoalBonded sdk.Rat // Goal of percent bonded atoms
InflationRateChange sdk.Dec // maximum annual change in inflation rate
InflationMax sdk.Dec // maximum inflation rate
InflationMin sdk.Dec // minimum inflation rate
GoalBonded sdk.Dec // Goal of percent bonded atoms
MaxValidators uint16 // maximum number of validators
BondDenom string // bondable coin denomination
@ -74,9 +74,9 @@ type Validator struct {
Revoked bool // has the validator been revoked?
Status sdk.BondStatus // validator status (bonded/unbonding/unbonded)
Tokens sdk.Rat // delegated tokens (incl. self-delegation)
DelegatorShares sdk.Rat // total shares issued to a validator's delegators
SlashRatio sdk.Rat // increases each time the validator is slashed
Tokens sdk.Dec // delegated tokens (incl. self-delegation)
DelegatorShares sdk.Dec // total shares issued to a validator's delegators
SlashRatio sdk.Dec // increases each time the validator is slashed
Description Description // description terms for the validator
@ -88,10 +88,10 @@ type Validator struct {
}
type CommissionInfo struct {
Rate sdk.Rat // the commission rate of fees charged to any delegators
Max sdk.Rat // maximum commission rate which this validator can ever charge
ChangeRate sdk.Rat // maximum daily increase of the validator commission
ChangeToday sdk.Rat // commission rate change today, reset each day (UTC time)
Rate sdk.Dec // the commission rate of fees charged to any delegators
Max sdk.Dec // maximum commission rate which this validator can ever charge
ChangeRate sdk.Dec // maximum daily increase of the validator commission
ChangeToday sdk.Dec // commission rate change today, reset each day (UTC time)
LastChange int64 // unix timestamp of last commission change
}
@ -117,7 +117,7 @@ the transaction is the owner of the bond.
```golang
type Delegation struct {
Shares sdk.Rat // delegation shares recieved
Shares sdk.Dec // delegation shares received
Height int64 // last height bond updated
}
```
@ -178,8 +178,8 @@ the original redelegation has been completed.
```golang
type Redelegation struct {
SourceShares sdk.Rat // amount of source shares redelegating
DestinationShares sdk.Rat // amount of destination shares created at redelegation
SourceShares sdk.Dec // amount of source shares redelegating
DestinationShares sdk.Dec // amount of destination shares created at redelegation
CompleteTime int64 // unix time to complete redelegation
}
```

View File

@ -18,7 +18,7 @@ Other notes:
- `sender` denotes the address of the sender of the transaction
- `getXxx`, `setXxx`, and `removeXxx` functions are used to retrieve and
modify objects from the store
- `sdk.Rat` refers to a rational numeric type specified by the SDK.
- `sdk.Dec` refers to a decimal type specified by the SDK.
### TxCreateValidator
@ -34,9 +34,9 @@ type TxCreateValidator struct {
SelfDelegation coin.Coin
Description Description
Commission sdk.Rat
CommissionMax sdk.Rat
CommissionMaxChange sdk.Rat
Commission sdk.Dec
CommissionMax sdk.Dec
CommissionMaxChange sdk.Dec
}
@ -65,7 +65,7 @@ If either the `Description` (excluding `DateBonded` which is constant),
```golang
type TxEditCandidacy struct {
GovernancePubKey crypto.PubKey
Commission sdk.Rat
Commission sdk.Dec
Description Description
}
@ -199,7 +199,7 @@ type TxRedelegate struct {
DelegatorAddr Address
ValidatorFrom Validator
ValidatorTo Validator
Shares sdk.Rat
Shares sdk.Dec
CompletedTime int64
}

View File

@ -14,9 +14,9 @@ The [Cosmos Hub](/introduction/cosmos-hub.md) is based on [Tendermint](/introduc
The Cosmos Hub is a public Proof-Of-Stake (PoS) blockchain, meaning that a validator's weight is determined by the amount of staking tokens (Atoms) bonded as collateral. These Atoms can be staked directly by the validator or delegated to them by Atom holders.
Any user in the system can declare its intention to become a validator by sending a "declare-candidacy" transaction. From there, they become validator candidates.
Any user in the system can declare their intention to become a validator by sending a `create-validator` transaction. From there, they become validators.
The weight (i.e. total stake) of a candidate determines wether or not it is a validator, and also how frequently this node will have to propose a block and how much revenue it will obtain. Initially, only the top 100 validator candidates with the most weight will be validators. If validators double sign, are frequently offline or do not participate in governance, their staked Atoms (including Atoms of users that delegated to them) can be destroyed, or 'slashed'.
The weight (i.e. total stake) of a validator determines whether or not it is an active validator, and also how frequently this node will have to propose a block and how much revenue it will obtain. Initially, only the top 100 validators with the most weight will be active validators. If validators double sign, are frequently offline or do not participate in governance, their staked Atoms (including Atoms of users that delegated to them) can be destroyed, or 'slashed'.
### What is a full-node?
@ -28,7 +28,7 @@ Of course, it is possible and encouraged for any user to run full-nodes even if
Delegators are Atom holders who cannot, or do not want to run validator operations themselves. Through [Cosmos Voyager](/getting-started/voyager.md), a user can delegate Atoms to a validator and obtain a part of its revenue in exchange (for more detail on how revenue is distributed, see **What is the incentive to stake?** and **What is a validator's commission?** sections below).
Because they share revenue with their validators, delegators also share responsibility. Should a validator misbehave, each of its delegators will be partially slashed in proportion to their stake. This is why delegators should perform due diligence on validator candidates before delegating, as well as spreading their stake over multiple validators.
Because they share revenue with their validators, delegators also share responsibility. Should a validator misbehave, each of its delegators will be partially slashed in proportion to their stake. This is why delegators should perform due diligence on validators before delegating, as well as spreading their stake over multiple validators.
Delegators play a critical role in the system, as they are responsible for choosing validators. Being a delegator is not a passive role: Delegators should actively monitor the actions of their validators and participate in governance.
@ -36,21 +36,22 @@ Delegators play a critical role in the system, as they are responsible for choos
### How to become a validator?
Any participant in the network can signal that they want to become a validator by sending a "declare-candidacy" transaction, where they must fill out the following parameters:
Any participant in the network can signal that they want to become a validator by sending a `create-validator` transaction, where they must fill out the following parameters:
* Validator's PubKey: The validator must signal an account with which it will perform its validator duties. The private key associated with PubKey is used to sign _prevotes_ and _precommits_. This way, validators can have different accounts for validating and holding liquid funds.
* Validator's name
* Validator's PubKey: The private key associated with PubKey is used to sign _prevotes_ and _precommits_. This way, validators can have different accounts for validating and holding liquid funds.
* Validator's Address: Application level address. This is the address used to identify your validator publicly. The private key associated with this address is used to bond, unbond, claim rewards, and participate in governance (in MVP only).
* Validator's name (moniker)
* Validator's website (Optional)
* Validator's description (Optional)
* Initial commission rate: The commission rate on block provisions, block rewards and fees charged to delegators
* Maximum commission: The maximum commission rate which this validator candidate can charge
* Commission change rate: The maximum daily increase of the validator candidate commission
* Minimum self-bond amount: Minimum amount of Atoms the validator candidate need to have bonded at all time. If the validator's self-bonded stake falls below this limit, its entire staking pool will unbond.
* Initial self-bond amount: Initial amount Atoms the validator candidate wants to self-bond
* Maximum commission: The maximum commission rate which this validator can charge
* Commission change rate: The maximum daily increase of the validator commission
* Minimum self-bond amount: Minimum amount of Atoms the validator needs to have bonded at all times. If the validator's self-bonded stake falls below this limit, its entire staking pool will unbond.
* Initial self-bond amount: Initial amount of Atoms the validator wants to self-bond
Once a PubKey has declared candidacy, Atom holders can delegate atoms to it, effectively adding stake to this pool. The total stake of an address is the combination of Atoms bonded by delegators and Atoms self-bonded by the entity which designated itself.
Once a validator is created, Atom holders can delegate atoms to it, effectively adding stake to this pool. The total stake of an address is the combination of Atoms bonded by delegators and Atoms self-bonded by the entity which designated itself.
Out of all the candidates that signaled themselves, the 100 with the most stake are the ones who are designated as validators. If a validator's total stake falls below the top 100 then that validator loses its validator privileges. Over time, the maximum number of validators will increase, according to a predefined schedule:
Out of all the validators that signaled themselves, the 100 with the most stake are the ones designated as validators: they become **bonded validators**. If a validator's total stake falls below the top 100, that validator loses its validator privileges: it enters **unbonding mode** and, eventually, becomes **unbonded**. Over time, the maximum number of validators will increase, according to a predefined schedule:
* **Year 0:** 100
* **Year 1:** 113
@ -64,19 +65,48 @@ Out of all the candidates that signaled themselves, the 100 with the most stake
* **Year 9:** 300
* **Year 10:** 300
## Testnet
### How can I join the testnet?
The Testnet is a great environment to test your validator setup before launch.
We view testnet participation as a great way to signal to the community that you are ready and able to operate a validator. You can find all relevant information about the [testnet and more here](/getting-started/full-node.md).
We view testnet participation as a great way to signal to the community that you are ready and able to operate a validator. You can find all relevant information about the testnet [here](https://github.com/cosmos/cosmos-sdk/tree/develop/cmd/gaia/testnets) and [here](https://github.com/cosmos/testnets).
### What are the different types of keys?
In short, there are two types of keys:
- **Tendermint Key**: This is a unique key used to sign block hashes. It is associated with a public key `cosmosvalpub`.
  + Generated when the node is created with `gaiad init`.
  + Get this value with `gaiad tendermint show-validator`
  + e.g. cosmosvalpub1zcjduc3qcyj09qc03elte23zwshdx92jm6ce88fgc90rtqhjx8v0608qh5ssp0w94c
- **Application keys**: These keys are created from the application and used to sign transactions. As a validator, you will probably use one key to sign staking-related transactions, and another key to sign governance-related transactions. Application keys are associated with a public key `cosmosaccpub` and an address `cosmosaccaddr`. Both are derived from account keys generated by `gaiacli keys add`.
### What are the different states a validator can be in?
After a validator is created with a `create-validator` transaction, it can be in three states:
- `bonded`: Validator is in the active set and participates in consensus. Validator is earning rewards and can be slashed for misbehaviour.
- `unbonding`: Validator is not in the active set and does not participate in consensus. Validator is not earning rewards, but can still be slashed for misbehaviour. This is a transition state from `bonded` to `unbonded`. If the validator does not send a `rebond` transaction while in `unbonding` mode, it will take three weeks for the state transition to complete.
- `unbonded`: Validator is not in the active set, and therefore not signing blocks. Validator cannot be slashed and does not earn any rewards. It is still possible to delegate Atoms to this validator. Un-delegating from an `unbonded` validator is immediate.
Delegators have the same state as their validator.
*Note that delegations are not necessarily bonded. Atoms can be delegated and bonded, delegated and unbonding, delegated and unbonded, or liquid.*
### What is 'self-bond'? How can I increase my 'self-bond'?
### Is there a faucet?
If you want to obtain coins for the testnet, you can do so by using [this faucet](https://faucetcosmos.network)
If you want to obtain coins for the testnet, you can do so by using [this faucet](https://gaia.faucetcosmos.network/)
### Is there a minimum amount of Atoms that must be staked to be a validator?
### Is there a minimum amount of Atoms that must be staked to be an active (=bonded) validator?
There is no minimum. The top 100 validator candidates with the highest total stake (where total stake = self-bonded stake + delegators stake) are the validators.
There is no minimum. The top 100 validators with the highest total stake (where total stake = self-bonded stake + delegators stake) are the active validators.
### How will delegators choose their validators?
@ -87,7 +117,7 @@ Delegators are free to choose validators according to their own subjective crite
* **Commission rate:** Commission applied on revenue by validators before it is distributed to their delegators
* **Track record:** Delegators will likely look at the track record of the validators they plan to delegate to. This includes seniority, past votes on proposals, historical average uptime and how often the node was compromised.
Apart from these criteria that will be displayed in Cosmos Voyager, there will be a possibility for validators to signal a website address to complete their resume. Validators will need to build reputation one way or another to attract delegators. For example, it would be a good practice for validators to have their setup audited by third parties. Note though, that the Tendermint team will not approve or conduct any audit itself.
Apart from these criteria that will be displayed in Cosmos Voyager, there will be a possibility for validators to signal a website address to complete their resume. Validators will need to build reputation one way or another to attract delegators. For example, it would be a good practice for validators to have their setup audited by third parties. Note though, that the Tendermint team will not approve or conduct any audit itself. For more on due diligence, see [this blog post](https://medium.com/@interchain_io/3d0faf10ce6f)
## Responsibilities
@ -199,9 +229,9 @@ If a validator misbehaves, its bonded stake along with its delegators' stake and
* **Double signing:** If someone reports on chain A that a validator signed two blocks at the same height on chain A and chain B, this validator will get slashed on chain A
* **Unavailability:** If a validator's signature has not been included in the last X blocks, the validator will get slashed by a marginal amount proportional to X. If X is above a certain limit Y, then the validator will get unbonded
* **Non-voting:** If a validator did not vote on a proposal and once the fault is reported by a someone, its stake will receive a minor slash.
* **Non-voting:** If a validator did not vote on a proposal, its stake will receive a minor slash.
Note that even if a validator does not intentionally misbehave, it can still be slashed if its node crashes, looses connectivity, gets DDOSed, or if its private key is compromised. A complete document on the economics of the network will be published soon.
Note that even if a validator does not intentionally misbehave, it can still be slashed if its node crashes, loses connectivity, gets DDoSed, or if its private key is compromised.
### Do validators need to self-bond Atoms?
@ -251,7 +281,6 @@ Validators should expect to run an HSM that supports ed25519 keys. Here are pote
* Ledger Nano S
* Ledger BOLOS SGX enclave
* Thales nShield support
* Tendermint SGX enclave
The Tendermint team does not recommend one solution above the other. The community is encouraged to bolster the effort to improve HSMs and the security of key management.
@ -276,3 +305,5 @@ Validator nodes should only connect to full-nodes they trust because they operat
Sentry nodes can be quickly spun up or change their IP addresses. Because the links to the sentry nodes are in private IP space, an internet-based attacker cannot disturb them directly. This ensures validator block proposals and votes always make it to the rest of the network.
It is expected that good operating procedures on the part of validators will completely mitigate these threats.
For more on sentry node architecture, see [this](https://forum.cosmos.network/t/sentry-node-architecture-overview/454).

View File

@ -1,5 +1,9 @@
# Validator Setup
::: warning Current Testnet
The current testnet is `gaia-7005`.
:::
Before setting up your validator node, make sure you've already gone through the [Full Node Setup](/getting-started/full-node.md) guide.
## Running a Validator Node
@ -15,7 +19,7 @@ If you want to become a validator for the Hub's `mainnet`, you should [research
Your `cosmosvalpub` can be used to create a new validator by staking tokens. You can find your validator pubkey by running:
```bash
gaiad tendermint show_validator
gaiad tendermint show-validator
```
Next, craft your `gaiacli stake create-validator` command:
@ -27,10 +31,10 @@ Don't use more `steak` thank you have! You can always get more by using the [Fau
```bash
gaiacli stake create-validator \
--amount=5steak \
--pubkey=$(gaiad tendermint show_validator) \
--pubkey=$(gaiad tendermint show-validator) \
--address-validator=<account_cosmosaccaddr>
--moniker="choose a moniker" \
--chain-id=gaia-6002 \
--chain-id=gaia-7005 \
--name=<key_name>
```
@ -47,17 +51,18 @@ gaiacli stake edit-validator
--website="https://cosmos.network" \
--identity=6A0D65E29A4CBC8E
--details="To infinity and beyond!"
--chain-id=gaia-6002 \
--chain-id=gaia-7005 \
--name=<key_name>
```
### View Validator Description
View the validator's information with this command:
```bash
gaiacli stake validator \
--address-validator=<account_cosmosaccaddr> \
--chain-id=gaia-6002
--chain-id=gaia-7005
```
### Confirm Your Validator is Running
@ -65,7 +70,7 @@ gaiacli stake validator \
Your validator is active if the following command returns anything:
```bash
gaiacli advanced tendermint validator-set | grep "$(gaiad tendermint show_validator)"
gaiacli advanced tendermint validator-set | grep "$(gaiad tendermint show-validator)"
```
You should also be able to see your validator on the [Explorer](https://explorecosmos.network/validators). You are looking for the `bech32` encoded `address` in the `~/.gaiad/config/priv_validator.json` file.
@ -79,7 +84,7 @@ To be in the validator set, you need to have more total voting power than the 10
### Problem #1: My validator has `voting_power: 0`
Your validator has become auto-unbonded. In `gaia-6002`, we unbond validators if they do not vote on `50` of the last `100` blocks. Since blocks are proposed every ~2 seconds, a validator unresponsive for ~100 seconds will become unbonded. This usually happens when your `gaiad` process crashes.
Your validator has become auto-unbonded. In `gaia-7005`, we unbond validators if they do not vote on `50` of the last `100` blocks. Since blocks are proposed every ~2 seconds, a validator unresponsive for ~100 seconds will become unbonded. This usually happens when your `gaiad` process crashes.
Here's how you can return the voting power back to your validator. First, if `gaiad` is not running, start it up again:
@ -90,7 +95,7 @@ gaiad start
Wait for your full node to catch up to the latest block. Next, run the following command. Note that `<cosmosaccaddr>` is the address of your validator account, and `<name>` is the name of the validator account. You can find this info by running `gaiacli keys list`.
```bash
gaiacli stake unrevoke <cosmosaccaddr> --chain-id=gaia-6002 --name=<name>
gaiacli stake unrevoke <cosmosaccaddr> --chain-id=gaia-7005 --name=<name>
```
::: danger Warning

View File

@ -104,9 +104,9 @@ You should now see alice, bob and charlie's account all show up.
```
NAME: ADDRESS: PUBKEY:
alice 90B0B9BE0914ECEE0B6DB74E67B07A00056B9BBD 1624DE62201D47E63694448665F5D0217EA8458177728C91C373047A42BD3C0FB78BD0BFA7
bob 29D721F054537C91F618A0FDBF770DA51EF8C48D 1624DE6220F54B2A2CA9EB4EE30DE23A73D15902E087C09CC5616456DDDD3814769E2E0A16
charlie 2E8E13EEB8E3F0411ACCBC9BE0384732C24FBD5E 1624DE6220F8C9FB8B07855FD94126F88A155BD6EB973509AE5595EFDE1AF05B4964836A53
alice cosmosaccaddr1khygs0qh7gz3p4m39u00mjhvgvc2dcpxhsuh5f cosmosaccpub1addwnpepq0w037u5g7y7lvdvsred2dehg90j84k0weyss5ynysf0nnnax74agrsxns6
bob cosmosaccaddr18se8tz6kwwfga6k2yjsu7n64e9z52nen29rhzz cosmosaccpub1addwnpepqwe97n8lryxrzvamrvjfj24jys3uzf8wndfvqa2l7mh5nsv4jrvdznvyeg6
charlie cosmosaccaddr13wq5mklhn03ljpd4dkph5rflk5a3ssma2ag07q cosmosaccpub1addwnpepqdmtxv35rrmv2dvcr3yhfyxj7dzrd4z4rnhmclksq4g55a4wpl54clvx33l
```
@ -115,15 +115,15 @@ charlie 2E8E13EEB8E3F0411ACCBC9BE0384732C24FBD5E 1624DE6220F8C9FB8B07855FD94126F
Let's send bob and charlie some tokens. First, let's query alice's account so we can see what kind of tokens she has:
```
basecli account 90B0B9BE0914ECEE0B6DB74E67B07A00056B9BBD
basecli account cosmosaccaddr1khygs0qh7gz3p4m39u00mjhvgvc2dcpxhsuh5f
```
Where `90B0B9BE0914ECEE0B6DB74E67B07A00056B9BBD` is alice's address we got from running `basecli keys list`. You should see a large amount of "mycoin" there. If you search for bob's or charlie's address, the command will fail, because they haven't been added into the blockchain database yet since they have no coins. We need to send them some!
Where `cosmosaccaddr1khygs0qh7gz3p4m39u00mjhvgvc2dcpxhsuh5f` is alice's address we got from running `basecli keys list`. You should see a large amount of "mycoin" there. If you search for bob's or charlie's address, the command will fail, because they haven't been added into the blockchain database yet since they have no coins. We need to send them some!
The following command will send coins from alice to bob:
```
basecli send --from=alice --amount=10000mycoin --to=29D721F054537C91F618A0FDBF770DA51EF8C48D
basecli send --from=alice --amount=10000mycoin --to=cosmosaccaddr18se8tz6kwwfga6k2yjsu7n64e9z52nen29rhzz
--sequence=0 --chain-id=test-chain-AE4XQo
```
@ -136,13 +136,13 @@ Flag Descriptions:
Now if we check bob's account, it should have `10000 mycoin`. You can do so by running:
```
basecli account 29D721F054537C91F618A0FDBF770DA51EF8C48D
basecli account cosmosaccaddr18se8tz6kwwfga6k2yjsu7n64e9z52nen29rhzz
```
Now let's send some from bob to charlie. Make sure you send less than bob has, otherwise the transaction will fail:
```
basecli send --from=bob --amount=5000mycoin --to=2E8E13EEB8E3F0411ACCBC9BE0384732C24FBD5E
basecli send --from=bob --amount=5000mycoin --to=cosmosaccaddr13wq5mklhn03ljpd4dkph5rflk5a3ssma2ag07q
--sequence=0 --chain-id=test-chain-AE4XQo
```
@ -151,7 +151,7 @@ Note how we use the ``--from`` flag to select a different account to send from.
Let's now try to send from bob back to alice:
```
basecli send --from=bob --amount=3000mycoin --to=90B0B9BE0914ECEE0B6DB74E67B07A00056B9BBD
basecli send --from=bob --amount=3000mycoin --to=cosmosaccaddr1khygs0qh7gz3p4m39u00mjhvgvc2dcpxhsuh5f
--sequence=1 --chain-id=test-chain-AE4XQo
```
@ -179,7 +179,7 @@ starting this tutorial again or trying something new), the following
commands are run:
```
basecoind unsafe_reset_all
basecoind unsafe-reset-all
rm -rf ~/.basecoind
rm -rf ~/.basecli
```
@ -279,7 +279,7 @@ type TxInput struct {
Address []byte `json:"address"` // Hash of the PubKey
Coins Coins `json:"coins"` //
Sequence int `json:"sequence"` // Must be 1 greater than the last committed TxInput
Signature crypto.Signature `json:"signature"` // Depends on the PubKey type and the whole Tx
Signature []byte `json:"signature"` // Depends on the PubKey type and the whole Tx
PubKey crypto.PubKey `json:"pub_key"` // Is present iff Sequence == 0
}

View File

@ -62,8 +62,8 @@ func NewBasecoinApp(logger log.Logger, db dbm.DB, baseAppOptions ...func(*bam.Ba
// define and attach the mappers and keepers
app.accountMapper = auth.NewAccountMapper(
cdc,
app.keyAccount, // target store
func () auth.Account {
app.keyAccount, // target store
func() auth.Account {
return &types.AppAccount{}
},
)

View File

@ -10,7 +10,7 @@ import (
// Validator implements sdk.Validator
type Validator struct {
Address sdk.AccAddress
Power sdk.Rat
Power sdk.Dec
}
// Implements sdk.Validator
@ -29,18 +29,18 @@ func (v Validator) GetPubKey() crypto.PubKey {
}
// Implements sdk.Validator
func (v Validator) GetTokens() sdk.Rat {
return sdk.ZeroRat()
func (v Validator) GetTokens() sdk.Dec {
return sdk.ZeroDec()
}
// Implements sdk.Validator
func (v Validator) GetPower() sdk.Rat {
func (v Validator) GetPower() sdk.Dec {
return v.Power
}
// Implements sdk.Validator
func (v Validator) GetDelegatorShares() sdk.Rat {
return sdk.ZeroRat()
func (v Validator) GetDelegatorShares() sdk.Dec {
return sdk.ZeroDec()
}
// Implements sdk.Validator
@ -93,8 +93,8 @@ func (vs *ValidatorSet) ValidatorByPubKey(ctx sdk.Context, pubkey crypto.PubKey)
}
// TotalPower implements sdk.ValidatorSet
func (vs *ValidatorSet) TotalPower(ctx sdk.Context) sdk.Rat {
res := sdk.ZeroRat()
func (vs *ValidatorSet) TotalPower(ctx sdk.Context) sdk.Dec {
res := sdk.ZeroDec()
for _, val := range vs.Validators {
res = res.Add(val.Power)
}
@ -122,7 +122,7 @@ func (vs *ValidatorSet) RemoveValidator(addr sdk.AccAddress) {
}
// Implements sdk.ValidatorSet
func (vs *ValidatorSet) Slash(ctx sdk.Context, pubkey crypto.PubKey, height int64, power int64, amt sdk.Rat) {
func (vs *ValidatorSet) Slash(ctx sdk.Context, pubkey crypto.PubKey, height int64, power int64, amt sdk.Dec) {
panic("not implemented")
}

View File

@ -32,8 +32,8 @@ func TestValidatorSet(t *testing.T) {
addr2 := []byte("addr2")
base := &mock.ValidatorSet{[]mock.Validator{
{addr1, sdk.NewRat(1)},
{addr2, sdk.NewRat(2)},
{addr1, sdk.NewDec(1)},
{addr2, sdk.NewDec(2)},
}}
valset := NewValidatorSet(wire.NewCodec(), ctx.KVStore(key).Prefix([]byte("assoc")), base, 1, 5)

View File

@ -38,7 +38,7 @@ func NewHandler(keeper Keeper) sdk.Handler {
In the previous example, the keeper has an `oracle.Keeper`. `oracle.Keeper`s are generated by `NewKeeper`.
```go
func NewKeeper(key sdk.StoreKey, cdc *wire.Codec, valset sdk.ValidatorSet, supermaj sdk.Rat, timeout int64) Keeper {
func NewKeeper(key sdk.StoreKey, cdc *wire.Codec, valset sdk.ValidatorSet, supermaj sdk.Dec, timeout int64) Keeper {
return Keeper {
cdc: cdc,
key: key,

View File

@ -23,7 +23,7 @@ func (keeper Keeper) update(ctx sdk.Context, val sdk.Validator, valset sdk.Valid
// and recalculate voted power
hash := ctx.BlockHeader().ValidatorsHash
if !bytes.Equal(hash, info.Hash) {
info.Power = sdk.ZeroRat()
info.Power = sdk.ZeroDec()
info.Hash = hash
prefix := GetSignPrefix(p, keeper.cdc)
store := ctx.KVStore(keeper.key)

View File

@ -13,12 +13,12 @@ type Keeper struct {
valset sdk.ValidatorSet
supermaj sdk.Rat
supermaj sdk.Dec
timeout int64
}
// NewKeeper constructs a new keeper
func NewKeeper(key sdk.StoreKey, cdc *wire.Codec, valset sdk.ValidatorSet, supermaj sdk.Rat, timeout int64) Keeper {
func NewKeeper(key sdk.StoreKey, cdc *wire.Codec, valset sdk.ValidatorSet, supermaj sdk.Dec, timeout int64) Keeper {
if timeout < 0 {
panic("Timeout should not be negative")
}
@ -46,7 +46,7 @@ const (
// Info for each payload
type Info struct {
Power sdk.Rat
Power sdk.Dec
Hash []byte
LastSigned int64
Status InfoStatus
@ -55,7 +55,7 @@ type Info struct {
// EmptyInfo construct an empty Info
func EmptyInfo(ctx sdk.Context) Info {
return Info{
Power: sdk.ZeroRat(),
Power: sdk.ZeroDec(),
Hash: ctx.BlockHeader().ValidatorsHash,
LastSigned: ctx.BlockHeight(),
Status: Pending,

View File

@ -107,9 +107,9 @@ func TestOracle(t *testing.T) {
addr3 := []byte("addr3")
addr4 := []byte("addr4")
valset := &mock.ValidatorSet{[]mock.Validator{
{addr1, sdk.NewRat(7)},
{addr2, sdk.NewRat(7)},
{addr3, sdk.NewRat(1)},
{addr1, sdk.NewDec(7)},
{addr2, sdk.NewDec(7)},
{addr3, sdk.NewDec(1)},
}}
key := sdk.NewKVStoreKey("testkey")
@ -119,7 +119,7 @@ func TestOracle(t *testing.T) {
require.Nil(t, err)
ctx = ctx.WithBlockHeader(abci.Header{ValidatorsHash: bz})
ork := NewKeeper(key, cdc, valset, sdk.NewRat(2, 3), 100)
ork := NewKeeper(key, cdc, valset, sdk.NewDecWithPrec(667, 3), 100) // 66.7%
h := seqHandler(ork, key, sdk.CodespaceRoot)
// Nonmock.Validator signed, transaction failed
@ -171,7 +171,7 @@ func TestOracle(t *testing.T) {
require.Equal(t, 1, getSequence(ctx, key))
// Should handle mock.Validator set change
valset.AddValidator(mock.Validator{addr4, sdk.NewRat(12)})
valset.AddValidator(mock.Validator{addr4, sdk.NewDec(12)})
bz, err = json.Marshal(valset)
require.Nil(t, err)
ctx = ctx.WithBlockHeader(abci.Header{ValidatorsHash: bz})

Binary file not shown.

View File

@ -1,69 +0,0 @@
Terraform & Ansible
===================
WARNING: The Digital Ocean scripts are obsolete. They are here because they might still be useful for developers.
Automated deployments are done using `Terraform <https://www.terraform.io/>`__ to create servers on Digital Ocean then
`Ansible <http://www.ansible.com/>`__ to create and manage testnets on those servers.
Prerequisites
-------------
- Install `Terraform <https://www.terraform.io/downloads.html>`__ and `Ansible <http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`__ on a Linux machine.
- Create a `DigitalOcean API token <https://cloud.digitalocean.com/settings/api/tokens>`__ with read and write capability.
- Install the python dopy package (``pip install dopy``) (This is necessary for the digitalocean.py script for ansible.)
- Create SSH keys
::
export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
export TESTNET_NAME="remotenet"
export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"
export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub"
These will be used by both ``terraform`` and ``ansible``.
Create a remote network
-----------------------
::
make remotenet-start
Optionally, you can set the number of servers you want to launch and the name of the testnet (which defaults to remotenet):
::
TESTNET_NAME="mytestnet" SERVERS=7 make remotenet-start
Quickly see the /status endpoint
--------------------------------
::
make remotenet-status
Delete servers
--------------
::
make remotenet-stop
Logging
-------
You can ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on `this page <https://app.logz.io/#/dashboard/data-sources/Filebeat>`__, then:
::
yum install systemd-devel || echo "This will only work on RHEL-based systems."
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
go get github.com/mheese/journalbeat
ansible-playbook -i inventory/digital_ocean.py -l remotenet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345

65
networks/README.md Normal file
View File

@ -0,0 +1,65 @@
# Terraform & Ansible
Automated deployments are done using [Terraform](https://www.terraform.io/) to create servers on AWS then
[Ansible](http://www.ansible.com/) to create and manage testnets on those servers.
## Prerequisites
- Install [Terraform](https://www.terraform.io/downloads.html) and [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) on a Linux machine.
- Create an [AWS API token](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) with EC2 create capability.
- Create SSH keys
```
export AWS_ACCESS_KEY_ID="2345234jk2lh4234"
export AWS_SECRET_ACCESS_KEY="234jhkg234h52kh4g5khg34"
export TESTNET_NAME="remotenet"
export CLUSTER_NAME="remotenetvalidators"
export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"
export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub"
```
These will be used by both `terraform` and `ansible`.
## Create a remote network
```
SERVERS=1 REGION_LIMIT=1 make validators-start
```
The testnet name is what's going to be used in `--chain-id`, while the cluster name is the administrative tag in AWS for the servers. The code will create `SERVERS` servers in each availability zone, up to `REGION_LIMIT` regions, starting at us-east-2. (us-east-1 is excluded.) The bash script below does the same, but is sometimes more convenient to invoke.
```
./new-testnet.sh "$TESTNET_NAME" "$CLUSTER_NAME" 1 1
```
## Quickly see the /status endpoint
```
make validators-status
```
## Delete servers
```
make validators-stop
```
## Logging
You can ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on [this page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then:
```
yum install systemd-devel || echo "This will only work on RHEL-based systems."
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
go get github.com/mheese/journalbeat
ansible-playbook -i inventory/digital_ocean.py -l remotenet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
```
## Monitoring
You can install the DataDog agent with:
```
make datadog-install
```

View File

@ -1,78 +0,0 @@
Terraform & Ansible
===================
Automated deployments are done using `Terraform <https://www.terraform.io/>`__ to create servers on AWS then
`Ansible <http://www.ansible.com/>`__ to create and manage testnets on those servers.
Prerequisites
-------------
- Install `Terraform <https://www.terraform.io/downloads.html>`__ and `Ansible <http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`__ on a Linux machine.
- Create an `AWS API token <https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html>`__ with EC2 create capability.
- Create SSH keys
::
export AWS_ACCESS_KEY_ID="2345234jk2lh4234"
export AWS_SECRET_ACCESS_KEY="234jhkg234h52kh4g5khg34"
export TESTNET_NAME="remotenet"
export CLUSTER_NAME= "remotenetvalidators"
export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"
export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub"
These will be used by both ``terraform`` and ``ansible``.
Create a remote network
-----------------------
::
SERVERS=1 REGION_LIMIT=1 make validators-start
The testnet name is what's going to be used in --chain-id, while the cluster name is the administrative tag in AWS for the servers. The code will create SERVERS amount of servers in each availability zone up to the number of REGION_LIMITs, starting at us-east-2. (us-east-1 is excluded.) The below BaSH script does the same, but sometimes it's more comfortable for input.
::
./new-testnet.sh "$TESTNET_NAME" "$CLUSTER_NAME" 1 1
Quickly see the /status endpoint
--------------------------------
::
make validators-status
Delete servers
--------------
::
make validators-stop
Logging
-------
You can ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on `this page <https://app.logz.io/#/dashboard/data-sources/Filebeat>`__, then:
::
yum install systemd-devel || echo "This will only work on RHEL-based systems."
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
go get github.com/mheese/journalbeat
ansible-playbook -i inventory/digital_ocean.py -l remotenet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
Monitoring
----------
You can install DataDog agent using
::
make datadog-install

View File

@ -31,9 +31,9 @@ To start a 4 node testnet run:
make localnet-start
```
The nodes bind their RPC servers to ports 46657, 46660, 46662, and 46664 on the host.
The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the host.
This file creates a 4-node network using the gaiadnode image.
The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 46656-46657, 46659-46660, 46661-46662, and 46663-46664 respectively.
The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively.
To update the binary, just rebuild it and restart the nodes:

View File

@ -7,7 +7,7 @@ RUN apk update && \
VOLUME [ /gaiad ]
WORKDIR /gaiad
EXPOSE 46656 46657
EXPOSE 26656 26657
ENTRYPOINT ["/usr/bin/wrapper.sh"]
CMD ["start"]
STOPSIGNAL SIGTERM

View File

@ -0,0 +1,9 @@
init_config:
instances:
- collect_connection_state: true
excluded_interfaces:
- lo
- lo0
collect_rate_metrics: true
collect_count_metrics: true

View File

@ -0,0 +1,15 @@
init_config:
instances:
- name: ssh
search_string: ['ssh', 'sshd']
thresholds:
critical: [1, 5]
- name: gaiad
search_string: ['gaiad']
thresholds:
critical: [1, 1]
- name: gaiacli
search_string: ['gaiacli']
thresholds:
critical: [1, 1]

View File

@ -0,0 +1,7 @@
init_config:
instances:
- prometheus_url: http://127.0.0.1:26660
namespace: "gaiad"
metrics:
- p2p: *

View File

@ -0,0 +1,78 @@
-----BEGIN CERTIFICATE-----
MIIESTCCAzGgAwIBAgITBn+UV4WH6Kx33rJTMlu8mYtWDTANBgkqhkiG9w0BAQsF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAxMB4XDTE1MTAyMjAwMDAwMFoXDTI1MTAxOTAwMDAwMFowRjEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEVMBMGA1UECxMMU2VydmVyIENB
IDFCMQ8wDQYDVQQDEwZBbWF6b24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDCThZn3c68asg3Wuw6MLAd5tES6BIoSMzoKcG5blPVo+sDORrMd4f2AbnZ
cMzPa43j4wNxhplty6aUKk4T1qe9BOwKFjwK6zmxxLVYo7bHViXsPlJ6qOMpFge5
blDP+18x+B26A0piiQOuPkfyDyeR4xQghfj66Yo19V+emU3nazfvpFA+ROz6WoVm
B5x+F2pV8xeKNR7u6azDdU5YVX1TawprmxRC1+WsAYmz6qP+z8ArDITC2FMVy2fw
0IjKOtEXc/VfmtTFch5+AfGYMGMqqvJ6LcXiAhqG5TI+Dr0RtM88k+8XUBCeQ8IG
KuANaL7TiItKZYxK1MMuTJtV9IblAgMBAAGjggE7MIIBNzASBgNVHRMBAf8ECDAG
AQH/AgEAMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUWaRmBlKge5WSPKOUByeW
dFv5PdAwHwYDVR0jBBgwFoAUhBjMhTTsvAyUlC4IWZzHshBOCggwewYIKwYBBQUH
AQEEbzBtMC8GCCsGAQUFBzABhiNodHRwOi8vb2NzcC5yb290Y2ExLmFtYXpvbnRy
dXN0LmNvbTA6BggrBgEFBQcwAoYuaHR0cDovL2NydC5yb290Y2ExLmFtYXpvbnRy
dXN0LmNvbS9yb290Y2ExLmNlcjA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3Js
LnJvb3RjYTEuYW1hem9udHJ1c3QuY29tL3Jvb3RjYTEuY3JsMBMGA1UdIAQMMAow
CAYGZ4EMAQIBMA0GCSqGSIb3DQEBCwUAA4IBAQCFkr41u3nPo4FCHOTjY3NTOVI1
59Gt/a6ZiqyJEi+752+a1U5y6iAwYfmXss2lJwJFqMp2PphKg5625kXg8kP2CN5t
6G7bMQcT8C8xDZNtYTd7WPD8UZiRKAJPBXa30/AbwuZe0GaFEQ8ugcYQgSn+IGBI
8/LwhBNTZTUVEWuCUUBVV18YtbAiPq3yXqMB48Oz+ctBWuZSkbvkNodPLamkB2g1
upRyzQ7qDn1X8nn8N8V7YJ6y68AtkHcNSRAnpTitxBKjtKPISLMVCx7i4hncxHZS
yLyKQXhw2W2Xs0qLeC1etA+jTGDK4UfLeC0SF7FSi8o5LL21L8IzApar2pR/
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEkjCCA3qgAwIBAgITBn+USionzfP6wq4rAfkI7rnExjANBgkqhkiG9w0BAQsF
ADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNj
b3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4x
OzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1
dGhvcml0eSAtIEcyMB4XDTE1MDUyNTEyMDAwMFoXDTM3MTIzMTAxMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
jgSubJrIqg0CAwEAAaOCATEwggEtMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgGGMB0GA1UdDgQWBBSEGMyFNOy8DJSULghZnMeyEE4KCDAfBgNVHSMEGDAW
gBScXwDfqgHXMCs4iKK4bUqc8hGRgzB4BggrBgEFBQcBAQRsMGowLgYIKwYBBQUH
MAGGImh0dHA6Ly9vY3NwLnJvb3RnMi5hbWF6b250cnVzdC5jb20wOAYIKwYBBQUH
MAKGLGh0dHA6Ly9jcnQucm9vdGcyLmFtYXpvbnRydXN0LmNvbS9yb290ZzIuY2Vy
MD0GA1UdHwQ2MDQwMqAwoC6GLGh0dHA6Ly9jcmwucm9vdGcyLmFtYXpvbnRydXN0
LmNvbS9yb290ZzIuY3JsMBEGA1UdIAQKMAgwBgYEVR0gADANBgkqhkiG9w0BAQsF
AAOCAQEAYjdCXLwQtT6LLOkMm2xF4gcAevnFWAu5CIw+7bMlPLVvUOTNNWqnkzSW
MiGpSESrnO09tKpzbeR/FoCJbM8oAxiDR3mjEH4wW6w7sGDgd9QIpuEdfF7Au/ma
eyKdpwAJfqxGF4PcnCZXmTA5YpaP7dreqsXMGz7KQ2hsVxa81Q4gLv7/wmpdLqBK
bRRYh5TmOTFffHPLkIhqhBGWJ6bt2YFGpn6jcgAKUj6DiAdjd4lpFw85hdKrCEVN
0FE6/V1dN2RMfjCyVSRCnTawXZwXgWHxyvkQAiSr6w10kY17RSlQOYiypok1JR4U
akcjMS9cmvqtmg5iUaQqqcT5NJ0hGA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEdTCCA12gAwIBAgIJAKcOSkw0grd/MA0GCSqGSIb3DQEBCwUAMGgxCzAJBgNV
BAYTAlVTMSUwIwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTIw
MAYDVQQLEylTdGFyZmllbGQgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
eTAeFw0wOTA5MDIwMDAwMDBaFw0zNDA2MjgxNzM5MTZaMIGYMQswCQYDVQQGEwJV
UzEQMA4GA1UECBMHQXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTElMCMGA1UE
ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjE7MDkGA1UEAxMyU3RhcmZp
ZWxkIFNlcnZpY2VzIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVDDrEKvlO4vW+GZdfjohTsR8/
y8+fIBNtKTrID30892t2OGPZNmCom15cAICyL1l/9of5JUOG52kbUpqQ4XHj2C0N
Tm/2yEnZtvMaVq4rtnQU68/7JuMauh2WLmo7WJSJR1b/JaCTcFOD2oR0FMNnngRo
Ot+OQFodSk7PQ5E751bWAHDLUu57fa4657wx+UX2wmDPE1kCK4DMNEffud6QZW0C
zyyRpqbn3oUYSXxmTqM6bam17jQuug0DuDPfR+uxa40l2ZvOgdFFRjKWcIfeAg5J
Q4W2bHO7ZOphQazJ1FTfhy/HIrImzJ9ZVGif/L4qL8RVHHVAYBeFAlU5i38FAgMB
AAGjgfAwge0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0O
BBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMB8GA1UdIwQYMBaAFL9ft9HO3R+G9FtV
rNzXEMIOqYjnME8GCCsGAQUFBwEBBEMwQTAcBggrBgEFBQcwAYYQaHR0cDovL28u
c3MyLnVzLzAhBggrBgEFBQcwAoYVaHR0cDovL3guc3MyLnVzL3guY2VyMCYGA1Ud
HwQfMB0wG6AZoBeGFWh0dHA6Ly9zLnNzMi51cy9yLmNybDARBgNVHSAECjAIMAYG
BFUdIAAwDQYJKoZIhvcNAQELBQADggEBACMd44pXyn3pF3lM8R5V/cxTbj5HD9/G
VfKyBDbtgB9TxF00KGu+x1X8Z+rLP3+QsjPNG1gQggL4+C/1E2DUBc7xgQjB3ad1
l08YuW3e95ORCLp+QCztweq7dp4zBncdDQh/U90bZKuCJ/Fp1U1ervShw3WnWEQt
8jxwmKy6abaVd38PMV4s/KCHOkdp8Hlf9BRUpJVeEXgSYCfOn8J3/yNTd126/+pZ
59vPr5KW7ySaNRB6nJHGDn2Z9j8Z3/VyVOEVqQdZe4O/Ui5GjLIAZHYcSNPYeehu
VsyuLAOQ1xk4meTKCRlb/weWsKh/NEnfVqn3sF/tM+2MR7cwA130A4w=
-----END CERTIFICATE-----

View File

@ -0,0 +1,35 @@
# see "man logrotate" for details
# rotate log files daily
daily
# keep 4 days worth of backlogs
rotate 4
# create new (empty) log files after rotating old ones
create
# use date as a suffix of the rotated file
dateext
# compress rotated log files
compress
# RPM packages drop log rotation information into this directory
include /etc/logrotate.d
# no packages own wtmp and btmp -- we'll rotate them here
/var/log/wtmp {
    monthly
    create 0664 root utmp
    minsize 1M
    rotate 1
}

/var/log/btmp {
    missingok
    monthly
    create 0600 root utmp
    rotate 1
}
# system-specific logs may also be configured here.
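Since this file replaces the system-wide /etc/logrotate.conf, it is worth exercising it once in debug mode, which prints what logrotate would do without rotating anything:

```
logrotate -d /etc/logrotate.conf
```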

View File

@ -0,0 +1,13 @@
/var/log/cron
/var/log/maillog
/var/log/messages
/var/log/secure
/var/log/spooler
{
missingok
sharedscripts
postrotate
/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
service datadog-agent restart 2> /dev/null || true
endscript
}

View File

@ -3,3 +3,8 @@
- name: restart datadog-agent
  service: name=datadog-agent state=restarted

- name: restart rsyslog
  service: name=rsyslog state=restarted

- name: restart journald
  service: name=systemd-journald state=restarted

View File

@ -13,3 +13,46 @@
DD_API_KEY: "{{DD_API_KEY}}"
DD_HOST_TAGS: "testnet:{{TESTNET_NAME}},cluster:{{CLUSTER_NAME}}"
- name: Set datadog.yaml config
  template: src=datadog.yaml.j2 dest=/etc/datadog-agent/datadog.yaml
  notify: restart datadog-agent

- name: Set metrics config
  copy: src=conf.d/ dest=/etc/datadog-agent/conf.d/
  notify: restart datadog-agent

- name: Disable journald rate-limiting
  lineinfile: "dest=/etc/systemd/journald.conf regexp={{item.regexp}} line='{{item.line}}'"
  with_items:
    - { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" }
    - { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" }
    - { regexp: "^#SystemMaxFileSize", line: "SystemMaxFileSize=500M" }
  notify: restart journald

- name: As long as Datadog does not support journald on RPM-based linux, we enable rsyslog
  yum: "name={{item}} state=installed"
  with_items:
    - rsyslog
    - rsyslog-gnutls

#- name: Get DataDog certificate for rsyslog
#  get_url: url=https://docs.datadoghq.com/crt/intake.logs.datadoghq.com.crt dest=/etc/ssl/certs/intake.logs.datadoghq.com.crt
- name: Get DataDog certificate for rsyslog
  copy: src=intake.logs.datadoghq.com.crt dest=/etc/ssl/certs/intake.logs.datadoghq.com.crt

- name: Add datadog config to rsyslog
  template: src=datadog.conf.j2 dest=/etc/rsyslog.d/datadog.conf mode=0600
  notify: restart rsyslog

- name: Set logrotate to rotate daily so syslog does not use up all space
  copy: src=logrotate.conf dest=/etc/logrotate.conf

- name: Set syslog to restart datadog-agent after logrotate
  copy: src=syslog dest=/etc/logrotate.d/syslog

#semanage port -a -t syslog_tls_port_t -p tcp 10516
- name: Enable rsyslog to report to port 10516 in SELinux
  seport: ports=10516 proto=tcp reload=yes setype=syslog_tls_port_t state=present
  notify: restart rsyslog
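On an SELinux-enforcing host, the port label added by the `seport` task above can be verified with the same tooling referenced in the comment (plain `semanage`, nothing project-specific):

```
sudo semanage port -l | grep syslog_tls_port_t
```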

View File

@ -0,0 +1,14 @@
$template DatadogFormat,"{{DD_API_KEY}} <%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% - - - %msg%\n"
$imjournalRatelimitInterval 0
$imjournalRatelimitBurst 0
$DefaultNetstreamDriver gtls
$DefaultNetstreamDriverCAFile /etc/ssl/certs/intake.logs.datadoghq.com.crt
$ActionSendStreamDriver gtls
$ActionSendStreamDriverMode 1
$ActionSendStreamDriverAuthMode x509/name
$ActionSendStreamDriverPermittedPeer *.logs.datadoghq.com
*.* @@intake.logs.datadoghq.com:10516;DatadogFormat
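After this template is rendered into /etc/rsyslog.d/datadog.conf, rsyslog's built-in syntax check is a cheap way to catch mistakes before the `restart rsyslog` handler fires; `-N1` only validates the configuration and exits:

```
rsyslogd -N1
```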

View File

@ -0,0 +1,561 @@
# The host of the Datadog intake server to send Agent data to
dd_url: https://app.datadoghq.com
# The Datadog api key to associate your Agent's data with your organization.
# Can be found here:
# https://app.datadoghq.com/account/settings
api_key: {{DD_API_KEY}}
# If you need a proxy to connect to the Internet, provide it here (default:
# disabled). You can use the 'no_proxy' list to specify hosts that should
# bypass the proxy. These settings might impact your checks requests, please
# refer to the specific check documentation for more details. Environment
# variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (comma-separated string) will
# override the values set here. See https://docs.datadoghq.com/agent/proxy/.
#
# proxy:
# http: http(s)://user:password@proxy_for_http:port
# https: http(s)://user:password@proxy_for_https:port
# no_proxy:
# - host1
# - host2
# Setting this option to "yes" will tell the agent to skip validation of SSL/TLS certificates.
# This may be necessary if the agent is running behind a proxy. See this page for details:
# https://github.com/DataDog/dd-agent/wiki/Proxy-Configuration#using-haproxy-as-a-proxy
# skip_ssl_validation: no
# Setting this option to "yes" will force the agent to only use TLS 1.2 when
# pushing data to the url specified in "dd_url".
# force_tls_12: no
# Force the hostname to whatever you want. (default: auto-detected)
# hostname: mymachine.mydomain
# Make the agent use "hostname -f" on unix-based systems as a last resort
# way of determining the hostname instead of Golang "os.Hostname()"
# This will be enabled by default in version 6.4
# More information at https://dtdg.co/flag-hostname-fqdn
# hostname_fqdn: false
# Set the host's tags (optional)
tags: ['testnet:{{TESTNET_NAME}}','cluster:{{CLUSTER_NAME}}']
# - mytag
# - env:prod
# - role:database
# Histogram and Historate configuration
#
# Configure which aggregated value to compute. Possible values are: min, max,
# median, avg, sum and count.
#
# histogram_aggregates: ["max", "median", "avg", "count"]
#
# Configure which percentiles will be computed. Must be a list of float
# between 0 and 1.
# Warning: percentiles must be specified as yaml strings
#
# histogram_percentiles: ["0.95"]
#
# Copy histogram values to distributions for true global distributions (in beta)
# This will increase the number of custom metrics created
# histogram_copy_to_distribution: false
#
# A prefix to add to distribution metrics created when histogram_copy_to_distributions is true
# histogram_copy_to_distribution_prefix: ""
# Forwarder timeout in seconds
# forwarder_timeout: 20
# The forwarder retries failed requests. Use this setting to change the
# maximum length of the forwarder's retry queue (each request in the queue
# takes no more than 2MB in memory)
# forwarder_retry_queue_max_size: 30
# The number of workers used by the forwarder. Please note each worker will
# open an outbound HTTP connection towards Datadog's metrics intake at every
# flush.
# forwarder_num_workers: 1
# Collect AWS EC2 custom tags as agent tags
collect_ec2_tags: true
# The path containing check configuration files
# By default, uses the conf.d folder located in the agent configuration folder.
# confd_path:
# Additional path where to search for Python checks
# By default, uses the checks.d folder located in the agent configuration folder.
# additional_checksd:
# The port for the go_expvar server
# expvar_port: 5000
# The port on which the IPC api listens
# cmd_port: 5001
# The port for the browser GUI to be served
# Setting 'GUI_port: -1' turns off the GUI completely
# Default is '5002' on Windows and macOS ; turned off on Linux
# GUI_port: -1
# The Agent runs workers in parallel to execute checks. By default the number
# of workers is set to 1. If set to 0 the agent will automatically determine
# the best number of runners needed based on the number of checks running. This
# would optimize the check collection time but may produce CPU spikes.
# check_runners: 1
# Metadata collection should always be enabled, except if you are running several
# agents/dsd instances per host. In that case, only one agent should have it on.
# WARNING: disabling it on every agent will lead to display and billing issues
# enable_metadata_collection: true
# Enable the gohai collection of systems data
# enable_gohai: true
# IPC api server timeout in seconds
# server_timeout: 15
# Some environments may have the procfs file system mounted in a miscellaneous
# location. The procfs_path configuration parameter provides a mechanism to
# override the standard default location: '/proc' - this setting will trickle
# down to integrations and affect their behavior if they rely on the psutil
# python package.
# procfs_path: /proc
# BETA: Encrypted Secrets (Linux only)
#
# This feature is in beta and its options or behaviour might break between
# minor or bugfix releases of the Agent.
#
# The agent can call an external command to fetch secrets. The command will be
# executed maximum once per instance containing an encrypted password.
# Secrets are cached by the agent, which avoids executing the
# secret_backend_command again to fetch an already known secret (useful when
# combined with Autodiscovery). This feature is still in beta.
#
# For more information see: https://github.com/DataDog/datadog-agent/blob/master/docs/agent/secrets.md
#
# Path to the script to execute. The script must belong to the same user used
# to run the agent. Executable right must be given to the agent and no rights
# for 'group' or 'other'.
# secret_backend_command: /path/to/command
#
# A list of arguments to give to the command at each run (optional)
# secret_backend_arguments:
# - argument1
# - argument2
#
# The size in bytes of the buffer used to store the command answer (apply to
# both stdout and stderr)
# secret_backend_output_max_size: 1024
#
# The timeout to execute the command, in seconds
# secret_backend_timeout: 5
# Metadata providers, add or remove from the list to enable or disable collection.
# Intervals are expressed in seconds. You can also set a provider's interval to 0
# to disable it.
# metadata_providers:
# - name: k8s
# interval: 60
# DogStatsd
#
# If you don't want to enable the DogStatsd server, set this option to no
# use_dogstatsd: yes
#
# Make sure your client is sending to the same UDP port
# dogstatsd_port: 8125
#
# The host to bind to receive external metrics (used only by the dogstatsd
# server for now). For dogstatsd this is ignored if
# 'dogstatsd_non_local_traffic' is set to true
# bind_host: localhost
#
# Dogstatsd can also listen for metrics on a Unix Socket (*nix only).
# Set to a valid filesystem path to enable.
# dogstatsd_socket: /var/run/dogstatsd/dsd.sock
#
# When using Unix Socket, dogstatsd can tag metrics with container metadata.
# If running dogstatsd in a container, host PID mode (e.g. with --pid=host) is required.
# dogstatsd_origin_detection: false
#
# The buffer size use to receive statsd packet, in bytes
# dogstatsd_buffer_size: 1024
#
# Whether dogstatsd should listen to non local UDP traffic
# dogstatsd_non_local_traffic: no
#
# Publish dogstatsd's internal stats as Go expvars
# dogstatsd_stats_enable: no
#
# How many items in the dogstatsd's stats circular buffer
# dogstatsd_stats_buffer: 10
#
# The port for the go_expvar server
# dogstatsd_stats_port: 5000
#
# The number of bytes allocated to dogstatsd's socket receive buffer (POSIX
# system only). By default, this value is set by the system. If you need to
# increase the size of this buffer but keep the OS default value the same, you
# can set dogstatsd's receive buffer size here. The maximum accepted value
# might change depending on the OS.
# dogstatsd_so_rcvbuf:
#
# If you want to forward every packet received by the dogstatsd server
# to another statsd server, uncomment these lines.
# WARNING: Make sure that forwarded packets are regular statsd packets and not "dogstatsd" packets,
# as your other statsd server might not be able to handle them.
# statsd_forward_host: address_of_own_statsd_server
# statsd_forward_port: 8125
#
# If you want all statsd metrics coming from this host to be namespaced
# you can configure the namespace below. Each metric received will be prefixed
# with the namespace before it's sent to Datadog.
# statsd_metric_namespace:
# Logs agent
#
# Logs agent is disabled by default
logs_enabled: true
#
# Enable logs collection for all containers, disabled by default
# logs_config:
# container_collect_all: false
#
# JMX
#
# jmx_pipe_path:
# jmx_pipe_name: dd-auto_discovery
#
# If you only run Autodiscovery tests, jmxfetch might fail to pick up custom_jar_paths
# set in the check templates. If that is the case, you can force custom jars here.
# jmx_custom_jars:
# - /jmx-jars/jboss-cli-client.jar
#
# When running in a memory cgroup, openjdk 8u131 and higher can automatically adjust
# its heap memory usage in accordance to the cgroup/container's memory limit.
# Default is false: we'll set a Xmx of 200MB if none is configured.
# Note: older openjdk versions and other jvms might fail to start if this option is set
#
# jmx_use_cgroup_memory_limit: true
#
# Autoconfig
#
# Directory containing configuration templates
# autoconf_template_dir: /datadog/check_configs
#
# The providers the Agent should call to collect checks configurations.
# Please note the File Configuration Provider is enabled by default and cannot
# be configured.
# config_providers:
## The kubelet provider handles templates embedded in pod annotations, see
## https://docs.datadoghq.com/guides/autodiscovery/#template-source-kubernetes-pod-annotations
# - name: kubelet
# polling: true
## The docker provider handles templates embedded in container labels, see
## https://docs.datadoghq.com/guides/autodiscovery/#template-source-docker-label-annotations
# - name: docker
# polling: true
# - name: etcd
# polling: true
# template_dir: /datadog/check_configs
# template_url: http://127.0.0.1
# username:
# password:
# - name: consul
# polling: true
# template_dir: /datadog/check_configs
# template_url: http://127.0.0.1
# ca_file:
# ca_path:
# cert_file:
# key_file:
# username:
# password:
# token:
# - name: zookeeper
# polling: true
# template_dir: /datadog/check_configs
# template_url: 127.0.0.1
# username:
# password:
# Logging
#
# log_level: info
# log_file: /var/log/datadog/agent.log
# Set to 'yes' to output logs in JSON format
# log_format_json: no
# Set to 'no' to disable logging to stdout
# log_to_console: yes
# Set to 'yes' to disable logging to the log file
# disable_file_logging: no
# Set to 'yes' to enable logging to syslog.
#
# log_to_syslog: no
#
# If 'syslog_uri' is left undefined/empty, a local domain socket connection will be attempted
#
# syslog_uri:
#
# Set to 'yes' to output in an RFC 5424-compliant format
#
# syslog_rfc: no
#
# If TLS enabled, you must specify a path to a PEM certificate here
#
# syslog_pem: /path/to/certificate.pem
#
# If TLS enabled, you must specify a path to a private key here
#
# syslog_key: /path/to/key.pem
#
# If TLS enabled, you may enforce TLS verification here (defaults to true)
#
# syslog_tls_verify: yes
#
# Autodiscovery
#
# Change the root directory to look at to get cgroup statistics. Useful when running inside a
# container with host directories mounted on a different folder.
# Default if environment variable "DOCKER_DD_AGENT" is set to "yes"
# "/host/sys/fs/cgroup" and "/sys/fs/cgroup" if not.
#
# container_cgroup_root: /host/sys/fs/cgroup/
#
# Change the root directory to look at to get proc statistics. Useful when running inside a
# container with host directories mounted on a different folder.
# Default if environment variable "DOCKER_DD_AGENT" is set to "yes"
# "/host/proc" and "/proc" if not.
#
# container_proc_root: /host/proc
#
# Choose "auto" if you want to let the agent find any relevant listener on your host
# At the moment, the only auto listener supported is docker
# If you have already set docker anywhere in the listeners, the auto listener is ignored
# listeners:
# - name: auto
# - name: docker
#
# Exclude containers from metrics and AD based on their name or image:
# An excluded container will not get any individual container metric reported for it.
# Please note that the `docker.containers.running`, `.stopped`, `.running.total` and
# `.stopped.total` metrics are not affected by these settings and always count all
# containers. This does not affect your per-container billing.
#
# How it works: include first.
# If a container matches an exclude rule, it won't be included unless it first matches an include rule.
#
# Rules are regexp.
#
# Examples:
# exclude all, except containers based on the 'ubuntu' image or the 'debian' image.
# ac_exclude: ["image:.*"]
# ac_include: ["image:ubuntu", "image:debian"]
#
# include all, except containers based on the 'ubuntu' image.
# ac_exclude: ["image:ubuntu"]
# ac_include: []
#
# exclude all debian images except containers with a name starting with 'frontend'.
# ac_exclude: ["image:debian"]
# ac_include: ["name:frontend.*"]
#
# ac_exclude: []
# ac_include: []
#
#
# Exclude default pause containers from orchestrators.
#
# By default the agent will not monitor kubernetes/openshift pause
# container. They will still be counted in the container count (just like
# excluded containers) since ignoring them would give a wrong impression
# about the docker daemon load.
#
# exclude_pause_container: true
# Exclude default containers from DockerCloud:
# The following configuration will instruct the agent to ignore the containers from Docker Cloud.
# You can remove the ones you want to collect.
# ac_exclude: ["image:dockercloud/network-daemon","image:dockercloud/cleanup","image:dockercloud/logrotate","image:dockercloud/events","image:dockercloud/ntpd"]
# ac_include: []
#
# You can also use the regex to ignore them all:
# ac_exclude: ["image:dockercloud/*"]
# ac_include: []
#
# The default timeout value when connecting to the docker daemon
# is 5 seconds. It can be configured with this option.
# docker_query_timeout: 5
#
# Docker tag extraction
#
# We can extract container label or environment variables
# as metric tags. If you prefix your tag name with +, it
# will only be added to high cardinality metrics (docker check)
#
# docker_labels_as_tags:
# label_name: tag_name
# high_cardinality_label_name: +tag_name
# docker_env_as_tags:
# ENVVAR_NAME: tag_name
#
# Example:
# docker_labels_as_tags:
# com.docker.compose.service: service_name
# com.docker.compose.project: +project_name
#
# Kubernetes tag extraction
#
# We can extract pod labels and annotations as metric tags. If you prefix your
# tag name with +, it will only be added to high cardinality metrics
#
# kubernetes_pod_labels_as_tags:
# app: kube_app
# pod-template-hash: +kube_pod-template-hash
#
# kubernetes_pod_annotations_as_tags:
# app: kube_app
# pod-template-hash: +kube_pod-template-hash
#
# ECS integration
#
# URL where the ECS agent can be found. Standard cases will be autodetected.
# ecs_agent_url: http://localhost:51678
#
# Kubernetes kubelet connectivity
#
# The kubelet host and port should be autodetected when running inside a pod.
# If you run into connectivity issues, you can set these options according to
# your cluster setup:
# kubernetes_kubelet_host: autodetected
# kubernetes_http_kubelet_port: 10255
# kubernetes_https_kubelet_port: 10250
#
# When using HTTPS, we verify the kubelet's certificate, you can tune this:
# kubelet_tls_verify: true
# kubelet_client_ca: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
#
# If authentication is needed, the agent will use the pod's serviceaccount's
# credentials. If you want to use a different account, or are running the agent
# on the host, you can set the credentials to use here:
# kubelet_auth_token_path: /path/to/file
# kubelet_client_crt: /path/to/key
# kubelet_client_key: /path/to/key
#
# Kubernetes apiserver integration
#
# When running in a pod, the agent will automatically use the pod's serviceaccount
# to authenticate with the apiserver. If you wish to install the agent out of a pod
# or customise connection parameters, you can provide the path to a KubeConfig file
# see https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/
#
# kubernetes_kubeconfig_path: /path/to/file
#
# In order to collect Kubernetes service names, the agent needs certain rights (see RBAC documentation in
# [docker readme](https://github.com/DataDog/datadog-agent/blob/master/Dockerfiles/agent/README.md#kubernetes)).
# You can disable this option or set how often (in seconds) the agent refreshes the internal mapping of services to
# ContainerIDs with the following options:
# kubernetes_collect_metadata_tags: true
# kubernetes_metadata_tag_update_freq: 60
# kubernetes_apiserver_client_timeout: 10
# kubernetes_apiserver_poll_freq: 30
#
# To collect Kubernetes events, leader election must be enabled and collect_kubernetes_events set to true.
# Only the leader will collect events. More details about events [here](https://github.com/DataDog/datadog-agent/blob/master/Dockerfiles/agent/README.md#event-collection).
# collect_kubernetes_events: false
#
#
# Leader Election settings, more details about leader election [here](https://github.com/DataDog/datadog-agent/blob/master/Dockerfiles/agent/README.md#leader-election)
# To enable the leader election on this node, set the leader_election variable to true.
# leader_election: false
# The leader election lease is an integer in seconds.
# leader_lease_duration: 60
#
# Node labels that should be collected and their name in host tags. Off by default.
# Some of these labels are redundant with metadata collected by
# cloud provider crawlers (AWS, GCE, Azure)
#
# kubernetes_node_labels_as_tags:
# kubernetes.io/hostname: nodename
# beta.kubernetes.io/os: os
# Process agent specific settings
#
process_config:
  # A string indicating the enabled state of the Process Agent.
  # If "false" (the default) it will only collect containers.
  # If "true" it will collect containers and processes.
  # If "disabled" it will be disabled altogether and won't start.
  enabled: "true"
# The full path to the file where process-agent logs will be written.
# log_file:
# The interval, in seconds, at which we will run each check. If you want consistent
# behavior between real-time you may set the Container/ProcessRT intervals to 10.
# Defaults to 10s for normal checks and 2s for others.
# intervals:
# container:
# container_realtime:
# process:
# process_realtime:
# A list of regex patterns that will exclude a process if matched.
# blacklist_patterns:
# How many check results to buffer in memory when POST fails. The default is usually fine.
# queue_size:
# The maximum number of file descriptors to open when collecting net connections.
# Only change if you are running out of file descriptors from the Agent.
# max_proc_fds:
# The maximum number of processes or containers per message.
# Only change if the defaults are causing issues.
# max_per_message:
# Overrides the path to the Agent bin used for getting the hostname. The default is usually fine.
# dd_agent_bin:
# Overrides of the environment we pass to fetch the hostname. The default is usually fine.
# dd_agent_env:
# Trace Agent Specific Settings
#
# apm_config:
# Whether or not the APM Agent should run
# enabled: true
# The environment tag that Traces should be tagged with
# Will inherit from "env" tag if none is applied here
# env: none
# The port that the Receiver should listen on
# receiver_port: 8126
# Whether the Trace Agent should listen for non local traffic
# Only enable if Traces are being sent to this Agent from another host/container
# apm_non_local_traffic: false
# Extra global sample rate to apply on all the traces
# This sample rate is combined to the sample rate from the sampler logic, still promoting interesting traces
# From 1 (no extra rate) to 0 (don't sample at all)
# extra_sample_rate: 1.0
# Maximum number of traces per second to sample.
# The limit is applied over an average over a few minutes ; much bigger spikes are possible.
# Set to 0 to disable the limit.
# max_traces_per_second: 10
# A blacklist of regular expressions can be provided to disable certain traces based on their resource name
# all entries must be surrounded by double quotes and separated by commas
# Example: ["(GET|POST) /healthcheck", "GET /V1"]
# ignore_resources: []
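Once this template is rendered to /etc/datadog-agent/datadog.yaml and the agent has restarted, the effective configuration and the state of the checks can be inspected with the agent's own CLI (assuming Datadog Agent 6):

```
sudo datadog-agent configcheck
sudo datadog-agent status
```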

View File

@ -3,4 +3,3 @@
- name: restart journald
  service: name=systemd-journald state=restarted

View File

@ -15,6 +15,3 @@
command: "systemd-tmpfiles --create --prefix /var/log/journal"
notify: restart journald
#- name: Ability to get the core dump on SIGABRT
# shell: "ulimit -c unlimited"

View File

@ -24,6 +24,6 @@
- name: Reset network
  when: UNSAFE_RESET_ALL | default(false) | bool
  command: "sudo -u gaiad gaiad unsafe_reset_all"
  command: "sudo -u gaiad gaiad unsafe-reset-all"
  notify: restart gaiad

View File

@ -0,0 +1,13 @@
---
# Set the core file size to unlimited to allow the system to generate core dumps
- hosts: all
  any_errors_fatal: true
  gather_facts: no
  tasks:
    - name: Set core file size to unlimited to be able to get the core dump on SIGABRT
      shell: "ulimit -c unlimited"

View File

@ -1,5 +1,7 @@
---
#DD_API_KEY
- hosts: all
  any_errors_fatal: true
  gather_facts: no

View File

@ -1,7 +1,6 @@
########################################
### WARNING: The DigitalOcean scripts are deprecated. They are still here because
### they might be useful for developers.
### Use -f to call this Makefile: "make -f Makefile.do target"
# Name of the testnet. Used in chain-id.
TESTNET_NAME?=remotenet

View File

@ -0,0 +1,58 @@
# Terraform & Ansible
WARNING: The DigitalOcean scripts are deprecated. They are kept here because they may still be useful for developers.
Automated deployments are done using [Terraform](https://www.terraform.io/) to create servers on DigitalOcean, then
[Ansible](http://www.ansible.com/) to create and manage testnets on those servers.
## Prerequisites
- Install [Terraform](https://www.terraform.io/downloads.html) and [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) on a Linux machine.
- Create a [DigitalOcean API token](https://cloud.digitalocean.com/settings/api/tokens) with read and write capability.
- Install the Python `dopy` package (`pip install dopy`); it is required by the `digital_ocean.py` inventory script used by Ansible.
- Create SSH keys (a minimal `ssh-keygen` example follows this list)
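If you do not already have a key pair at the paths used below, one can be generated first (a minimal example; the path and empty passphrase are placeholders to adjust):

```
ssh-keygen -t rsa -b 4096 -f "$HOME/.ssh/id_rsa" -N ""
```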
```
export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
export TESTNET_NAME="remotenet"
export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"
export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub"
```
These will be used by both `terraform` and `ansible`.
## Create a remote network
```
make remotenet-start
```
Optionally, you can set the number of servers you want to launch and the name of the testnet (which defaults to remotenet):
```
TESTNET_NAME="mytestnet" SERVERS=7 make remotenet-start
```
## Quickly see the /status endpoint
```
make remotenet-status
```
## Delete servers
```
make remotenet-stop
```
## Logging
You can ship logs to Logz.io, a hosted Elastic Stack (Elasticsearch, Logstash and Kibana) provider, and set up your nodes to log there automatically. Create an account and get your API key from the notes on [this page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then:
```
yum install systemd-devel || echo "This will only work on RHEL-based systems."
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
go get github.com/mheese/journalbeat
ansible-playbook -i inventory/digital_ocean.py -l remotenet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
```

View File

@ -1,53 +0,0 @@
Using Terraform
===============
This is a `Terraform <https://www.terraform.io/>`__ configuration that sets up DigitalOcean droplets.
Prerequisites
-------------
- Install `HashiCorp Terraform <https://www.terraform.io>`__ on a linux machine.
- Create a `DigitalOcean API token <https://cloud.digitalocean.com/settings/api/tokens>`__ with read and write capability.
- Create SSH keys
Build
-----
::
export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
export TESTNET_NAME="remotenet"
export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub"
export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa"
terraform init
terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_PUBLIC_FILE="$SSH_PUBLIC_FILE" -var SSH_PRIVATE_FILE="$SSH_PRIVATE_FILE"
At the end you will get a list of IP addresses that belong to your new droplets.
Destroy
-------
Run the below:
::
terraform destroy -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_PUBLIC_FILE="$SSH_PUBLIC_FILE" -var SSH_PRIVATE_FILE="$SSH_PRIVATE_FILE"
Good to know
------------
The DigitalOcean API was not always reliable. If terraform fails to set up a specific server (for example cluster[2]), check
the regions variable and remove any data center names you find unreliable. The variable is defined in cluster/variables.tf.
Example:
::
variable "regions" {
description = "Regions to launch in"
type = "list"
default = ["TOR1", "LON1"]
}

Some files were not shown because too many files have changed in this diff Show More