Merge remote-tracking branch 'origin/develop' into rigel/validator-unbonding

This commit is contained in:
rigelrozanski 2018-08-27 19:34:03 -04:00
commit 78f98fec16
81 changed files with 444 additions and 393 deletions

View File

@ -23,6 +23,7 @@ BREAKING CHANGES
* [core] \#1807 Switch from use of rational to decimal
* [types] \#1901 Validator interface's GetOwner() renamed to GetOperator()
* [types] \#2119 Parsed error messages and ABCI log errors to make them more human readable.
* [simulation] Rename TestAndRunTx to Operation [#2153](https://github.com/cosmos/cosmos-sdk/pull/2153)
* Tendermint
@ -45,12 +46,15 @@ FEATURES
IMPROVEMENTS
* [tools] Improved terraform and ansible scripts for infrastructure deployment
* [tools] Added ansible script to enable process core dumps
* Gaia REST API (`gaiacli advanced rest-server`)
* [x/stake] \#2000 Added tests for new staking endpoints
* Gaia CLI (`gaiacli`)
* [cli] #2060 removed `--select` from `block` command
* [cli] #2128 fixed segfault when exporting directly after `gaiad init`
* Gaia
* [x/stake] [#2023](https://github.com/cosmos/cosmos-sdk/pull/2023) Terminate iteration loop in `UpdateBondedValidators` and `UpdateBondedValidatorsFull` when the first revoked validator is encountered and perform a sanity check.
@ -59,10 +63,10 @@ IMPROVEMENTS
* SDK
* [tools] Make get_vendor_deps deletes `.vendor-new` directories, in case scratch files are present.
* [cli] \#1632 Add integration tests to ensure `basecoind init && basecoind` start sequences run successfully for both `democoin` and `basecoin` examples.
* [simulation] Make timestamps randomized [#2153](https://github.com/cosmos/cosmos-sdk/pull/2153)
* Tendermint
BUG FIXES
* Gaia REST API (`gaiacli advanced rest-server`)

View File

@ -26,7 +26,7 @@ const DefaultKeyPass = "12345678"
var (
// bonded tokens given to genesis validators/accounts
freeFermionVal = int64(100)
freeFermionsAcc = int64(50)
freeFermionsAcc = sdk.NewInt(50)
)
// State to Unmarshal
@ -183,11 +183,11 @@ func GaiaAppGenState(cdc *wire.Codec, appGenTxs []json.RawMessage) (genesisState
accAuth := auth.NewBaseAccountWithAddress(genTx.Address)
accAuth.Coins = sdk.Coins{
{genTx.Name + "Token", sdk.NewInt(1000)},
{"steak", sdk.NewInt(freeFermionsAcc)},
{"steak", freeFermionsAcc},
}
acc := NewGenesisAccount(&accAuth)
genaccs[i] = acc
stakeData.Pool.LooseTokens = stakeData.Pool.LooseTokens.Add(sdk.NewDec(freeFermionsAcc)) // increase the supply
stakeData.Pool.LooseTokens = stakeData.Pool.LooseTokens.Add(sdk.NewDecFromInt(freeFermionsAcc)) // increase the supply
// add the validator
if len(genTx.Name) > 0 {
@ -199,7 +199,7 @@ func GaiaAppGenState(cdc *wire.Codec, appGenTxs []json.RawMessage) (genesisState
// add some new shares to the validator
var issuedDelShares sdk.Dec
validator, stakeData.Pool, issuedDelShares = validator.AddTokensFromDel(stakeData.Pool, freeFermionVal)
validator, stakeData.Pool, issuedDelShares = validator.AddTokensFromDel(stakeData.Pool, sdk.NewInt(freeFermionVal))
stakeData.Validators = append(stakeData.Validators, validator)
// create the self-delegation from the issuedDelShares

View File

@ -85,8 +85,8 @@ func appStateFn(r *rand.Rand, keys []crypto.PrivKey, accs []sdk.AccAddress) json
return appState
}
func testAndRunTxs(app *GaiaApp) []simulation.TestAndRunTx {
return []simulation.TestAndRunTx{
func testAndRunTxs(app *GaiaApp) []simulation.Operation {
return []simulation.Operation{
banksim.TestAndRunSingleInputMsgSend(app.accountMapper),
govsim.SimulateMsgSubmitProposal(app.govKeeper, app.stakeKeeper),
govsim.SimulateMsgDeposit(app.govKeeper, app.stakeKeeper),

View File

@ -4,7 +4,7 @@
- [ ] 2. Add commits/PRs that are desired for this release **that haven't already been added to develop**
- [ ] 3. Merge items in `PENDING.md` into the `CHANGELOG.md`. While doing this make sure that each entry contains links to issues/PRs for each item
- [ ] 4. Summarize breaking API changes under the “Breaking Changes” section of the `CHANGELOG.md` to bring attention to any breaking API changes that affect RPC consumers.
- [ ] 5. Tag the commit `{{ .Release.Name }}-rcN`
- [ ] 5. Tag the commit `{ .Release.Name }-rcN`
- [ ] 6. Kick off 1 day of automated fuzz testing
- [ ] 7. Release Lead assigns 2 people to perform [buddy testing script](/docs/RELEASE_TEST_SCRIPT.md) and update the relevant documentation
- [ ] 8. If errors are found in either #6 or #7 go back to #2 (*NOTE*: be sure to increment the `rcN`)

View File

@ -1,7 +1,7 @@
module.exports = {
title: "Cosmos Network",
description: "Documentation for the Cosmos Network.",
dest: "./site-docs",
dest: "./dist/docs",
base: "/",
markdown: {
lineNumbers: true

View File

@ -7,12 +7,12 @@ In the Cosmos network, keys and addresses may refer to a number of different rol
## HRP table
| HRP | Definition |
| HRP | Definition |
| ------------- |:-------------:|
| `cosmosaccaddr` | Cosmos Account Address |
| `cosmosaccpub` | Cosmos Account Public Key |
| `cosmosvaladdr` | Cosmos Consensus Address |
| `cosmosvalpub` | Cosmos Consensus Public Key|
| `cosmos` | Cosmos Account Address |
| `cosmospub` | Cosmos Account Public Key |
| `cosmosval` | Cosmos Validator Consensus Address |
| `cosmosvalpub`| Cosmos Validator Consensus Public Key|
## Encoding
@ -22,4 +22,4 @@ To covert between other binary reprsentation of addresses and keys, it is import
A complete implementation of the Amino serialization format is unnecessary in most cases. Simply prepending bytes from this [table](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md#public-key-cryptography) to the bytestring payload before bech32 encoding will be sufficient for compatible representation.
 
 

View File

@ -1,4 +0,0 @@
[defaults]
retry_files_enabled = False
host_key_checking = False

View File

@ -0,0 +1,8 @@
---
- hosts: all
any_errors_fatal: true
gather_facts: no
roles:
- increase-openfiles

View File

@ -6,5 +6,7 @@
any_errors_fatal: true
gather_facts: no
roles:
- setup-journald
- install-datadog-agent
- update-datadog-agent

View File

@ -1,4 +1,4 @@
---
GAIAD_ADDRESS: tcp://0.0.0.0:1317
GAIACLI_ADDRESS: tcp://0.0.0.0:1317

View File

@ -3,7 +3,7 @@
- name: Copy binary
copy:
src: "{{GAIACLI_BINARY}}"
dest: /usr/bin
dest: /usr/bin/gaiacli
mode: 0755
notify: restart gaiacli

View File

@ -8,7 +8,7 @@ Restart=on-failure
User=gaiad
Group=gaiad
PermissionsStartOnly=true
ExecStart=/usr/bin/gaiacli rest-server --laddr {{GAIAD_ADDRESS}}
ExecStart=/usr/bin/gaiacli rest-server --laddr {{GAIACLI_ADDRESS}}
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM

View File

@ -0,0 +1 @@
fs.file-max=262144

View File

@ -0,0 +1,3 @@
* soft nofile 262144
* hard nofile 262144

View File

@ -0,0 +1,3 @@
[Service]
LimitNOFILE=infinity
LimitMEMLOCK=infinity

View File

@ -0,0 +1,5 @@
---
- name: reload systemctl
systemd: name=systemd daemon_reload=yes

View File

@ -0,0 +1,22 @@
---
# Based on: https://stackoverflow.com/questions/38155108/how-to-increase-limit-for-open-processes-and-files-using-ansible
- name: Set sysctl File Limits
copy:
src: 50-fs.conf
dest: /etc/sysctl.d
- name: Set Shell File Limits
copy:
src: 91-nofiles.conf
dest: /etc/security/limits.d
- name: Set gaia filehandle Limits
copy:
src: limits.conf
dest: "/lib/systemd/system/{{item}}.service.d"
notify: reload systemctl
with_items:
- gaiad
- gaiacli

View File

@ -1,78 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIESTCCAzGgAwIBAgITBn+UV4WH6Kx33rJTMlu8mYtWDTANBgkqhkiG9w0BAQsF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAxMB4XDTE1MTAyMjAwMDAwMFoXDTI1MTAxOTAwMDAwMFowRjEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEVMBMGA1UECxMMU2VydmVyIENB
IDFCMQ8wDQYDVQQDEwZBbWF6b24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDCThZn3c68asg3Wuw6MLAd5tES6BIoSMzoKcG5blPVo+sDORrMd4f2AbnZ
cMzPa43j4wNxhplty6aUKk4T1qe9BOwKFjwK6zmxxLVYo7bHViXsPlJ6qOMpFge5
blDP+18x+B26A0piiQOuPkfyDyeR4xQghfj66Yo19V+emU3nazfvpFA+ROz6WoVm
B5x+F2pV8xeKNR7u6azDdU5YVX1TawprmxRC1+WsAYmz6qP+z8ArDITC2FMVy2fw
0IjKOtEXc/VfmtTFch5+AfGYMGMqqvJ6LcXiAhqG5TI+Dr0RtM88k+8XUBCeQ8IG
KuANaL7TiItKZYxK1MMuTJtV9IblAgMBAAGjggE7MIIBNzASBgNVHRMBAf8ECDAG
AQH/AgEAMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUWaRmBlKge5WSPKOUByeW
dFv5PdAwHwYDVR0jBBgwFoAUhBjMhTTsvAyUlC4IWZzHshBOCggwewYIKwYBBQUH
AQEEbzBtMC8GCCsGAQUFBzABhiNodHRwOi8vb2NzcC5yb290Y2ExLmFtYXpvbnRy
dXN0LmNvbTA6BggrBgEFBQcwAoYuaHR0cDovL2NydC5yb290Y2ExLmFtYXpvbnRy
dXN0LmNvbS9yb290Y2ExLmNlcjA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3Js
LnJvb3RjYTEuYW1hem9udHJ1c3QuY29tL3Jvb3RjYTEuY3JsMBMGA1UdIAQMMAow
CAYGZ4EMAQIBMA0GCSqGSIb3DQEBCwUAA4IBAQCFkr41u3nPo4FCHOTjY3NTOVI1
59Gt/a6ZiqyJEi+752+a1U5y6iAwYfmXss2lJwJFqMp2PphKg5625kXg8kP2CN5t
6G7bMQcT8C8xDZNtYTd7WPD8UZiRKAJPBXa30/AbwuZe0GaFEQ8ugcYQgSn+IGBI
8/LwhBNTZTUVEWuCUUBVV18YtbAiPq3yXqMB48Oz+ctBWuZSkbvkNodPLamkB2g1
upRyzQ7qDn1X8nn8N8V7YJ6y68AtkHcNSRAnpTitxBKjtKPISLMVCx7i4hncxHZS
yLyKQXhw2W2Xs0qLeC1etA+jTGDK4UfLeC0SF7FSi8o5LL21L8IzApar2pR/
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEkjCCA3qgAwIBAgITBn+USionzfP6wq4rAfkI7rnExjANBgkqhkiG9w0BAQsF
ADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNj
b3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4x
OzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1
dGhvcml0eSAtIEcyMB4XDTE1MDUyNTEyMDAwMFoXDTM3MTIzMTAxMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
jgSubJrIqg0CAwEAAaOCATEwggEtMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgGGMB0GA1UdDgQWBBSEGMyFNOy8DJSULghZnMeyEE4KCDAfBgNVHSMEGDAW
gBScXwDfqgHXMCs4iKK4bUqc8hGRgzB4BggrBgEFBQcBAQRsMGowLgYIKwYBBQUH
MAGGImh0dHA6Ly9vY3NwLnJvb3RnMi5hbWF6b250cnVzdC5jb20wOAYIKwYBBQUH
MAKGLGh0dHA6Ly9jcnQucm9vdGcyLmFtYXpvbnRydXN0LmNvbS9yb290ZzIuY2Vy
MD0GA1UdHwQ2MDQwMqAwoC6GLGh0dHA6Ly9jcmwucm9vdGcyLmFtYXpvbnRydXN0
LmNvbS9yb290ZzIuY3JsMBEGA1UdIAQKMAgwBgYEVR0gADANBgkqhkiG9w0BAQsF
AAOCAQEAYjdCXLwQtT6LLOkMm2xF4gcAevnFWAu5CIw+7bMlPLVvUOTNNWqnkzSW
MiGpSESrnO09tKpzbeR/FoCJbM8oAxiDR3mjEH4wW6w7sGDgd9QIpuEdfF7Au/ma
eyKdpwAJfqxGF4PcnCZXmTA5YpaP7dreqsXMGz7KQ2hsVxa81Q4gLv7/wmpdLqBK
bRRYh5TmOTFffHPLkIhqhBGWJ6bt2YFGpn6jcgAKUj6DiAdjd4lpFw85hdKrCEVN
0FE6/V1dN2RMfjCyVSRCnTawXZwXgWHxyvkQAiSr6w10kY17RSlQOYiypok1JR4U
akcjMS9cmvqtmg5iUaQqqcT5NJ0hGA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEdTCCA12gAwIBAgIJAKcOSkw0grd/MA0GCSqGSIb3DQEBCwUAMGgxCzAJBgNV
BAYTAlVTMSUwIwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTIw
MAYDVQQLEylTdGFyZmllbGQgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
eTAeFw0wOTA5MDIwMDAwMDBaFw0zNDA2MjgxNzM5MTZaMIGYMQswCQYDVQQGEwJV
UzEQMA4GA1UECBMHQXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTElMCMGA1UE
ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjE7MDkGA1UEAxMyU3RhcmZp
ZWxkIFNlcnZpY2VzIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVDDrEKvlO4vW+GZdfjohTsR8/
y8+fIBNtKTrID30892t2OGPZNmCom15cAICyL1l/9of5JUOG52kbUpqQ4XHj2C0N
Tm/2yEnZtvMaVq4rtnQU68/7JuMauh2WLmo7WJSJR1b/JaCTcFOD2oR0FMNnngRo
Ot+OQFodSk7PQ5E751bWAHDLUu57fa4657wx+UX2wmDPE1kCK4DMNEffud6QZW0C
zyyRpqbn3oUYSXxmTqM6bam17jQuug0DuDPfR+uxa40l2ZvOgdFFRjKWcIfeAg5J
Q4W2bHO7ZOphQazJ1FTfhy/HIrImzJ9ZVGif/L4qL8RVHHVAYBeFAlU5i38FAgMB
AAGjgfAwge0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0O
BBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMB8GA1UdIwQYMBaAFL9ft9HO3R+G9FtV
rNzXEMIOqYjnME8GCCsGAQUFBwEBBEMwQTAcBggrBgEFBQcwAYYQaHR0cDovL28u
c3MyLnVzLzAhBggrBgEFBQcwAoYVaHR0cDovL3guc3MyLnVzL3guY2VyMCYGA1Ud
HwQfMB0wG6AZoBeGFWh0dHA6Ly9zLnNzMi51cy9yLmNybDARBgNVHSAECjAIMAYG
BFUdIAAwDQYJKoZIhvcNAQELBQADggEBACMd44pXyn3pF3lM8R5V/cxTbj5HD9/G
VfKyBDbtgB9TxF00KGu+x1X8Z+rLP3+QsjPNG1gQggL4+C/1E2DUBc7xgQjB3ad1
l08YuW3e95ORCLp+QCztweq7dp4zBncdDQh/U90bZKuCJ/Fp1U1ervShw3WnWEQt
8jxwmKy6abaVd38PMV4s/KCHOkdp8Hlf9BRUpJVeEXgSYCfOn8J3/yNTd126/+pZ
59vPr5KW7ySaNRB6nJHGDn2Z9j8Z3/VyVOEVqQdZe4O/Ui5GjLIAZHYcSNPYeehu
VsyuLAOQ1xk4meTKCRlb/weWsKh/NEnfVqn3sF/tM+2MR7cwA130A4w=
-----END CERTIFICATE-----

View File

@ -1,35 +0,0 @@
# see "man logrotate" for details
# rotate log files weekly
daily
# keep 4 days worth of backlogs
rotate 4
# create new (empty) log files after rotating old ones
create
# use date as a suffix of the rotated file
dateext
# uncomment this if you want your log files compressed
compress
# RPM packages drop log rotation information into this directory
include /etc/logrotate.d
# no packages own wtmp and btmp -- we'll rotate them here
/var/log/wtmp {
monthly
create 0664 root utmp
minsize 1M
rotate 1
}
/var/log/btmp {
missingok
monthly
create 0600 root utmp
rotate 1
}
# system-specific logs may be also be configured here.

View File

@ -1,13 +0,0 @@
/var/log/cron
/var/log/maillog
/var/log/messages
/var/log/secure
/var/log/spooler
{
missingok
sharedscripts
postrotate
/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
service datadog-agent restart 2> /dev/null || true
endscript
}

View File

@ -13,46 +13,3 @@
DD_API_KEY: "{{DD_API_KEY}}"
DD_HOST_TAGS: "testnet:{{TESTNET_NAME}},cluster:{{CLUSTER_NAME}}"
- name: Set datadog.yaml config
template: src=datadog.yaml.j2 dest=/etc/datadog-agent/datadog.yaml
notify: restart datadog-agent
- name: Set metrics config
copy: src=conf.d/ dest=/etc/datadog-agent/conf.d/
notify: restart datadog-agent
- name: Disable journald rate-limiting
lineinfile: "dest=/etc/systemd/journald.conf regexp={{item.regexp}} line='{{item.line}}'"
with_items:
- { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" }
- { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" }
- { regexp: "^#SystemMaxFileSize", line: "SystemMaxFileSize=500M" }
notify: restart journald
- name: As long as Datadog does not support journald on RPM-based linux, we enable rsyslog
yum: "name={{item}} state=installed"
with_items:
- rsyslog
- rsyslog-gnutls
#- name: Get DataDog certificate for rsyslog
# get_url: url=https://docs.datadoghq.com/crt/intake.logs.datadoghq.com.crt dest=/etc/ssl/certs/intake.logs.datadoghq.com.crt
- name: Get DataDog certificate for rsyslog
copy: src=intake.logs.datadoghq.com.crt dest=/etc/ssl/certs/intake.logs.datadoghq.com.crt
- name: Add datadog config to rsyslog
template: src=datadog.conf.j2 dest=/etc/rsyslog.d/datadog.conf mode=0600
notify: restart rsyslog
- name: Set logrotate to rotate daily so syslog does not use up all space
copy: src=logrotate.conf dest=/etc/logrotate.conf
- name: Set syslog to restart datadog-agent after logrotate
copy: src=syslog dest=/etc/logrotate.d/syslog
#semanage port -a -t syslog_tls_port_t -p tcp 10516
- name: Enable rsyslog to report to port 10516 in SELinux
seport: ports=10516 proto=tcp reload=yes setype=syslog_tls_port_t state=present
notify: restart rsyslog

View File

@ -1,14 +0,0 @@
$template DatadogFormat,"{{DD_API_KEY}} <%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% - - - %msg%\n"
$imjournalRatelimitInterval 0
$imjournalRatelimitBurst 0
$DefaultNetstreamDriver gtls
$DefaultNetstreamDriverCAFile /etc/ssl/certs/intake.logs.datadoghq.com.crt
$ActionSendStreamDriver gtls
$ActionSendStreamDriverMode 1
$ActionSendStreamDriverAuthMode x509/name
$ActionSendStreamDriverPermittedPeer *.logs.datadoghq.com
*.* @@intake.logs.datadoghq.com:10516;DatadogFormat

View File

@ -0,0 +1 @@
DAEMON_COREFILE_LIMIT='unlimited'

View File

@ -0,0 +1 @@
DAEMON_COREFILE_LIMIT='unlimited'

View File

@ -0,0 +1,3 @@
kernel.core_uses_pid = 1
kernel.core_pattern = /tmp/core-%e-%s-%u-%g-%p-%t
fs.suid_dumpable = 2

View File

@ -0,0 +1,4 @@
---
- name: reload sysctl
command: "/sbin/sysctl -p"

View File

@ -0,0 +1,9 @@
---
# Based on https://www.cyberciti.biz/tips/linux-core-dumps.html
- name: Copy sysctl and sysconfig files to enable app and daemon core dumps
file: src=. dest=/etc/
notify: reload sysctl
- name: Enable debugging for all apps
lineinfile: create=yes line="DAEMON_COREFILE_LIMIT='unlimited'" path=/etc/sysconfig/init regexp=^DAEMON_COREFILE_LIMIT=

View File

@ -0,0 +1,5 @@
---
- name: reload systemd
systemd: name=gaiad enabled=yes daemon_reload=yes

View File

@ -6,14 +6,21 @@
run_once: true
become: no
- name: Create gaiad user
user: name=gaiad home=/home/gaiad shell=/bin/bash
- name: Copy binary
copy:
src: "{{BINARY}}"
dest: /usr/bin
mode: 0755
- name: Copy service file
copy: src=gaiad.service dest=/etc/systemd/system/gaiad.service mode=0755
notify: reload systemd
- name: Get node ID
command: "cat /etc/gaiad-nodeid"
command: "cat /etc/nodeid"
changed_when: false
register: nodeid

View File

@ -5,8 +5,17 @@
with_items:
- { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" }
- { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" }
- { regexp: "^#SystemMaxFileSize", line: "SystemMaxFileSize=100M" }
- { regexp: "^#SystemMaxUse", line: "SystemMaxUse=500M" }
- { regexp: "^#SystemMaxFiles", line: "SystemMaxFiles=10" }
notify: restart journald
- name: Change logrotate to daily
lineinfile: "dest=/etc/logrotate.conf regexp={{item.regexp}} line='{{item.line}}'"
with_items:
- { regexp: "^weekly", line: "daily" }
- { regexp: "^#compress", line: "compress" }
- name: Create journal directory for permanent logs
file: path=/var/log/journal state=directory
notify: restart journald

View File

@ -0,0 +1,5 @@
---
- name: reload systemd
systemd: name=gaiad enabled=yes daemon_reload=yes

View File

@ -6,14 +6,21 @@
run_once: true
become: no
- name: Create gaiad user
user: name=gaiad home=/home/gaiad shell=/bin/bash
- name: Copy binary
copy:
src: "{{BINARY}}"
dest: /usr/bin
mode: 0755
- name: Copy service file
copy: src=gaiad.service dest=/etc/systemd/system/gaiad.service mode=0755
notify: reload systemd
- name: Get node ID
command: "cat /etc/gaiad-nodeid"
command: "cat /etc/nodeid"
changed_when: false
register: nodeid

View File

@ -0,0 +1,13 @@
init_config:
instances:
- name: gaiad
url: http://localhost:26657/status
timeout: 1
content_match: '"latest_block_height": "0",'
reverse_content_match: true
- name: gaiacli
url: http://localhost:1317/node_version
timeout: 1

View File

@ -2,6 +2,9 @@ init_config:
instances:
- prometheus_url: http://127.0.0.1:26660
namespace: "gaiad"
metrics:
- p2p: *
- go*
- mempool*
- p2p*
- process*
- promhttp*

View File

@ -0,0 +1,5 @@
---
- name: restart datadog-agent
service: name=datadog-agent state=restarted

View File

@ -0,0 +1,10 @@
---
- name: Set datadog.yaml config
template: src=datadog.yaml.j2 dest=/etc/datadog-agent/datadog.yaml
notify: restart datadog-agent
- name: Set metrics config
copy: src=conf.d/ dest=/etc/datadog-agent/conf.d/
notify: restart datadog-agent

View File

@ -28,10 +28,10 @@ api_key: {{DD_API_KEY}}
# Setting this option to "yes" will force the agent to only use TLS 1.2 when
# pushing data to the url specified in "dd_url".
# force_tls_12: no
force_tls_12: yes
# Force the hostname to whatever you want. (default: auto-detected)
# hostname: mymachine.mydomain
hostname: {{inventory_hostname}}
# Make the agent use "hostname -f" on unix-based systems as a last resort
# way of determining the hostname instead of Golang "os.Hostname()"
@ -220,7 +220,7 @@ collect_ec2_tags: true
# Logs agent
#
# Logs agent is disabled by default
logs_enabled: true
#logs_enabled: true
#
# Enable logs collection for all containers, disabled by default
# logs_config:

View File

@ -3,7 +3,7 @@
- name: Copy binary
copy:
src: "{{BINARY}}"
dest: /usr/bin
dest: /usr/bin/gaiad
mode: 0755
notify: restart gaiad

View File

@ -1,13 +0,0 @@
---
# Set the core file size to unlimited to allow the system to generate core dumps
- hosts: all
any_errors_fatal: true
gather_facts: no
tasks:
- name: Set core file size to unlimited to be able to get the core dump on SIGABRT
shell: "ulimit -c unlimited"

View File

@ -0,0 +1,8 @@
---
- hosts: all
any_errors_fatal: true
gather_facts: no
roles:
- set-debug

View File

@ -2,10 +2,12 @@
#GENESISFILE required
#CONFIGFILE required
#BINARY required
- hosts: all
any_errors_fatal: true
gather_facts: no
roles:
- increase-openfiles
- setup-fullnodes

View File

@ -4,5 +4,6 @@
any_errors_fatal: true
gather_facts: no
roles:
- increase-openfiles
- setup-validators

View File

@ -9,7 +9,7 @@
- name: Gather status
uri:
body_format: json
url: "http://{{inventory_hostname}}:26657/status"
url: "http://{{ansible_host}}:26657/status"
register: status
- name: Print status

View File

@ -0,0 +1,10 @@
---
#DD_API_KEY,TESTNET_NAME,CLUSTER_NAME required
- hosts: all
any_errors_fatal: true
gather_facts: no
roles:
- update-datadog-agent

View File

@ -4,13 +4,5 @@
#Usage: terraform.sh <testnet_name> <testnet_node_number>
#Add gaiad node number for remote identification
echo "$2" > /etc/gaiad-nodeid
#Create gaiad user
useradd -m -s /bin/bash gaiad
#Reload services to enable the gaiad service (note that the gaiad binary is not available yet)
systemctl daemon-reload
systemctl enable gaiad
echo "$2" > /etc/nodeid

View File

@ -6,10 +6,16 @@
#Instance Attachment (autoscaling is the future)
resource "aws_lb_target_group_attachment" "lb_attach" {
count = "${var.SERVERS*length(data.aws_availability_zones.zones.names)}"
count = "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}"
target_group_arn = "${aws_lb_target_group.lb_target_group.arn}"
target_id = "${element(aws_instance.node.*.id,count.index)}"
port = 80
port = 26657
}
resource "aws_lb_target_group_attachment" "lb_attach_lcd" {
count = "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}"
target_group_arn = "${aws_lb_target_group.lb_target_group_lcd.arn}"
target_id = "${element(aws_instance.node.*.id,count.index)}"
port = 1317
}

View File

@ -13,7 +13,7 @@ data "aws_ami" "linux" {
resource "aws_instance" "node" {
# depends_on = ["${element(aws_route_table_association.route_table_association.*,count.index)}"]
count = "${var.SERVERS*length(data.aws_availability_zones.zones.names)}"
count = "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}"
ami = "${data.aws_ami.linux.image_id}"
instance_type = "${var.instance_type}"
key_name = "${aws_key_pair.key.key_name}"
@ -33,7 +33,7 @@ resource "aws_instance" "node" {
}
root_block_device {
volume_size = 20
volume_size = 40
}
connection {
@ -47,14 +47,8 @@ resource "aws_instance" "node" {
destination = "/tmp/terraform.sh"
}
provisioner "file" {
source = "files/gaiad.service"
destination = "/tmp/gaiad.service"
}
provisioner "remote-exec" {
inline = [
"sudo cp /tmp/gaiad.service /etc/systemd/system/gaiad.service",
"chmod +x /tmp/terraform.sh",
"sudo /tmp/terraform.sh ${var.name} ${count.index}",
]

View File

@ -1,20 +1,22 @@
resource "aws_lb" "lb" {
name = "${var.name}"
subnets = ["${aws_subnet.subnet.*.id}"]
# security_groups = ["${split(",", var.lb_security_groups)}"]
security_groups = ["${aws_security_group.secgroup.id}"]
tags {
Name = "${var.name}"
}
# access_logs {
# bucket = "${var.s3_bucket}"
# prefix = "ELB-logs"
# prefix = "lblogs"
# }
}
resource "aws_lb_listener" "lb_listener" {
load_balancer_arn = "${aws_lb.lb.arn}"
port = "80"
protocol = "HTTP"
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = "${var.certificate_arn}"
default_action {
target_group_arn = "${aws_lb_target_group.lb_target_group.arn}"
@ -23,7 +25,6 @@ resource "aws_lb_listener" "lb_listener" {
}
resource "aws_lb_listener_rule" "listener_rule" {
# depends_on = ["aws_lb_target_group.lb_target_group"]
listener_arn = "${aws_lb_listener.lb_listener.arn}"
priority = "100"
action {
@ -38,24 +39,14 @@ resource "aws_lb_listener_rule" "listener_rule" {
resource "aws_lb_target_group" "lb_target_group" {
name = "${var.name}"
port = "80"
port = "26657"
protocol = "HTTP"
vpc_id = "${aws_vpc.vpc.id}"
tags {
name = "${var.name}"
}
# stickiness {
# type = "lb_cookie"
# cookie_duration = 1800
# enabled = "true"
# }
# health_check {
# healthy_threshold = 3
# unhealthy_threshold = 10
# timeout = 5
# interval = 10
# path = "${var.target_group_path}"
# port = "${var.target_group_port}"
# }
health_check {
path = "/health"
}
}

View File

@ -0,0 +1,39 @@
resource "aws_lb_listener" "lb_listener_lcd" {
load_balancer_arn = "${aws_lb.lb.arn}"
port = "1317"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = "${var.certificate_arn}"
default_action {
target_group_arn = "${aws_lb_target_group.lb_target_group_lcd.arn}"
type = "forward"
}
}
resource "aws_lb_listener_rule" "listener_rule_lcd" {
listener_arn = "${aws_lb_listener.lb_listener_lcd.arn}"
priority = "100"
action {
type = "forward"
target_group_arn = "${aws_lb_target_group.lb_target_group_lcd.id}"
}
condition {
field = "path-pattern"
values = ["/"]
}
}
resource "aws_lb_target_group" "lb_target_group_lcd" {
name = "${var.name}lcd"
port = "1317"
protocol = "HTTP"
vpc_id = "${aws_vpc.vpc.id}"
tags {
name = "${var.name}"
}
health_check {
path = "/node_version"
}
}

View File

@ -8,9 +8,9 @@ output "instances" {
value = ["${aws_instance.node.*.id}"]
}
output "instances_count" {
value = "${length(aws_instance.node.*)}"
}
#output "instances_count" {
# value = "${length(aws_instance.node.*)}"
#}
// The list of cluster instance public IPs
output "public_ips" {

View File

@ -17,6 +17,11 @@ variable "SERVERS" {
default = "1"
}
variable "max_zones" {
description = "Maximum number of availability zones to use"
default = "1"
}
variable "ssh_private_file" {
description = "SSH private key file to be used to connect to the nodes"
type = "string"
@ -27,3 +32,8 @@ variable "ssh_public_file" {
type = "string"
}
variable "certificate_arn" {
description = "Load-balancer SSL certificate AWS ARN"
type = "string"
}

View File

@ -33,7 +33,7 @@ data "aws_availability_zones" "zones" {
}
resource "aws_subnet" "subnet" {
count = "${length(data.aws_availability_zones.zones.names)}"
count = "${min(length(data.aws_availability_zones.zones.names),var.max_zones)}"
vpc_id = "${aws_vpc.vpc.id}"
availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}"
cidr_block = "${cidrsubnet(aws_vpc.vpc.cidr_block, 8, count.index)}"
@ -45,7 +45,7 @@ resource "aws_subnet" "subnet" {
}
resource "aws_route_table_association" "route_table_association" {
count = "${length(data.aws_availability_zones.zones.names)}"
count = "${min(length(data.aws_availability_zones.zones.names),var.max_zones)}"
subnet_id = "${element(aws_subnet.subnet.*.id,count.index)}"
route_table_id = "${aws_route_table.route_table.id}"
}
@ -66,8 +66,15 @@ resource "aws_security_group" "secgroup" {
}
ingress {
from_port = 80
to_port = 80
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 1317
to_port = 1317
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

View File

@ -9,11 +9,16 @@ variable "SERVERS" {
default = "1"
}
variable "MAX_ZONES" {
description = "Maximum number of availability zones to use"
default = "4"
}
#See https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region
#eu-west-3 does not contain CentOS images
variable "REGION" {
description = "AWS Regions"
default = "us-east-2"
default = "us-east-1"
}
variable "SSH_PRIVATE_FILE" {
@ -26,6 +31,11 @@ variable "SSH_PUBLIC_FILE" {
type = "string"
}
variable "CERTIFICATE_ARN" {
description = "Load-balancer certificate AWS ARN"
type = "string"
}
# ap-southeast-1 and ap-southeast-2 do not contain the newer CentOS 1704 image
variable "image" {
description = "AWS image name"
@ -34,7 +44,7 @@ variable "image" {
variable "instance_type" {
description = "AWS instance type"
default = "t2.medium"
default = "t2.large"
}
provider "aws" {
@ -48,7 +58,9 @@ module "nodes" {
instance_type = "${var.instance_type}"
ssh_public_file = "${var.SSH_PUBLIC_FILE}"
ssh_private_file = "${var.SSH_PRIVATE_FILE}"
certificate_arn = "${var.CERTIFICATE_ARN}"
SERVERS = "${var.SERVERS}"
max_zones = "${var.MAX_ZONES}"
}
output "public_ips" {

View File

@ -7,13 +7,5 @@
REGION="$(($2 + 1))"
RNODE="$(($3 + 1))"
ID="$((${REGION} * 100 + ${RNODE}))"
echo "$ID" > /etc/gaiad-nodeid
#Create gaiad user
useradd -m -s /bin/bash gaiad
#Reload services to enable the gaiad service (note that the gaiad binary is not available yet)
systemctl daemon-reload
systemctl enable gaiad
echo "$ID" > /etc/nodeid

View File

@ -43,7 +43,7 @@ variable "image" {
variable "instance_type" {
description = "AWS instance type"
default = "t2.medium"
default = "t2.large"
}
module "nodes-0" {

View File

@ -79,7 +79,7 @@ resource "aws_instance" "node" {
}
root_block_device {
volume_size = 20
volume_size = 40
}
connection {
@ -93,14 +93,8 @@ resource "aws_instance" "node" {
destination = "/tmp/terraform.sh"
}
provisioner "file" {
source = "files/gaiad.service"
destination = "/tmp/gaiad.service"
}
provisioner "remote-exec" {
inline = [
"sudo cp /tmp/gaiad.service /etc/systemd/system/gaiad.service",
"chmod +x /tmp/terraform.sh",
"sudo /tmp/terraform.sh ${var.name} ${var.multiplier} ${count.index}",
]

View File

@ -29,11 +29,6 @@ resource "digitalocean_droplet" "cluster" {
destination = "/tmp/terraform.sh"
}
provisioner "file" {
source = "files/gaiad.service"
destination = "/etc/systemd/system/gaiad.service"
}
provisioner "remote-exec" {
inline = [
"chmod +x /tmp/terraform.sh",

View File

@ -1,17 +0,0 @@
[Unit]
Description=gaiad
Requires=network-online.target
After=network-online.target
[Service]
Restart=on-failure
User=gaiad
Group=gaiad
PermissionsStartOnly=true
ExecStart=/usr/bin/gaiad start
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
[Install]
WantedBy=multi-user.target

View File

@ -4,16 +4,5 @@
#Usage: terraform.sh <testnet_name> <testnet_node_number>
#Add gaiad node number for remote identification
echo "$2" > /etc/gaiad-nodeid
#Create gaiad user
useradd -m -s /bin/bash gaiad
#cp -r /root/.ssh /home/gaiad/.ssh
#chown -R gaiad.gaiad /home/gaiad/.ssh
#chmod -R 700 /home/gaiad/.ssh
#Reload services to enable the gaiad service (note that the gaiad binary is not available yet)
systemctl daemon-reload
systemctl enable gaiad
echo "$2" > /etc/nodeid

View File

@ -9,6 +9,8 @@ import (
"github.com/cosmos/cosmos-sdk/wire"
tmtypes "github.com/tendermint/tendermint/types"
"io/ioutil"
"path"
)
// ExportCmd dumps app state to JSON.
@ -19,6 +21,21 @@ func ExportCmd(ctx *Context, cdc *wire.Codec, appExporter AppExporter) *cobra.Co
RunE: func(cmd *cobra.Command, args []string) error {
home := viper.GetString("home")
traceStore := viper.GetString(flagTraceStore)
emptyState, err := isEmptyState(home)
if err != nil {
return err
}
if emptyState {
fmt.Println("WARNING: State is not initialized. Returning genesis file.")
genesisFile := path.Join(home, "config", "genesis.json")
genesis, err := ioutil.ReadFile(genesisFile)
if err != nil {
return err
}
fmt.Println(string(genesis))
return nil
}
appState, validators, err := appExporter(home, ctx.Logger, traceStore)
if err != nil {
@ -43,3 +60,12 @@ func ExportCmd(ctx *Context, cdc *wire.Codec, appExporter AppExporter) *cobra.Co
},
}
}
func isEmptyState(home string) (bool, error) {
files, err := ioutil.ReadDir(path.Join(home, "data"))
if err != nil {
return false, err
}
return len(files) == 0, nil
}

53
server/export_test.go Normal file
View File

@ -0,0 +1,53 @@
package server
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/wire"
"github.com/tendermint/tendermint/libs/log"
tcmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
"os"
"bytes"
"io"
"github.com/cosmos/cosmos-sdk/server/mock"
)
func TestEmptyState(t *testing.T) {
defer setupViper(t)()
logger := log.NewNopLogger()
cfg, err := tcmd.ParseConfig()
require.Nil(t, err)
ctx := NewContext(cfg, logger)
cdc := wire.NewCodec()
appInit := AppInit{
AppGenTx: mock.AppGenTx,
AppGenState: mock.AppGenStateEmpty,
}
cmd := InitCmd(ctx, cdc, appInit)
err = cmd.RunE(nil, nil)
require.NoError(t, err)
old := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
cmd = ExportCmd(ctx, cdc, nil)
err = cmd.RunE(nil, nil)
require.NoError(t, err)
outC := make(chan string)
go func() {
var buf bytes.Buffer
io.Copy(&buf, r)
outC <- buf.String()
}()
w.Close()
os.Stdout = old
out := <-outC
require.Contains(t, out, "WARNING: State is not initialized")
require.Contains(t, out, "genesis_time")
require.Contains(t, out, "chain_id")
require.Contains(t, out, "consensus_params")
require.Contains(t, out, "validators")
require.Contains(t, out, "app_hash")
}

View File

@ -121,9 +121,15 @@ func AppGenState(_ *wire.Codec, _ []json.RawMessage) (appState json.RawMessage,
return
}
// AppGenStateEmpty returns an empty transaction state for mocking.
func AppGenStateEmpty(_ *wire.Codec, _ []json.RawMessage) (appState json.RawMessage, err error) {
appState = json.RawMessage(``)
return
}
// Return a validator, not much else
func AppGenTx(_ *wire.Codec, pk crypto.PubKey, genTxConfig gc.GenTx) (
appGenTx, cliPrint json.RawMessage, validator tmtypes.GenesisValidator, err error) {
appGenTx, cliPrint json.RawMessage, validator tmtypes.GenesisValidator, err error) {
validator = tmtypes.GenesisValidator{
PubKey: pk,

View File

@ -20,7 +20,7 @@ import (
// TestAndRunSingleInputMsgSend tests and runs a single msg send, with one input and one output, where both
// accounts already exist.
func TestAndRunSingleInputMsgSend(mapper auth.AccountMapper) simulation.TestAndRunTx {
func TestAndRunSingleInputMsgSend(mapper auth.AccountMapper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
fromKey := simulation.RandomKey(r, keys)
fromAddr := sdk.AccAddress(fromKey.PubKey().Address())

View File

@ -33,7 +33,7 @@ func TestBankWithRandomMessages(t *testing.T) {
simulation.Simulate(
t, mapp.BaseApp, appStateFn,
[]simulation.TestAndRunTx{
[]simulation.Operation{
TestAndRunSingleInputMsgSend(mapper),
},
[]simulation.RandSetup{},

View File

@ -21,7 +21,7 @@ const (
)
// SimulateMsgSubmitProposal
func SimulateMsgSubmitProposal(k gov.Keeper, sk stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgSubmitProposal(k gov.Keeper, sk stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
key := simulation.RandomKey(r, keys)
addr := sdk.AccAddress(key.PubKey().Address())
@ -50,7 +50,7 @@ func SimulateMsgSubmitProposal(k gov.Keeper, sk stake.Keeper) simulation.TestAnd
}
// SimulateMsgDeposit
func SimulateMsgDeposit(k gov.Keeper, sk stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgDeposit(k gov.Keeper, sk stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
key := simulation.RandomKey(r, keys)
addr := sdk.AccAddress(key.PubKey().Address())
@ -77,7 +77,7 @@ func SimulateMsgDeposit(k gov.Keeper, sk stake.Keeper) simulation.TestAndRunTx {
}
// SimulateMsgVote
func SimulateMsgVote(k gov.Keeper, sk stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgVote(k gov.Keeper, sk stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
key := simulation.RandomKey(r, keys)
addr := sdk.AccAddress(key.PubKey().Address())

View File

@ -55,7 +55,7 @@ func TestGovWithRandomMessages(t *testing.T) {
simulation.Simulate(
t, mapp.BaseApp, appStateFn,
[]simulation.TestAndRunTx{
[]simulation.Operation{
SimulateMsgSubmitProposal(govKeeper, stakeKeeper),
SimulateMsgDeposit(govKeeper, stakeKeeper),
SimulateMsgVote(govKeeper, stakeKeeper),

View File

@ -3,6 +3,7 @@ package simulation
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"sort"
"testing"
@ -20,7 +21,7 @@ import (
// Simulate tests application by sending random messages.
func Simulate(
t *testing.T, app *baseapp.BaseApp, appStateFn func(r *rand.Rand, keys []crypto.PrivKey, accs []sdk.AccAddress) json.RawMessage, ops []TestAndRunTx, setups []RandSetup,
t *testing.T, app *baseapp.BaseApp, appStateFn func(r *rand.Rand, keys []crypto.PrivKey, accs []sdk.AccAddress) json.RawMessage, ops []Operation, setups []RandSetup,
invariants []Invariant, numBlocks int, blockSize int, commit bool,
) {
time := time.Now().UnixNano()
@ -30,12 +31,20 @@ func Simulate(
// SimulateFromSeed tests an application by running the provided
// operations, testing the provided invariants, but using the provided seed.
func SimulateFromSeed(
t *testing.T, app *baseapp.BaseApp, appStateFn func(r *rand.Rand, keys []crypto.PrivKey, accs []sdk.AccAddress) json.RawMessage, seed int64, ops []TestAndRunTx, setups []RandSetup,
t *testing.T, app *baseapp.BaseApp, appStateFn func(r *rand.Rand, keys []crypto.PrivKey, accs []sdk.AccAddress) json.RawMessage, seed int64, ops []Operation, setups []RandSetup,
invariants []Invariant, numBlocks int, blockSize int, commit bool,
) {
log := fmt.Sprintf("Starting SimulateFromSeed with randomness created with seed %d", int(seed))
fmt.Printf("%s\n", log)
r := rand.New(rand.NewSource(seed))
unixTime := r.Int63n(int64(math.Pow(2, 40)))
// Set the timestamp for simulation
timestamp := time.Unix(unixTime, 0)
log = fmt.Sprintf("%s\nStarting the simulation from time %v, unixtime %v", log, timestamp.UTC().Format(time.UnixDate), timestamp.Unix())
fmt.Printf("%s\n", log)
timeDiff := maxTimePerBlock - minTimePerBlock
keys, accs := mock.GeneratePrivKeyAddressPairsFromRand(r, numKeys)
// Setup event stats
@ -45,9 +54,6 @@ func SimulateFromSeed(
events[what]++
}
timestamp := time.Unix(0, 0)
timeDiff := maxTimePerBlock - minTimePerBlock
res := app.InitChain(abci.RequestInitChain{AppStateBytes: appStateFn(r, keys, accs)})
validators := make(map[string]mockValidator)
for _, validator := range res.Validators {

View File

@ -11,10 +11,15 @@ import (
)
type (
// TestAndRunTx produces a fuzzed transaction, and ensures the state
// transition was as expected. It returns a descriptive message "action"
// about what this fuzzed tx actually did, for ease of debugging.
TestAndRunTx func(
// Operation runs a state machine transition,
// and ensures the transition happened as expected.
// The operation could be running and testing a fuzzed transaction,
// or doing the same for a message.
//
// For ease of debugging,
// an operation returns a descriptive message "action",
// which details what this fuzzed state machine transition actually did.
Operation func(
t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
privKeys []crypto.PrivKey, log string, event func(string),
) (action string, err sdk.Error)

View File

@ -16,7 +16,7 @@ import (
)
// SimulateMsgUnjail
func SimulateMsgUnjail(k slashing.Keeper) simulation.TestAndRunTx {
func SimulateMsgUnjail(k slashing.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
key := simulation.RandomKey(r, keys)
address := sdk.AccAddress(key.PubKey().Address())

View File

@ -240,7 +240,7 @@ func (k Keeper) Delegate(ctx sdk.Context, delegatorAddr sdk.AccAddress, bondAmt
}
pool := k.GetPool(ctx)
validator, pool, newShares = validator.AddTokensFromDel(pool, bondAmt.Amount.Int64())
validator, pool, newShares = validator.AddTokensFromDel(pool, bondAmt.Amount)
delegation.Shares = delegation.Shares.Add(newShares)
// Update delegation height

View File

@ -16,7 +16,7 @@ func TestDelegation(t *testing.T) {
pool := keeper.GetPool(ctx)
//construct the validators
amts := []int64{9, 8, 7}
amts := []sdk.Int{sdk.NewInt(9), sdk.NewInt(8), sdk.NewInt(7)}
var validators [3]types.Validator
for i, amt := range amts {
validators[i] = types.NewValidator(addrVals[i], PKs[i], types.Description{})
@ -146,7 +146,7 @@ func TestUnbondDelegation(t *testing.T) {
//create a validator and a delegator to that validator
validator := types.NewValidator(addrVals[0], PKs[0], types.Description{})
validator, pool, issuedShares := validator.AddTokensFromDel(pool, 10)
validator, pool, issuedShares := validator.AddTokensFromDel(pool, sdk.NewInt(10))
require.Equal(t, int64(10), issuedShares.RoundInt64())
keeper.SetPool(ctx, pool)
validator = keeper.UpdateValidator(ctx, validator)

View File

@ -24,7 +24,7 @@ func setupHelper(t *testing.T, amt int64) (sdk.Context, Keeper, types.Params) {
// add numVals validators
for i := 0; i < numVals; i++ {
validator := types.NewValidator(addrVals[i], PKs[i], types.Description{})
validator, pool, _ = validator.AddTokensFromDel(pool, amt)
validator, pool, _ = validator.AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
validator = keeper.UpdateValidator(ctx, validator)
keeper.SetValidatorByPubKeyIndex(ctx, validator)

View File

@ -18,7 +18,7 @@ func TestSetValidator(t *testing.T) {
// test how the validator is set from a purely unbonbed pool
validator := types.NewValidator(addrVals[0], PKs[0], types.Description{})
validator, pool, _ = validator.AddTokensFromDel(pool, 10)
validator, pool, _ = validator.AddTokensFromDel(pool, sdk.NewInt(10))
require.Equal(t, sdk.Unbonded, validator.Status)
assert.True(sdk.DecEq(t, sdk.NewDec(10), validator.Tokens))
assert.True(sdk.DecEq(t, sdk.NewDec(10), validator.DelegatorShares))
@ -61,7 +61,7 @@ func TestUpdateValidatorByPowerIndex(t *testing.T) {
// add a validator
validator := types.NewValidator(addrVals[0], PKs[0], types.Description{})
validator, pool, delSharesCreated := validator.AddTokensFromDel(pool, 100)
validator, pool, delSharesCreated := validator.AddTokensFromDel(pool, sdk.NewInt(100))
require.Equal(t, sdk.Unbonded, validator.Status)
require.Equal(t, int64(100), validator.Tokens.RoundInt64())
keeper.SetPool(ctx, pool)
@ -112,7 +112,7 @@ func TestUpdateBondedValidatorsDecreaseCliff(t *testing.T) {
val := types.NewValidator(Addrs[i], PKs[i], types.Description{Moniker: moniker})
val.BondHeight = int64(i)
val.BondIntraTxCounter = int16(i)
val, pool, _ = val.AddTokensFromDel(pool, int64((i+1)*10))
val, pool, _ = val.AddTokensFromDel(pool, sdk.NewInt(int64((i+1)*10)))
keeper.SetPool(ctx, pool)
val = keeper.UpdateValidator(ctx, val)
@ -177,7 +177,7 @@ func TestCliffValidatorChange(t *testing.T) {
val := types.NewValidator(Addrs[i], PKs[i], types.Description{Moniker: moniker})
val.BondHeight = int64(i)
val.BondIntraTxCounter = int16(i)
val, pool, _ = val.AddTokensFromDel(pool, int64((i+1)*10))
val, pool, _ = val.AddTokensFromDel(pool, sdk.NewInt(int64((i+1)*10)))
keeper.SetPool(ctx, pool)
val = keeper.UpdateValidator(ctx, val)
@ -186,7 +186,7 @@ func TestCliffValidatorChange(t *testing.T) {
// add a large amount of tokens to current cliff validator
currCliffVal := validators[numVals-maxVals]
currCliffVal, pool, _ = currCliffVal.AddTokensFromDel(pool, 200)
currCliffVal, pool, _ = currCliffVal.AddTokensFromDel(pool, sdk.NewInt(200))
keeper.SetPool(ctx, pool)
currCliffVal = keeper.UpdateValidator(ctx, currCliffVal)
@ -199,7 +199,7 @@ func TestCliffValidatorChange(t *testing.T) {
require.Equal(t, GetValidatorsByPowerIndexKey(newCliffVal, pool), cliffPower)
// add small amount of tokens to new current cliff validator
newCliffVal, pool, _ = newCliffVal.AddTokensFromDel(pool, 1)
newCliffVal, pool, _ = newCliffVal.AddTokensFromDel(pool, sdk.NewInt(1))
keeper.SetPool(ctx, pool)
newCliffVal = keeper.UpdateValidator(ctx, newCliffVal)
@ -209,7 +209,7 @@ func TestCliffValidatorChange(t *testing.T) {
require.Equal(t, GetValidatorsByPowerIndexKey(newCliffVal, pool), cliffPower)
// add enough power to cliff validator to be equal in rank to next validator
newCliffVal, pool, _ = newCliffVal.AddTokensFromDel(pool, 9)
newCliffVal, pool, _ = newCliffVal.AddTokensFromDel(pool, sdk.NewInt(9))
keeper.SetPool(ctx, pool)
newCliffVal = keeper.UpdateValidator(ctx, newCliffVal)
@ -229,7 +229,7 @@ func TestSlashToZeroPowerRemoved(t *testing.T) {
// add a validator
validator := types.NewValidator(addrVals[0], PKs[0], types.Description{})
validator, pool, _ = validator.AddTokensFromDel(pool, 100)
validator, pool, _ = validator.AddTokensFromDel(pool, sdk.NewInt(100))
require.Equal(t, sdk.Unbonded, validator.Status)
require.Equal(t, int64(100), validator.Tokens.RoundInt64())
keeper.SetPool(ctx, pool)
@ -256,7 +256,7 @@ func TestValidatorBasics(t *testing.T) {
validators[i] = types.NewValidator(addrVals[i], PKs[i], types.Description{})
validators[i].Status = sdk.Unbonded
validators[i].Tokens = sdk.ZeroDec()
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
assert.True(sdk.DecEq(t, sdk.NewDec(9), validators[0].Tokens))
@ -482,7 +482,7 @@ func TestGetValidatorsEdgeCases(t *testing.T) {
pool := keeper.GetPool(ctx)
moniker := fmt.Sprintf("val#%d", int64(i))
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{Moniker: moniker})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
validators[i] = keeper.UpdateValidator(ctx, validators[i])
}
@ -497,7 +497,7 @@ func TestGetValidatorsEdgeCases(t *testing.T) {
assert.True(ValEq(t, validators[3], resValidators[1]))
pool := keeper.GetPool(ctx)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, 500)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, sdk.NewInt(500))
keeper.SetPool(ctx, pool)
validators[0] = keeper.UpdateValidator(ctx, validators[0])
resValidators = keeper.GetValidatorsByPower(ctx)
@ -514,7 +514,7 @@ func TestGetValidatorsEdgeCases(t *testing.T) {
validators[3], found = keeper.GetValidator(ctx, validators[3].Operator)
require.True(t, found)
validators[3], pool, _ = validators[3].AddTokensFromDel(pool, 1)
validators[3], pool, _ = validators[3].AddTokensFromDel(pool, sdk.NewInt(1))
keeper.SetPool(ctx, pool)
validators[3] = keeper.UpdateValidator(ctx, validators[3])
resValidators = keeper.GetValidatorsByPower(ctx)
@ -532,7 +532,7 @@ func TestGetValidatorsEdgeCases(t *testing.T) {
assert.True(ValEq(t, validators[2], resValidators[1]))
// validator 4 does not get spot back
validators[3], pool, _ = validators[3].AddTokensFromDel(pool, 200)
validators[3], pool, _ = validators[3].AddTokensFromDel(pool, sdk.NewInt(200))
keeper.SetPool(ctx, pool)
validators[3] = keeper.UpdateValidator(ctx, validators[3])
resValidators = keeper.GetValidatorsByPower(ctx)
@ -559,9 +559,9 @@ func TestValidatorBondHeight(t *testing.T) {
validators[1] = types.NewValidator(Addrs[1], PKs[1], types.Description{})
validators[2] = types.NewValidator(Addrs[2], PKs[2], types.Description{})
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, 200)
validators[1], pool, _ = validators[1].AddTokensFromDel(pool, 100)
validators[2], pool, _ = validators[2].AddTokensFromDel(pool, 100)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, sdk.NewInt(200))
validators[1], pool, _ = validators[1].AddTokensFromDel(pool, sdk.NewInt(100))
validators[2], pool, _ = validators[2].AddTokensFromDel(pool, sdk.NewInt(100))
keeper.SetPool(ctx, pool)
validators[0] = keeper.UpdateValidator(ctx, validators[0])
@ -579,8 +579,8 @@ func TestValidatorBondHeight(t *testing.T) {
assert.True(ValEq(t, validators[0], resValidators[0]))
assert.True(ValEq(t, validators[1], resValidators[1]))
validators[1], pool, _ = validators[1].AddTokensFromDel(pool, 50)
validators[2], pool, _ = validators[2].AddTokensFromDel(pool, 50)
validators[1], pool, _ = validators[1].AddTokensFromDel(pool, sdk.NewInt(50))
validators[2], pool, _ = validators[2].AddTokensFromDel(pool, sdk.NewInt(50))
keeper.SetPool(ctx, pool)
validators[2] = keeper.UpdateValidator(ctx, validators[2])
resValidators = keeper.GetValidatorsByPower(ctx)
@ -603,7 +603,7 @@ func TestFullValidatorSetPowerChange(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
keeper.UpdateValidator(ctx, validators[i])
}
@ -624,7 +624,7 @@ func TestFullValidatorSetPowerChange(t *testing.T) {
// test a swap in voting power
pool := keeper.GetPool(ctx)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, 600)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, sdk.NewInt(600))
keeper.SetPool(ctx, pool)
validators[0] = keeper.UpdateValidator(ctx, validators[0])
resValidators = keeper.GetValidatorsByPower(ctx)
@ -642,7 +642,7 @@ func TestClearTendermintUpdates(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
keeper.UpdateValidator(ctx, validators[i])
}
@ -662,7 +662,7 @@ func TestGetTendermintUpdatesAllNone(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
@ -701,7 +701,7 @@ func TestGetTendermintUpdatesIdentical(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
validators[0] = keeper.UpdateValidator(ctx, validators[0])
@ -724,7 +724,7 @@ func TestGetTendermintUpdatesSingleValueChange(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
validators[0] = keeper.UpdateValidator(ctx, validators[0])
@ -752,7 +752,7 @@ func TestGetTendermintUpdatesMultipleValueChange(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
validators[0] = keeper.UpdateValidator(ctx, validators[0])
@ -763,8 +763,8 @@ func TestGetTendermintUpdatesMultipleValueChange(t *testing.T) {
// test multiple value change
// tendermintUpdate set: {c1, c3} -> {c1', c3'}
pool := keeper.GetPool(ctx)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, 190)
validators[1], pool, _ = validators[1].AddTokensFromDel(pool, 80)
validators[0], pool, _ = validators[0].AddTokensFromDel(pool, sdk.NewInt(190))
validators[1], pool, _ = validators[1].AddTokensFromDel(pool, sdk.NewInt(80))
keeper.SetPool(ctx, pool)
validators[0] = keeper.UpdateValidator(ctx, validators[0])
validators[1] = keeper.UpdateValidator(ctx, validators[1])
@ -783,7 +783,7 @@ func TestGetTendermintUpdatesInserted(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
validators[0] = keeper.UpdateValidator(ctx, validators[0])
@ -826,7 +826,7 @@ func TestGetTendermintUpdatesWithCliffValidator(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
validators[0] = keeper.UpdateValidator(ctx, validators[0])
@ -846,7 +846,7 @@ func TestGetTendermintUpdatesWithCliffValidator(t *testing.T) {
require.Equal(t, 0, len(keeper.GetTendermintUpdates(ctx)))
pool := keeper.GetPool(ctx)
validators[2], pool, _ = validators[2].AddTokensFromDel(pool, 10)
validators[2], pool, _ = validators[2].AddTokensFromDel(pool, sdk.NewInt(10))
keeper.SetPool(ctx, pool)
validators[2] = keeper.UpdateValidator(ctx, validators[2])
@ -864,7 +864,7 @@ func TestGetTendermintUpdatesPowerDecrease(t *testing.T) {
for i, amt := range amts {
pool := keeper.GetPool(ctx)
validators[i] = types.NewValidator(Addrs[i], PKs[i], types.Description{})
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, amt)
validators[i], pool, _ = validators[i].AddTokensFromDel(pool, sdk.NewInt(amt))
keeper.SetPool(ctx, pool)
}
validators[0] = keeper.UpdateValidator(ctx, validators[0])

View File

@ -18,7 +18,7 @@ import (
)
// SimulateMsgCreateValidator
func SimulateMsgCreateValidator(m auth.AccountMapper, k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgCreateValidator(m auth.AccountMapper, k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
@ -58,7 +58,7 @@ func SimulateMsgCreateValidator(m auth.AccountMapper, k stake.Keeper) simulation
}
// SimulateMsgEditValidator
func SimulateMsgEditValidator(k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgEditValidator(k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
@ -89,7 +89,7 @@ func SimulateMsgEditValidator(k stake.Keeper) simulation.TestAndRunTx {
}
// SimulateMsgDelegate
func SimulateMsgDelegate(m auth.AccountMapper, k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgDelegate(m auth.AccountMapper, k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
@ -124,7 +124,7 @@ func SimulateMsgDelegate(m auth.AccountMapper, k stake.Keeper) simulation.TestAn
}
// SimulateMsgBeginUnbonding
func SimulateMsgBeginUnbonding(m auth.AccountMapper, k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgBeginUnbonding(m auth.AccountMapper, k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
@ -159,7 +159,7 @@ func SimulateMsgBeginUnbonding(m auth.AccountMapper, k stake.Keeper) simulation.
}
// SimulateMsgCompleteUnbonding
func SimulateMsgCompleteUnbonding(k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgCompleteUnbonding(k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
@ -185,7 +185,7 @@ func SimulateMsgCompleteUnbonding(k stake.Keeper) simulation.TestAndRunTx {
}
// SimulateMsgBeginRedelegate
func SimulateMsgBeginRedelegate(m auth.AccountMapper, k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgBeginRedelegate(m auth.AccountMapper, k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {
@ -224,7 +224,7 @@ func SimulateMsgBeginRedelegate(m auth.AccountMapper, k stake.Keeper) simulation
}
// SimulateMsgCompleteRedelegate
func SimulateMsgCompleteRedelegate(k stake.Keeper) simulation.TestAndRunTx {
func SimulateMsgCompleteRedelegate(k stake.Keeper) simulation.Operation {
return func(t *testing.T, r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,
keys []crypto.PrivKey, log string, event func(string)) (action string, err sdk.Error) {

View File

@ -44,7 +44,7 @@ func TestStakeWithRandomMessages(t *testing.T) {
simulation.Simulate(
t, mapp.BaseApp, appStateFn,
[]simulation.TestAndRunTx{
[]simulation.Operation{
SimulateMsgCreateValidator(mapper, stakeKeeper),
SimulateMsgEditValidator(stakeKeeper),
SimulateMsgDelegate(mapper, stakeKeeper),

View File

@ -371,11 +371,11 @@ func (v Validator) RemoveTokens(pool Pool, tokens sdk.Dec) (Validator, Pool) {
//_________________________________________________________________________________________________________
// AddTokensFromDel adds tokens to a validator
func (v Validator) AddTokensFromDel(pool Pool, amount int64) (Validator, Pool, sdk.Dec) {
func (v Validator) AddTokensFromDel(pool Pool, amount sdk.Int) (Validator, Pool, sdk.Dec) {
// bondedShare/delegatedShare
exRate := v.DelegatorShareExRate()
amountDec := sdk.NewDec(amount)
amountDec := sdk.NewDecFromInt(amount)
if v.Status == sdk.Bonded {
pool = pool.looseTokensToBonded(amountDec)

View File

@ -109,7 +109,7 @@ func TestAddTokensValidatorBonded(t *testing.T) {
pool.LooseTokens = sdk.NewDec(10)
validator := NewValidator(addr1, pk1, Description{})
validator, pool = validator.UpdateStatus(pool, sdk.Bonded)
validator, pool, delShares := validator.AddTokensFromDel(pool, 10)
validator, pool, delShares := validator.AddTokensFromDel(pool, sdk.NewInt(10))
require.Equal(t, sdk.OneDec(), validator.DelegatorShareExRate())
@ -122,7 +122,7 @@ func TestAddTokensValidatorUnbonding(t *testing.T) {
pool.LooseTokens = sdk.NewDec(10)
validator := NewValidator(addr1, pk1, Description{})
validator, pool = validator.UpdateStatus(pool, sdk.Unbonding)
validator, pool, delShares := validator.AddTokensFromDel(pool, 10)
validator, pool, delShares := validator.AddTokensFromDel(pool, sdk.NewInt(10))
require.Equal(t, sdk.OneDec(), validator.DelegatorShareExRate())
@ -136,7 +136,7 @@ func TestAddTokensValidatorUnbonded(t *testing.T) {
pool.LooseTokens = sdk.NewDec(10)
validator := NewValidator(addr1, pk1, Description{})
validator, pool = validator.UpdateStatus(pool, sdk.Unbonded)
validator, pool, delShares := validator.AddTokensFromDel(pool, 10)
validator, pool, delShares := validator.AddTokensFromDel(pool, sdk.NewInt(10))
require.Equal(t, sdk.OneDec(), validator.DelegatorShareExRate())
@ -206,7 +206,7 @@ func TestUpdateStatus(t *testing.T) {
pool.LooseTokens = sdk.NewDec(100)
validator := NewValidator(addr1, pk1, Description{})
validator, pool, _ = validator.AddTokensFromDel(pool, 100)
validator, pool, _ = validator.AddTokensFromDel(pool, sdk.NewInt(100))
require.Equal(t, sdk.Unbonded, validator.Status)
require.Equal(t, int64(100), validator.Tokens.RoundInt64())
require.Equal(t, int64(0), pool.BondedTokens.RoundInt64())
@ -243,7 +243,7 @@ func TestPossibleOverflow(t *testing.T) {
}
tokens := int64(71)
msg := fmt.Sprintf("validator %#v", validator)
newValidator, _, _ := validator.AddTokensFromDel(pool, tokens)
newValidator, _, _ := validator.AddTokensFromDel(pool, sdk.NewInt(tokens))
msg = fmt.Sprintf("Added %d tokens to %s", tokens, msg)
require.False(t, newValidator.DelegatorShareExRate().LT(sdk.ZeroDec()),