Compare commits

...

325 Commits

Author SHA1 Message Date
sevenzing cc9c155391
Running Nginx with the ava client in the same docker container 2020-07-19 14:17:48 +03:00
holisticode 578cc77195
Merge pull request #151 from moreati/ansible-service
ansible: Install AVA as a systemd service
2020-07-14 17:32:51 -05:00
holisticode 658b6f080b
Merge branch 'master' into ansible-service 2020-07-13 14:05:55 -05:00
Stephen Buttolph 634a4d0ee5
Merge pull request #257 from ava-labs/improve-apis
add isBootstrapped method; handle errors when creating APIs
2020-07-03 17:29:52 -04:00
StephenButtolph 18350cf3e3 nit cleanup 2020-07-03 17:07:39 -04:00
Dan Laine a6317bd60f fix nil pointer error 2020-07-03 15:18:39 -04:00
Dan Laine fa4cd10efe address PR comments 2020-07-03 13:45:54 -04:00
Dan Laine d92420d4f4 fix typo causing infinite loop 2020-06-30 17:13:06 -04:00
Dan Laine 1d2e1eb00e style 2020-06-30 16:55:05 -04:00
Dan Laine d3a4dcffef add comment; handle error on health check registration 2020-06-30 16:51:41 -04:00
Dan Laine 1be948f38c add health check to ensure default subnet is bootstrapped. add monotonicHealthCheck type 2020-06-30 14:31:30 -04:00
Dan Laine efaf2f147a change Check to an interface and create implementing type check 2020-06-30 13:28:56 -04:00
Dan Laine 5d7e62f7c7 Merge branch 'improve-apis' into improve-health-service 2020-06-29 18:30:55 -04:00
Dan Laine 628e112570 Merge branch 'master' into improve-health-service 2020-06-29 18:11:29 -04:00
Dan Laine ebf1ae152b grab context of chain before calling isBootstrapped() to avoid race condition 2020-06-29 18:07:48 -04:00
Dan Laine 369666a0f8 Merge branch 'master' of github.com:ava-labs/gecko-internal 2020-06-25 10:30:45 -04:00
Dan Laine 76d82288e1 add isBootstrapped to mockManager 2020-06-24 18:28:33 -04:00
Dan Laine fde0ce7327 add isBootstrapped method to info API 2020-06-24 18:21:09 -04:00
Dan Laine 894c88e492 Merge remote-tracking branch 'origin/master' into improve-health-service 2020-06-24 16:51:23 -04:00
Dan Laine c4605b2f2b add IsBootstrapped method to engine and chain manager 2020-06-24 16:49:11 -04:00
Stephen Buttolph e30f00c0f2
Merge pull request #256 from StephenButtolph/version-bump-0.5.7
Version bump 0.5.7
2020-06-24 14:53:45 -04:00
Dan Laine 4c75989056 don't ignore errors when adding routes; improve logging 2020-06-24 13:27:10 -04:00
Stephen Buttolph c6d3d6d68c
Merge pull request #123 from ava-labs/nat-errors
added error reporting to nat.Map
2020-06-24 12:39:37 -04:00
Dan Laine 3ca3a7377a return 503 rather than 500 when unhealthy 2020-06-24 12:28:16 -04:00
Stephen Buttolph 50b28ed7f3
Merge pull request #121 from ava-labs/lower-log-level
Lower log level
2020-06-24 12:05:44 -04:00
Stephen Buttolph 6791f3489e
Merge pull request #120 from ava-labs/bootstrap-comments
Add comments
2020-06-24 12:05:06 -04:00
StephenButtolph 481f3c41ec removed duplicated call 2020-06-24 12:01:45 -04:00
StephenButtolph eefaed4b1a added error reporting to nat.Map 2020-06-24 11:41:22 -04:00
Dan Laine 5c618d6c1b lower log level 2020-06-24 10:09:36 -04:00
Dan Laine c0629af46e add comments 2020-06-24 09:51:34 -04:00
StephenButtolph 63ea29064d Added some more numbers to interesting logs in bootstrapping 2020-06-23 21:07:35 -04:00
Stephen Buttolph 1f01d43102
Merge pull request #119 from ava-labs/restrict-version-logging
only log to info based on the beacons
2020-06-23 20:59:55 -04:00
StephenButtolph a61d1638a3 only log to info based on the beacons 2020-06-23 20:58:35 -04:00
StephenButtolph 6ba9d2264d version bump to 0.5.7 2020-06-23 20:29:03 -04:00
Stephen Buttolph 283824972d
Merge pull request #71 from ava-labs/upnp
NAT Traversal
2020-06-23 20:27:50 -04:00
StephenButtolph 2e218ea508 merged 2020-06-23 20:20:57 -04:00
Stephen Buttolph 49a5571758
Merge pull request #116 from ava-labs/c-chain-next
Some minor improvements for C-Chain.
2020-06-23 20:15:04 -04:00
StephenButtolph 0cb534031a Merge branch 'denali' into c-chain-next 2020-06-23 20:12:17 -04:00
Stephen Buttolph 5361fb796b
Merge pull request #99 from ava-labs/limit-outstanding-requests
Limit # outstanding GetAncestors to 8; reduce MaxTimeFetchingAncestors
2020-06-23 20:02:29 -04:00
StephenButtolph 26edbc5e6e cleaned up avalanche bootstrapping 2020-06-23 19:57:44 -04:00
StephenButtolph 16f006edc9 Removed no longer upheld invariant 2020-06-23 19:43:03 -04:00
StephenButtolph 8c7934515c removed mutually recursive functions for fetching 2020-06-23 19:41:22 -04:00
StephenButtolph 561e021e67 Merge branch 'denali' into limit-outstanding-requests 2020-06-23 19:19:19 -04:00
Stephen Buttolph f701b9769c
Merge pull request #118 from ava-labs/support-apt-plugins
added local path to plugin
2020-06-23 18:51:08 -04:00
Determinant d40fbe8f75 improve plugin logging 2020-06-23 18:33:57 -04:00
StephenButtolph 1d4c368462 added local path to plugin 2020-06-23 18:23:22 -04:00
Stephen Buttolph 3609a73073
Merge pull request #100 from ava-labs/backwards-compatible-api
Backwards compatible api
2020-06-23 17:48:36 -04:00
StephenButtolph 737430af26 merged 2020-06-23 17:48:02 -04:00
Stephen Buttolph f4bbaa09b4
Merge pull request #117 from ava-labs/enable-keystore
Enable keystore by default
2020-06-23 17:39:17 -04:00
Stephen Buttolph 18c8f949a7
Merge pull request #115 from ava-labs/memory-improvements
Memory improvements
2020-06-23 17:37:54 -04:00
Determinant 7591e93e08 remove go-plugin logging; use coreth 0.2.5 2020-06-23 17:36:20 -04:00
Dan Laine 3d374a73db enable keystore by default 2020-06-23 17:30:45 -04:00
Dan Laine fa11fecbb0 pre-allocate map capacity in consensus 2020-06-23 17:15:25 -04:00
Dan Laine 875b2d0cab remove errant newline 2020-06-23 16:54:25 -04:00
Dan Laine 8ce7bda92a cleanup 2020-06-23 16:54:03 -04:00
Dan Laine 6c6136d551 only downsize underlying arrays if they're much too large 2020-06-23 16:44:02 -04:00
Dan Laine 7f5693dfd3 reduce MaxTimeFetchingAncestors from 100ms to 50ms 2020-06-23 15:08:15 -04:00
Dan Laine 55079aa893 add CappedList for ids.Set and use it in fetchANeededVtx 2020-06-23 15:01:55 -04:00
Dan Laine 19a3f69e99 Merge branch 'duplicated-writes' into memory-improvements 2020-06-23 13:10:01 -04:00
Dan Laine f92fa88d24 commit db after parsing tx to avoid memory leak 2020-06-23 13:04:10 -04:00
Dan Laine 998f4bff40 add comments; remove unnecessary batch write; avoid possible memory leak; reset batch after write 2020-06-23 13:03:23 -04:00
StephenButtolph e7bea2c16f Merge branch 'master' into denali 2020-06-23 12:46:53 -04:00
StephenButtolph b42e4897ed Merge branch 'master' of github.com:ava-labs/gecko 2020-06-23 12:46:34 -04:00
Stephen Buttolph df107ba871
Merge pull request #253 from kurtosis-tech/master
Adding E2E Tests to CircleCI
2020-06-23 12:45:48 -04:00
Stephen Buttolph cb65fca7d8
Merge pull request #110 from ava-labs/api-security
API security improvements
2020-06-22 19:08:01 -04:00
StephenButtolph 7ef37af0d6 changed test to enforce abortions 2020-06-22 18:14:35 -04:00
StephenButtolph 5b6debbaba added regression test 2020-06-22 18:08:20 -04:00
StephenButtolph 7bc66938da Merge branch 'denali' into duplicated-writes 2020-06-22 17:59:09 -04:00
Stephen Buttolph c3c10a0a81
Merge pull request #112 from ava-labs/handler-improvements
handler engine gets/sets
2020-06-22 17:57:51 -04:00
Stephen Buttolph 6664c9bd20
Merge pull request #108 from ava-labs/tighten-cert-permissions
make staking cert/key read-only
2020-06-22 17:40:10 -04:00
Dan Laine c9aa8eedc2 pre-allocate arrays 2020-06-22 16:50:31 -04:00
Dan Laine fc15e3cfe6 prevent potential memory leaks 2020-06-22 16:35:42 -04:00
StephenButtolph 473bef24b1 removed duplicated batch writes, fixed tests 2020-06-22 15:50:52 -04:00
StephenButtolph 3a854ebdec handler engine gets/sets 2020-06-22 15:20:35 -04:00
Dan Laine fc40ad802f lock, mem and CPU profiles write to a fixed filename 2020-06-22 13:18:14 -04:00
Dan Laine c7356a581d open HTTP port iff HTTP server not listening on localhost 2020-06-22 13:06:18 -04:00
Dan Laine 38f7e23647 disable keystore and admin APIs by default 2020-06-22 13:05:47 -04:00
StephenButtolph e3b62549cb Merge branch 'denali' into tighten-cert-permissions 2020-06-22 11:41:37 -04:00
Dan Laine 5cb106d349 make staking cert/key read-only 2020-06-22 10:53:10 -04:00
Dan Laine a20a54af4e Merge branch 'master' of github.com:ava-labs/gecko-internal 2020-06-22 09:45:26 -04:00
Stephen Buttolph db01f2e7e9
Merge pull request #106 from ava-labs/snowball-testing
Clarify that the snowball byzantine struct is only for testing
2020-06-22 01:25:01 -04:00
Stephen Buttolph dbdf3b35f9
Merge pull request #105 from ava-labs/avalanche-testing
Set up tests for error handling in avalanche
2020-06-22 01:24:34 -04:00
Stephen Buttolph 21b2fe8313
Merge pull request #104 from ava-labs/snowstorm-testing
Set up tests for error handling in snowstorm
2020-06-22 01:23:47 -04:00
Stephen Buttolph 18372f03dd
Merge pull request #102 from ava-labs/snowman-testing
Added tests for error checking in snowman
2020-06-22 01:23:18 -04:00
Stephen Buttolph bb6bd861d5
Merge pull request #101 from ava-labs/moar-metrics
Added poll duration metrics
2020-06-22 01:22:49 -04:00
StephenButtolph c88c85ea9b Minor cleanup in snowball consensus 2020-06-22 00:14:19 -04:00
StephenButtolph 12297cb0d2 Clarify that the snowball byzantine struct is only for testing 2020-06-22 00:08:11 -04:00
StephenButtolph 3211546b5a Set up tests for error handling in avalanche 2020-06-21 23:56:08 -04:00
StephenButtolph fb7e491000 Set up tests for error handling in snowstorm 2020-06-21 22:38:53 -04:00
StephenButtolph d923d5c0f9 Cleaned up tests 2020-06-21 21:30:29 -04:00
StephenButtolph 8865eabec7 Added tests for error checking in snowman 2020-06-21 21:26:50 -04:00
StephenButtolph d2573be25f Added duration metrics 2020-06-21 20:00:54 -04:00
StephenButtolph 7a2a7f0add fixed polls metrics 2020-06-21 19:44:35 -04:00
StephenButtolph b0ad887a42 refactored polls into their own sub package 2020-06-21 19:38:24 -04:00
Galen Marchetti 81494fa785
Merge pull request #6 from kurtosis-tech/gmarchetti/remove-set-x
removing set x
2020-06-21 17:10:22 -03:00
galenmarchetti 62340e4f29 removing set x 2020-06-21 17:09:12 -03:00
Galen Marchetti fe8157fc67
Merge pull request #5 from kurtosis-tech/gmarchetti/reset-comments
gmarchetti/reset comments
2020-06-21 17:07:06 -03:00
galenmarchetti e2aea23214 re-enabling osx 2020-06-21 16:13:59 -03:00
galenmarchetti 6a37d268bc replacing whitespace 2020-06-21 16:12:53 -03:00
Galen Marchetti 4acaea9cd2
Merge pull request #4 from kurtosis-tech/gmarchetti/integrate-kurtosis-into-ci
[WIP] gmarchetti/integrate kurtosis into ci
2020-06-20 13:16:12 -03:00
galenmarchetti f4a428351d corrected test-names arg 2020-06-20 13:02:04 -03:00
galenmarchetti 50fba7520b defining just two tests 2020-06-20 12:54:02 -03:00
galenmarchetti 0f0439ff15 calling executable raw 2020-06-20 12:42:23 -03:00
galenmarchetti 551e16fe36 checking build directory 2020-06-20 12:26:36 -03:00
galenmarchetti 4cacb56cf5 commenting controller image label from script 2020-06-20 12:12:53 -03:00
galenmarchetti a523fb184a cleaning CI run script 2020-06-20 10:24:02 -03:00
galenmarchetti 2e16f2087a cleaning up CI script 2020-06-20 10:22:55 -03:00
StephenButtolph 32812e5375 re-added the admin API calls to be backwards compatible 2020-06-19 18:36:45 -04:00
Dan Laine 67d9281501 change maximum # outstanding to 8 to reduce load on nodes 2020-06-19 18:06:04 -04:00
Dan Laine 4d4a073d60 only have 15 outstanding GetAncestors at a time during bootstrapping to not flood the network 2020-06-19 17:57:47 -04:00
StephenButtolph 6c34fd79eb version bump 2020-06-19 17:56:35 -04:00
Stephen Buttolph 2e13c33f23
Merge pull request #86 from ava-labs/bootstrap-process-list
Bootstrap process list
2020-06-19 16:36:45 -04:00
Dan Laine 7c05353d3d skip unnecessary parse 2020-06-19 16:33:33 -04:00
Dan Laine 245f13ea65 add stripeDistance and stripeWidth 2020-06-19 16:29:27 -04:00
Stephen Buttolph f2126cba19
Merge pull request #87 from ava-labs/remove-tx-dependencies
use transitive dependencies when possible with transaction dependencies
2020-06-19 16:16:11 -04:00
Stephen Buttolph 20d24b42b0
Merge pull request #93 from ava-labs/improve-memory
improve leveldb batch usage and pointer releasing in caches
2020-06-19 16:15:30 -04:00
Stephen Buttolph 145edd2864
Merge pull request #94 from ava-labs/fix-query-failed-message-formatting
Fix query failed message formatting
2020-06-19 16:15:07 -04:00
Stephen Buttolph c6bdd5247f
Merge pull request #95 from ava-labs/health-get-api
Health get api
2020-06-19 16:14:46 -04:00
Stephen Buttolph 328aa57a11
Merge pull request #98 from ava-labs/consensus-error-handling
report error returned by the snowstorm RecordPoll
2020-06-19 16:14:28 -04:00
StephenButtolph 975198bb5e report error returned by the snowstorm RecordPoll 2020-06-19 15:13:34 -04:00
galenmarchetti 0627c7c28e turning GO111MODULE off to pull 2020-06-19 15:50:09 -03:00
Dan Laine 979f4e2759 GET to ext/health returns 200 if healthy, else 500 2020-06-19 14:05:11 -04:00
Dan Laine 27bdba4776 GET to /ext/health returns a 200 and no body, per Fabio's request 2020-06-19 13:44:57 -04:00
Aaron Buchwald 25478846d3
Remove extra string formatter in chain router QueryFailed message 2020-06-19 13:34:47 -04:00
StephenButtolph e9517c166d Merge branch 'denali' into remove-tx-dependencies 2020-06-19 13:31:14 -04:00
StephenButtolph 534ef0bffc Merge branch 'denali' into improve-memory 2020-06-19 13:30:55 -04:00
Stephen Buttolph 4056dc49cd
Merge pull request #92 from ava-labs/fix-param-access
Fix param attribute access
2020-06-19 13:28:13 -04:00
Aaron Buchwald 28a313becf
Fix param attribute access 2020-06-19 13:02:28 -04:00
Dan Laine 8b75abdee0 cache only contains vertices at height 5000, 10000, etc. 2020-06-19 12:18:20 -04:00
StephenButtolph 006ff75149 improve leveldb batch usage and pointer releasing in caches 2020-06-19 11:02:38 -04:00
galenmarchetti 878056d24a pulling in gopath 2020-06-19 08:49:09 -03:00
galenmarchetti 9ac4472a27 removing -d 2020-06-19 08:28:39 -03:00
galenmarchetti ea7b77ca9d adding correct cd 2020-06-19 08:17:35 -03:00
galenmarchetti 5cc4aa4718 adding debugging steps 2020-06-19 08:16:43 -03:00
galenmarchetti 4b22442fd2 changing path to access ava-e2e-tests 2020-06-19 08:01:06 -03:00
galenmarchetti 1be5daf5cf updating CI 2020-06-19 07:44:28 -03:00
Aaron Buchwald a3d3ef4787 Change ForceAccepted to process list of already stored vtxs 2020-06-18 14:49:38 -04:00
StephenButtolph f78d7b3caf use transitive dependencies when possible with transaction dependencies 2020-06-18 13:34:04 -04:00
Galen Marchetti 3de02097da
Merge pull request #3 from ava-labs/master
updating against ava-labs base
2020-06-18 09:04:24 -03:00
Stephen Buttolph a65a83e1e2
Merge branch 'master' into upnp 2020-06-18 02:12:34 -04:00
StephenButtolph be6be7ae1c reverted breaking changes 2020-06-18 01:45:58 -04:00
Stephen Buttolph 45b9a230b6
Merge pull request #83 from ava-labs/lets-format
formatting changes
2020-06-18 00:54:40 -04:00
StephenButtolph 84c4e30937 formatting update 2020-06-18 00:30:15 -04:00
StephenButtolph 3eb9788976 formatting changes 2020-06-17 23:29:30 -04:00
Stephen Buttolph 8f009cfa4a
Merge branch 'master' into upnp 2020-06-17 22:34:10 -04:00
StephenButtolph a1b1ad2da4 address golint errors 2020-06-17 22:33:41 -04:00
Stephen Buttolph f616952736
Merge pull request #82 from ava-labs/bootstrap-heap
move cache check and use heap in bootstrapping
2020-06-17 22:09:27 -04:00
Stephen Buttolph e8114cd814
Merge branch 'master' into bootstrap-heap 2020-06-17 20:58:45 -04:00
Stephen Buttolph 36f305e5f6
Merge pull request #81 from ava-labs/bootstrap-bug-fix
patch for bug that caused bootstrapping to never finish
2020-06-17 20:58:21 -04:00
Stephen Buttolph 9543966d44
Merge branch 'master' into bootstrap-heap 2020-06-17 20:57:56 -04:00
Dan Laine 15898c4ac2 style fix 2020-06-17 20:41:02 -04:00
Dan Laine f0cd642c2d move cache check out of loop 2020-06-17 20:35:28 -04:00
Stephen Buttolph d4373f2a0a
Merge branch 'master' into bootstrap-bug-fix 2020-06-17 20:31:42 -04:00
StephenButtolph 9a43e1222b added tests 2020-06-17 20:31:13 -04:00
Dan Laine f4a789b433 use heap in bootstrapping to reduce amount of work done. Move cache check to reduce memory allocations. 2020-06-17 20:15:42 -04:00
Dan Laine 4da1ce58fb patch for bug that caused bootstrapping to never finish 2020-06-17 19:42:11 -04:00
Hongbo Zhang 20637a0f23 dropped error msg from unmapallport 2020-06-17 17:13:35 -04:00
Stephen Buttolph e5a6c00fce
Merge pull request #78 from ava-labs/optimize-state
Optimize core block / state
2020-06-17 16:00:27 -04:00
Stephen Buttolph def156db42
Merge branch 'master' into optimize-state 2020-06-17 16:00:05 -04:00
StephenButtolph 0e16d298ae merged 2020-06-17 15:54:55 -04:00
Stephen Buttolph 8d5559393f
Merge branch 'master' into optimize-state 2020-06-17 13:58:26 -04:00
Stephen Buttolph 22cccd02f9
Merge pull request #77 from ava-labs/patch-add-default-subnet-delegator-tx
Patch add default subnet delegator tx
2020-06-17 13:58:03 -04:00
Stephen Buttolph f747eea3d9
Merge branch 'master' into patch-add-default-subnet-delegator-tx 2020-06-17 13:24:57 -04:00
Gabriel Cardona f40fa7d7e6 Formatting. 2020-06-17 10:21:11 -07:00
Aaron Buchwald 077afc20e7
Adjust delegator test cases 2020-06-17 13:16:37 -04:00
Gabriel Cardona 82b91e5244 Add tests for platform.addDefaultSubnetDelegator for confirming tx fails when attempting to delegate too much as well as confirming balance is correct after delegating. 2020-06-17 10:00:41 -07:00
Dan Laine 8e4c57bcd3 Merge remote-tracking branch 'origin/master' into optimize-state 2020-06-17 09:04:20 -04:00
Dan Laine 0270a485e6 Merge branch 'master' of github.com:ava-labs/gecko-internal 2020-06-17 09:02:11 -04:00
Gabriel Cardona 553bc117b0 Merge branch 'master' into patch-add-default-subnet-delegator-tx 2020-06-17 04:37:05 -07:00
Stephen Buttolph 3aa89c3926
Merge pull request #76 from ava-labs/polls-early-termination
Terminate poll early when possible
2020-06-16 23:58:02 -04:00
StephenButtolph 3d6fff70e0 nits to clean up the PR 2020-06-16 23:53:19 -04:00
Stephen Buttolph 19d871538e
Merge branch 'master' into polls-early-termination 2020-06-16 23:29:35 -04:00
Stephen Buttolph be5957875c
Merge pull request #55 from ava-labs/ping-pong
Added ping pong messages
2020-06-16 23:27:19 -04:00
StephenButtolph 29fc15a8f9 merged 2020-06-16 23:13:05 -04:00
StephenButtolph 8ec2862c04 Merge branch 'master' of github.com:ava-labs/gecko-internal 2020-06-16 23:08:29 -04:00
StephenButtolph 181bd76b10 Merge branch 'master' of github.com:ava-labs/gecko 2020-06-16 23:08:17 -04:00
Stephen Buttolph 6c8c70b1ca
Merge pull request #65 from ava-labs/lower-gossip-logs
lower log level for gossiped put messages
2020-06-16 23:07:39 -04:00
Stephen Buttolph d3d6471a2a
Merge branch 'master' into lower-gossip-logs 2020-06-16 23:04:58 -04:00
Stephen Buttolph ea9dcec5a0
Merge pull request #75 from ava-labs/set-cache
pre-allocate slices in various places
2020-06-16 23:04:27 -04:00
Aaron Buchwald aab8f5f3d4 Implement early termination case for avalanche polling 2020-06-16 19:29:08 -04:00
Dan Laine ddcc2d73a2 lazily fetch block status 2020-06-16 18:14:16 -04:00
Dan Laine 4223e1f9d5 remove unnecessary call to Has 2020-06-16 17:51:49 -04:00
Dan Laine eda52b631e Merge branch 'master' of github.com:ava-labs/gecko 2020-06-16 17:32:28 -04:00
Dan Laine e0d00e25c7 fix typo 2020-06-16 17:23:48 -04:00
Stephen Buttolph aca163714d
fixed typo 2020-06-16 17:21:23 -04:00
StephenButtolph b5579776e1 Merge branch 'ping-pong' of github.com:ava-labs/gecko-internal into ping-pong 2020-06-16 16:53:45 -04:00
StephenButtolph 8edcb1689b bump version for everest 2020-06-16 16:52:46 -04:00
Stephen Buttolph 45512ddef6
Merge branch 'master' into ping-pong 2020-06-16 16:46:05 -04:00
StephenButtolph 7feb0c335e merged 2020-06-16 16:45:41 -04:00
Stephen Buttolph 8fe8586dab
Merge branch 'master' into lower-gossip-logs 2020-06-16 16:35:28 -04:00
StephenButtolph 52b6c1df21 merged 2020-06-16 16:35:13 -04:00
Stephen Buttolph 89ab9fce4b
Merge branch 'master' into set-cache 2020-06-16 16:28:31 -04:00
Stephen Buttolph 9562dc1054
Merge pull request #68 from ava-labs/network-upgrade
Network upgrade
2020-06-16 16:20:43 -04:00
Dan Laine 77d24022fe add minimumCacheSize 2020-06-16 16:11:21 -04:00
StephenButtolph ad130e848e merged 2020-06-16 16:10:45 -04:00
Dan Laine 4ecd92efba add minimum size to uniqueBag and Requests 2020-06-16 15:43:30 -04:00
Dan Laine 191cd48593 add minimum map size to Blocker 2020-06-16 15:34:34 -04:00
Dan Laine 8fdeef5eb6 pre-allocate slices for List in set, bag, shortSet 2020-06-16 15:17:13 -04:00
Stephen Buttolph bf7cad2a37
Merge pull request #74 from ava-labs/fix-consensus-bugs
Fix consensus bugs
2020-06-16 14:45:10 -04:00
Stephen Buttolph b1923d7dee
Merge pull request #251 from aaronbuchwald/optimize-insert-from
Optimize DAG traversal in insertFrom
2020-06-16 14:40:38 -04:00
Stephen Buttolph bfa13fa077
Merge branch 'master' into fix-consensus-bugs 2020-06-16 14:20:54 -04:00
StephenButtolph 11f66e1394 merged 2020-06-16 14:17:47 -04:00
Hongbo Zhang 210ad164f3 resolve comments for PR 71; change log levels; type check
type check

...
2020-06-16 12:12:37 -04:00
Hongbo Zhang 571b6e597b bring pmp back 2020-06-16 12:06:24 -04:00
Hongbo Zhang fb51e6a443 check failed port mapping
handle failed mapping

set retry to 20
2020-06-16 12:06:24 -04:00
Hongbo Zhang 3cfba77c70 staking internal port and external port could be different
get mapped port entry; change interface to mapper
2020-06-16 12:06:23 -04:00
Hongbo Zhang f8301f11c2 fix upnp port detection and retry on mapping
clean up

rm test
2020-06-16 12:05:09 -04:00
Hongbo Zhang 661ee3a542 support machine with public IP 2020-06-16 12:05:09 -04:00
Hongbo Zhang 3281da4ff2 NAT test 2020-06-16 12:05:09 -04:00
Hongbo Zhang 54e1c4031e upnp 2020-06-16 12:05:08 -04:00
Hongbo Zhang 96cfcc0b5b NAT test 2020-06-16 12:03:01 -04:00
Hongbo Zhang aaa00b3488 upnp 2020-06-16 12:03:01 -04:00
Aaron Buchwald 0fdddae9fc
Optimize DAG traversal in insertFrom 2020-06-16 11:53:57 -04:00
Stephen Buttolph 0d9a21b458
Merge pull request #69 from ava-labs/codec-upgrade
Codec upgrade
2020-06-16 01:09:56 -04:00
Stephen Buttolph 284d0ee765
Merge branch 'master' into codec-upgrade 2020-06-16 00:45:20 -04:00
Stephen Buttolph 639ef4b22f
Merge pull request #70 from ava-labs/log-level-and-format
Change RPC log levels
2020-06-16 00:40:45 -04:00
Stephen Buttolph 93cb630a6f
Merge branch 'master' into log-level-and-format 2020-06-16 00:19:59 -04:00
Stephen Buttolph 039ea3c0c3
Merge pull request #73 from ava-labs/fix-platform-bootstrapped
Fix platform Bootstrapped function
2020-06-16 00:18:32 -04:00
Gabriel Cardona aa5422696e Set helpers to Debug log level. 2020-06-15 21:14:54 -07:00
Aaron Buchwald b950f016d8
Fix platform bootstrapped function to initialize fx 2020-06-15 21:53:29 -04:00
Gabriel Cardona ec953d6ec3 Fix log level. 2020-06-15 15:08:03 -07:00
Gabriel Cardona ef0b9bfb1e Merge branch 'master' into log-level-and-format 2020-06-15 15:07:44 -07:00
Stephen Buttolph 58c9093279
Merge pull request #202 from Shashank-In/patch-1
Fix for KeyStore DoS vulnerability. Resolves #195
2020-06-15 17:46:41 -04:00
Stephen Buttolph 91852fe932
nit 2020-06-15 17:08:25 -04:00
Stephen Buttolph 413ff7b0b4
Merge branch 'master' into patch-1 2020-06-15 17:06:43 -04:00
Stephen Buttolph 6abf3a8c02
Merge pull request #220 from ava-labs/improve-network-logging
Improve network logging
2020-06-15 16:00:52 -04:00
StephenButtolph 8fb00cf67c Merge branch 'improve-network-logging' of github.com:ava-labs/gecko into improve-network-logging 2020-06-15 15:51:20 -04:00
StephenButtolph c3c9cec1ea updated new messages to match new logging format 2020-06-15 15:51:12 -04:00
Stephen Buttolph 7fc3b65c86
Merge branch 'master' into improve-network-logging 2020-06-15 15:44:23 -04:00
StephenButtolph e4a0e73155 merged router + added optimization around failed gets 2020-06-15 15:43:59 -04:00
Stephen Buttolph 7c60e8d966
Merge pull request #245 from aaronbuchwald/duplicates-avm-import-key
Prevent duplicated addresses in avm import key
2020-06-15 15:22:40 -04:00
Stephen Buttolph 21d65dc167
Merge branch 'master' into duplicates-avm-import-key 2020-06-15 15:14:01 -04:00
Stephen Buttolph 34943cf366
Merge pull request #234 from aaronbuchwald/fix-unissued-events-ordering
Use Add instead of Push to correctly order timed txs in event heap
2020-06-15 15:12:22 -04:00
Aaron Buchwald dc06427166 Merge branch 'duplicates-avm-import-key' of https://github.com/aaronbuchwald/gecko into duplicates-avm-import-key 2020-06-15 14:21:58 -04:00
Aaron Buchwald 8783844aca
Fix nitpick 2020-06-15 14:20:16 -04:00
Stephen Buttolph 239b99a29d
Merge branch 'master' into fix-unissued-events-ordering 2020-06-15 14:06:57 -04:00
Stephen Buttolph c86265baf8
Merge branch 'master' into split-admin-api 2020-06-15 14:06:38 -04:00
Stephen Buttolph 9c30c33bb0
Merge pull request #244 from moreati/version-versions
main: Added database & network version to -version
2020-06-15 14:05:34 -04:00
Stephen Buttolph 34e1c9de2f
Merge branch 'master' into version-versions 2020-06-15 13:56:33 -04:00
Stephen Buttolph de6c5721a8
Merge branch 'master' into duplicates-avm-import-key 2020-06-15 13:41:20 -04:00
Stephen Buttolph 9236eb286f
Merge pull request #250 from aaronbuchwald/debug-staking-disabled
Decouple staking and tls encryption for internode communication
2020-06-15 13:38:14 -04:00
Dan Laine 18c0b0a65b move codec to utils 2020-06-15 13:20:30 -04:00
Gabriel Cardona fa4be45d8a Update go.sum 2020-06-15 10:15:43 -07:00
Dan Laine acbb9a7e0c remove expansionBoost from packer (Go's append does similar already). change initialSliceCap 1024 --> 256. Streamline packer.Expand, as this method is called very often 2020-06-15 13:12:55 -04:00
Gabriel Cardona cdac10c23b Fix typos. 2020-06-15 09:45:21 -07:00
Gabriel Cardona f59f45a20f Make all RPC call logs `Info` level. 2020-06-15 09:35:41 -07:00
Dan Laine a84abacea5 fix typos and removed useless benchmark 2020-06-15 10:55:09 -04:00
Dan Laine 979477d68f change initialSliceCap --> 1024 2020-06-15 10:46:22 -04:00
Dan Laine cea79f66b2 add to tests; comment them 2020-06-15 10:33:08 -04:00
Dan Laine f28b69b819 set initial slice capacity for packer; packer pre-allocates capacity when possible 2020-06-15 10:06:40 -04:00
Aaron Buchwald ba2b214b58 Decouple staking and tls encryption for internode communication 2020-06-15 09:39:56 -04:00
Dan Laine b269f8cfb0 marshal writes directly to packer rather than creating array of functions 2020-06-14 21:38:07 -04:00
Dan Laine d1796c8b0b cleanup; revert string packing method 2020-06-14 18:15:44 -04:00
Dan Laine 9c4cfecf4e pack pointer to string instead of string...halves memory footprint 2020-06-14 12:23:05 -04:00
Dan Laine ee1cf620a1 cleanup 2020-06-14 12:06:39 -04:00
Dan Laine 7b5b3d1f1c more optimizations/cleanup 2020-06-14 11:53:19 -04:00
Dan Laine f6cabee51b cache serializable fields of struct types; change codec methods to be on pointer type; change variable names; change benchmark to include both marshaling and unmarshaling 2020-06-14 10:56:43 -04:00
Gabriel Cardona d85ef87695 * Use Debug log level for all RPC calls
* Use convention: [vm/api: function_name called...]. Ex: "Platform: SampleValidators called...
2020-06-13 11:06:32 -07:00
Alex Willmer 760c32c4ac main: Added database version & default network to -version 2020-06-13 09:37:08 +01:00
Dan Laine 954074abcc optimize by reducing amount of data stored on heap 2020-06-12 19:26:03 -04:00
Dan Laine 42deac45e9 use pre-allocated array of functions 2020-06-12 19:03:08 -04:00
Dan Laine a895b691a2 change wire format to be exact same as it was before 2020-06-12 16:52:58 -04:00
Aaron Buchwald 2fb88906cc
Add testing for avm API ImportKey 2020-06-12 15:10:39 -04:00
Aaron Buchwald 1572b1bd97
Return early when finding address already exists 2020-06-12 14:09:45 -04:00
Aaron Buchwald 750f7b2120
Improve error message in platform API IssueTx call 2020-06-12 13:49:26 -04:00
Aaron Buchwald 26f5503a43 Add test to ensure IssueTx maintains ordering of unissued events heap 2020-06-12 12:58:35 -04:00
Aaron Buchwald e15c1bad8c Use Add instead of Push to correctly order timed txs in event heap 2020-06-12 12:58:35 -04:00
Dan Laine 617a158097 use wrappers.packer instead of byte array 2020-06-12 10:41:02 -04:00
Dan Laine 7879dd1768 upgrade codec to be more efficient. Passes all codec tests. Failing some other tests due to new format 2020-06-11 18:16:21 -04:00
StephenButtolph c071e56cfa Merge branch 'master' into network-upgrade 2020-06-11 18:11:56 -04:00
StephenButtolph 960377e2b4 cleaned up imports 2020-06-11 18:08:42 -04:00
StephenButtolph f52d0c29bd Register a timeout for querying ourselves to ensure we never drop a query 2020-06-11 18:00:21 -04:00
Aaron Buchwald e073b4e8ad
Prevent duplicated addresses in avm import key 2020-06-11 17:01:28 -04:00
Gabriel Cardona 3d60db3a05 Subtract from balance when adding a default subnet delegator. 2020-06-11 13:14:02 -07:00
Alex Willmer f61d5f4628 ansible: Become a super user to install ava 2020-06-11 19:59:14 +01:00
Alex Willmer a3aba825c0 ansible: Fetch git repo over https 2020-06-11 19:58:46 +01:00
Alex Willmer 7f3316bb89 Merge remote-tracking branch 'upstream/master' into ansible-service 2020-06-11 19:57:01 +01:00
Aaron Buchwald 64b2df39b5
Split admin api into admin and info apis 2020-06-10 16:47:31 -04:00
Dan Laine b576f27397 comments/reorganize 2020-06-10 16:20:40 -04:00
Dan Laine 3a4ffb4850 lower log level for gossiped put messages 2020-06-08 20:30:03 -04:00
Alex Willmer 6a5305f0a9 Merge branch 'master' into ansible-service 2020-06-08 12:24:11 +01:00
Dan Laine c560eeab37 lower/improve gossip logging 2020-06-06 11:50:46 -04:00
Dan Laine 9e74fdf15d improve network logging 2020-06-06 11:48:13 -04:00
Alex Willmer 62206086ac Merge branch 'master' of github.com:ava-labs/gecko into ansible-service 2020-06-05 23:27:56 +01:00
mieubrisse e771c2f956 Merge remote-tracking branch 'upstream/master' 2020-06-05 16:07:46 -03:00
StephenButtolph 8c42f14a49 Added ping pong messages 2020-06-04 01:57:43 -04:00
Shashank 311ce90977
Fixed go format 2020-06-03 19:47:39 +05:30
Shashank f8405794b7
Merge branch 'master' into patch-1 2020-06-03 14:14:43 +05:30
Shashank 6dc67bbf70
Updated fix for issue 195
https://github.com/ava-labs/gecko/issues/195
2020-06-03 14:08:57 +05:30
Shashank 8e8dd7529b
Fix for KeyStore DoS vulnerability
https://github.com/ava-labs/gecko/issues/195
2020-06-02 22:47:02 +05:30
Galen Marchetti 76e712242a
Merge pull request #2 from kurtosis-tech/gmarchetti/kurtosis-e2e-ci-integration
kurtosis e2e ci integration with gecko
2020-06-02 07:21:50 -03:00
galenmarchetti 1ae9c76c53 isolating the kurtosis testing 2020-05-31 14:48:33 -03:00
galenmarchetti 72330d494c removing debugging statements 2020-05-31 14:25:47 -03:00
galenmarchetti f8cea1d299 travisyml now minimally modified to include kurtosis tests 2020-05-31 14:25:11 -03:00
galenmarchetti 6fac7c2446 fixing travis yml 2020-05-31 14:13:07 -03:00
galenmarchetti c6954227eb build image script 2020-05-31 14:09:29 -03:00
galenmarchetti 2036c7233a building with docker deploy in scripts 2020-05-31 13:50:32 -03:00
galenmarchetti dea626aea3 putting in debugging statements 2020-05-31 13:10:05 -03:00
galenmarchetti 374062aa1a sleeping 90 2020-05-31 12:56:16 -03:00
galenmarchetti 0165827857 removing osx build to debug kurtosis runs; 2020-05-31 10:28:21 -03:00
galenmarchetti d85a016310 e2e tests must be runnable 2020-05-31 10:09:38 -03:00
galenmarchetti 37b7440788 pulling images 2020-05-31 10:06:36 -03:00
galenmarchetti 3eb9efded9 modifying travisci yaml 2020-05-31 10:03:02 -03:00
galenmarchetti 081ab1146d defining basic e2e tests ci 2020-05-31 10:01:54 -03:00
Galen Marchetti 5eeb7cb88a
Merge pull request #1 from ava-labs/master
Updating from ava-labs
2020-05-31 09:56:59 -03:00
Alex Willmer 1b84ac3190 ansible: restrict RPC to localhost by default 2020-05-24 11:55:33 +01:00
Alex Willmer 756fdee142 ansible: Default to /usr/local and /var/local
Following discussion in https://github.com/ava-labs/gecko/pull/151 it
was decided that /usr (and by implication /var) should be reserved for
OS package managers (e.g. apt, yum).
2020-05-24 11:54:45 +01:00
Alex Willmer 4cd9569e21 ansible: Split installation of Go from other dependencies
This makes it easier to skip, if Go has been installed by other means.
2020-05-24 11:49:28 +01:00
Alex Willmer e067a150f0 ansible: Add shebangs to playbooks
This allows them to be run as scripts. They take the same options and
arguments as ansible-playbook.
2020-05-24 11:48:12 +01:00
Alex Willmer 984adf0440
Merge branch 'master' into ansible-service 2020-05-14 22:12:05 +01:00
Stephen Buttolph f737368d65
Merge branch 'master' into ansible-service 2020-05-12 22:42:33 -04:00
Alex Willmer 9a3f803bad ansible: Speedup playbook execution by reusing ssh connections
This restores the default ssh_args for Ansible. When present Ansible
instructs ssh to keep a connection open for a short period after
exiting. Subsequent ssh processes can then skip TCP & SSH hand shake.
2020-05-12 23:16:46 +01:00
Alex Willmer 9b68599834 ansible: Speedup playbook execution with pipelining 2020-05-12 23:12:16 +01:00
Alex Willmer f491dda625 ansible: Use builtin config to control host key checking 2020-05-12 23:11:09 +01:00
Alex Willmer e7a06ff207 ansible: Don't continue playbook execution if a task fails
The Ansible equivalent of `set -o errexit` in bash
2020-05-12 23:10:07 +01:00
Alex Willmer f3776cac9e ansible: Remove duplicated default variables 2020-05-12 23:08:34 +01:00
Alex Willmer a92630631a ansible: Migrate staking key of AVA service to new location
Refs #145
2020-05-12 23:08:16 +01:00
Alex Willmer 74588da411 ansible: Conform to new staking key layout
Refs #145
2020-05-12 23:07:06 +01:00
Alex Willmer 777afed793 ansible: Remove repo_* variable overrides that match defaults 2020-05-12 23:04:11 +01:00
Alex Willmer 50b41b3468 ansible: Fix typos 2020-05-12 23:02:15 +01:00
Alex Willmer f9e71f2e03 Merge branch 'master' into ansible-service 2020-05-12 21:21:43 +01:00
Alex Willmer 9e318bb3d5 Merge branch 'master' of github.com:ava-labs/gecko into ansible-service 2020-05-11 20:21:24 +01:00
Alex Willmer 99ca4a50af ansible: Add service_playbook.yml & supporting roles
This playbook

 - Installs Gecko dependencies
 - Clones & builds ava-build/gecko
 - Creates an ava user
 - Installs Gecko in /usr/bin
 - Creates and installs a staking certificate
 - Installs Gecko as a Systemd service called "ava"
 - Configures /var/lib/ava/db as the database
 - Configures /var/log/ava as the log destination
 - Starts the service
2020-05-09 23:01:23 +01:00
Alex Willmer bba45ed183 ansible: Add git defaults to ansible-build role 2020-05-09 22:52:04 +01:00
StephenButtolph 27eb3ca6ee fixed bugs added for the bug bounty 2020-03-10 16:10:53 -04:00
205 changed files with 7908 additions and 3245 deletions

.ci/run_e2e_tests.sh (new executable file, 16 lines added)

@ -0,0 +1,16 @@
SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
SRC_PATH=$(dirname "${SCRIPTS_PATH}")
# Build the runnable Gecko docker image
bash "${SRC_PATH}"/scripts/build_image.sh
GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1)
# Turn off GO111MODULE to pull e2e test source code in order to get run script.
GO111MODULE=off go get -t -v github.com/kurtosis-tech/ava-e2e-tests/...
cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ || exit
bash "./scripts/rebuild_initializer_binary.sh"
bash "./scripts/rebuild_controller_image.sh"
# TODO: Make the controller image label a parameter to rebuild_controller_image script
# Standard controller image label used by above scripts.
CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest"
./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" --test-names="fiveStakingNodeGetValidatorsTest,fiveStakingNodeFullyConnectedTest"

.dockerignore (new file, 5 lines added)

@ -0,0 +1,5 @@
.env*
docker-compose.yml
Dockerfile
README.md
LICENSE

.env.example (new file, 11 lines added)

@ -0,0 +1,11 @@
# Path to ava node
RPC_URL=http://localhost:9650/ext/bc/C/rpc
# Local path for url-request
LOCAL_URL=/rpc
# Local port for url-request
NGINX_PORT=4444
# Options for binary ava file
AVA_CMD_OPTIONS=--db-dir /db

View File

@ -18,6 +18,7 @@ jobs:
scripts/ansible/kill_playbook.yml
scripts/ansible/ping_playbook.yml
scripts/ansible/restart_playbook.yml
scripts/ansible/service_playbook.yml
scripts/ansible/update_playbook.yml
args:

.gitignore (vendored, 1 line added)

@ -12,6 +12,7 @@ awscpu
*.dylib
*.profile
.env
# Test binary, build with `go test -c`
*.test

View File

@ -14,6 +14,7 @@ env:
global:
- CODECOV_TOKEN="8c18c993-fc6e-4706-998b-01ddc7987804"
- GECKO_HOME=/go/src/github.com/ava-labs/gecko/
- E2E_TEST_HOME=/go/src/github.com/kurtosis-tech/ava-e2e-tests/
- COMMIT=${TRAVIS_COMMIT::8}
- DOCKERHUB_REPO=avaplatform/gecko
- secure: "L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw="
@ -26,7 +27,7 @@ install:
script:
- if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; fi
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; .ci/run_e2e_tests.sh; fi
#Need to push to docker hub only from one build
after_success:

View File

@ -1,11 +1,16 @@
# syntax=docker/dockerfile:experimental
FROM golang:1.13.4-buster
FROM golang:1.13.4-buster AS builder
RUN mkdir -p /go/src/github.com/ava-labs
WORKDIR $GOPATH/src/github.com/ava-labs/
WORKDIR /src/github.com/ava-labs/
COPY . gecko
WORKDIR $GOPATH/src/github.com/ava-labs/gecko
WORKDIR /src/github.com/ava-labs/gecko
RUN ./scripts/build.sh
FROM nginx:latest
COPY --from=builder /src/github.com/ava-labs/gecko/build /build
COPY entrypoint.sh nginx.template ./
CMD ["/bin/bash", "entrypoint.sh"]

View File

@ -10,6 +10,15 @@ import (
"runtime/pprof"
)
const (
// Name of file that CPU profile is written to when StartCPUProfiler called
cpuProfileFile = "cpu.profile"
// Name of file that memory profile is written to when MemoryProfile called
memProfileFile = "mem.profile"
// Name of file that lock profile is written to
lockProfileFile = "lock.profile"
)
var (
errCPUProfilerRunning = errors.New("cpu profiler already running")
errCPUProfilerNotRunning = errors.New("cpu profiler doesn't exist")
@ -20,12 +29,12 @@ var (
type Performance struct{ cpuProfileFile *os.File }
// StartCPUProfiler starts measuring the cpu utilization of this node
func (p *Performance) StartCPUProfiler(filename string) error {
func (p *Performance) StartCPUProfiler() error {
if p.cpuProfileFile != nil {
return errCPUProfilerRunning
}
file, err := os.Create(filename)
file, err := os.Create(cpuProfileFile)
if err != nil {
return err
}
@ -52,8 +61,8 @@ func (p *Performance) StopCPUProfiler() error {
}
// MemoryProfile dumps the current memory utilization of this node
func (p *Performance) MemoryProfile(filename string) error {
file, err := os.Create(filename)
func (p *Performance) MemoryProfile() error {
file, err := os.Create(memProfileFile)
if err != nil {
return err
}
@ -66,8 +75,8 @@ func (p *Performance) MemoryProfile(filename string) error {
}
// LockProfile dumps the current lock statistics of this node
func (p *Performance) LockProfile(filename string) error {
file, err := os.Create(filename)
func (p *Performance) LockProfile() error {
file, err := os.Create(lockProfileFile)
if err != nil {
return err
}
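
The profiler diff above removes the caller-supplied filenames: CPU, memory and lock profiles are now always written to the fixed cpu.profile, mem.profile and lock.profile files. A minimal sketch of driving the updated Performance type directly, assuming it is exported from an importable package (the import path below is a guess, not shown in the diff):

package main

import (
	"log"

	// Assumed location of the Performance type shown in the diff above.
	"github.com/ava-labs/gecko/api/admin"
)

func main() {
	p := &admin.Performance{}

	// Start and stop a CPU profile; output goes to the fixed cpu.profile file.
	if err := p.StartCPUProfiler(); err != nil {
		log.Fatal(err)
	}
	// ... run the workload being profiled ...
	if err := p.StopCPUProfiler(); err != nil {
		log.Fatal(err)
	}

	// One-shot dumps to the fixed mem.profile and lock.profile files.
	if err := p.MemoryProfile(); err != nil {
		log.Fatal(err)
	}
	if err := p.LockProfile(); err != nil {
		log.Fatal(err)
	}
}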

View File

@ -57,7 +57,7 @@ type GetNodeVersionReply struct {
// GetNodeVersion returns the version this node is running
func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
service.log.Debug("Admin: GetNodeVersion called")
service.log.Info("Admin: GetNodeVersion called")
reply.Version = service.version.String()
return nil
@ -70,7 +70,7 @@ type GetNodeIDReply struct {
// GetNodeID returns the node ID of this node
func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Debug("Admin: GetNodeID called")
service.log.Info("Admin: GetNodeID called")
reply.NodeID = service.nodeID
return nil
@ -83,7 +83,7 @@ type GetNetworkIDReply struct {
// GetNetworkID returns the network ID this node is running on
func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Debug("Admin: GetNetworkID called")
service.log.Info("Admin: GetNetworkID called")
reply.NetworkID = cjson.Uint32(service.networkID)
return nil
@ -96,7 +96,7 @@ type GetNetworkNameReply struct {
// GetNetworkName returns the network name this node is running on
func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Debug("Admin: GetNetworkName called")
service.log.Info("Admin: GetNetworkName called")
reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
@ -114,7 +114,7 @@ type GetBlockchainIDReply struct {
// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Debug("Admin: GetBlockchainID called")
service.log.Info("Admin: GetBlockchainID called")
bID, err := service.chainManager.Lookup(args.Alias)
reply.BlockchainID = bID.String()
@ -128,26 +128,21 @@ type PeersReply struct {
// Peers returns the list of current validators
func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Debug("Admin: Peers called")
service.log.Info("Admin: Peers called")
reply.Peers = service.networking.Peers()
return nil
}
// StartCPUProfilerArgs are the arguments for calling StartCPUProfiler
type StartCPUProfilerArgs struct {
Filename string `json:"filename"`
}
// StartCPUProfilerReply are the results from calling StartCPUProfiler
type StartCPUProfilerReply struct {
Success bool `json:"success"`
}
// StartCPUProfiler starts a cpu profile writing to the specified file
func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename)
func (service *Admin) StartCPUProfiler(_ *http.Request, args *struct{}, reply *StartCPUProfilerReply) error {
service.log.Info("Admin: StartCPUProfiler called")
reply.Success = true
return service.performance.StartCPUProfiler(args.Filename)
return service.performance.StartCPUProfiler()
}
// StopCPUProfilerReply are the results from calling StopCPUProfiler
@ -157,31 +152,21 @@ type StopCPUProfilerReply struct {
// StopCPUProfiler stops the cpu profile
func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopCPUProfilerReply) error {
service.log.Debug("Admin: StopCPUProfiler called")
service.log.Info("Admin: StopCPUProfiler called")
reply.Success = true
return service.performance.StopCPUProfiler()
}
// MemoryProfileArgs are the arguments for calling MemoryProfile
type MemoryProfileArgs struct {
Filename string `json:"filename"`
}
// MemoryProfileReply are the results from calling MemoryProfile
type MemoryProfileReply struct {
Success bool `json:"success"`
}
// MemoryProfile runs a memory profile writing to the specified file
func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
service.log.Debug("Admin: MemoryProfile called with %s", args.Filename)
func (service *Admin) MemoryProfile(_ *http.Request, args *struct{}, reply *MemoryProfileReply) error {
service.log.Info("Admin: MemoryProfile called")
reply.Success = true
return service.performance.MemoryProfile(args.Filename)
}
// LockProfileArgs are the arguments for calling LockProfile
type LockProfileArgs struct {
Filename string `json:"filename"`
return service.performance.MemoryProfile()
}
// LockProfileReply are the results from calling LockProfile
@ -190,10 +175,10 @@ type LockProfileReply struct {
}
// LockProfile runs a mutex profile writing to the specified file
func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
service.log.Debug("Admin: LockProfile called with %s", args.Filename)
func (service *Admin) LockProfile(_ *http.Request, args *struct{}, reply *LockProfileReply) error {
service.log.Info("Admin: LockProfile called")
reply.Success = true
return service.performance.LockProfile(args.Filename)
return service.performance.LockProfile()
}
// AliasArgs are the arguments for calling Alias
@ -209,7 +194,7 @@ type AliasReply struct {
// Alias attempts to alias an HTTP endpoint to a new name
func (service *Admin) Alias(_ *http.Request, args *AliasArgs, reply *AliasReply) error {
service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
service.log.Info("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
reply.Success = true
return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias)
}
@ -227,7 +212,7 @@ type AliasChainReply struct {
// AliasChain attempts to alias a chain to a new name
func (service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, reply *AliasChainReply) error {
service.log.Debug("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias)
service.log.Info("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias)
chainID, err := service.chainManager.Lookup(args.Chain)
if err != nil {

View File

@ -20,36 +20,66 @@ type CheckFn func() (interface{}, error)
// Check defines a single health check that we want to monitor and consider as
// part of our wider healthiness
type Check struct {
type Check interface {
// Name is the identifier for this check and must be unique among all Checks
Name string
Name() string
// CheckFn is the function to call to perform the the health check
CheckFn CheckFn
// Execute performs the health check. It returns nil if the check passes.
// It can also return additional information to marshal and display to the caller
Execute() (interface{}, error)
// ExecutionPeriod is the duration to wait between executions of this Check
ExecutionPeriod time.Duration
ExecutionPeriod() time.Duration
// InitialDelay is the duration to wait before executing the first time
InitialDelay time.Duration
InitialDelay() time.Duration
// InitiallyPassing is whether or not to consider the Check healthy before the
// initial execution
InitiallyPassing bool
InitiallyPassing() bool
}
// gosundheitCheck implements the health.Check interface backed by a CheckFn
type gosundheitCheck struct {
name string
checkFn CheckFn
// check implements the Check interface
type check struct {
name string
checkFn CheckFn
executionPeriod, initialDelay time.Duration
initiallyPassing bool
}
// Name implements the health.Check interface by returning a unique name
func (c gosundheitCheck) Name() string { return c.name }
// Name is the identifier for this check and must be unique among all Checks
func (c check) Name() string { return c.name }
// Execute implements the health.Check interface by executing the checkFn and
// returning the results
func (c gosundheitCheck) Execute() (interface{}, error) { return c.checkFn() }
// Execute performs the health check. It returns nil if the check passes.
// It can also return additional information to marshal and display to the caller
func (c check) Execute() (interface{}, error) { return c.checkFn() }
// ExecutionPeriod is the duration to wait between executions of this Check
func (c check) ExecutionPeriod() time.Duration { return c.executionPeriod }
// InitialDelay is the duration to wait before executing the first time
func (c check) InitialDelay() time.Duration { return c.initialDelay }
// InitiallyPassing is whether or not to consider the Check healthy before the initial execution
func (c check) InitiallyPassing() bool { return c.initiallyPassing }
// monotonicCheck is a check that will run until it passes once, and after that it will
// always pass without performing any logic. Used for bootstrapping, for example.
type monotonicCheck struct {
passed bool
check
}
func (mc monotonicCheck) Execute() (interface{}, error) {
if mc.passed {
return nil, nil
}
details, pass := mc.check.Execute()
if pass == nil {
mc.passed = true
}
return details, pass
}
// Heartbeater provides a getter to the most recently observed heartbeat
type Heartbeater interface {

View File

@ -7,15 +7,17 @@ import (
"net/http"
"time"
"github.com/AppsFlyer/go-sundheit"
health "github.com/AppsFlyer/go-sundheit"
"github.com/gorilla/rpc/v2"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/json"
"github.com/ava-labs/gecko/utils/logging"
"github.com/gorilla/rpc/v2"
)
// defaultCheckOpts is a Check whose properties represent a default Check
var defaultCheckOpts = Check{ExecutionPeriod: time.Minute}
var defaultCheckOpts = check{executionPeriod: time.Minute}
// Health observes a set of vital signs and makes them available through an HTTP
// API.
@ -36,7 +38,18 @@ func (h *Health) Handler() *common.HTTPHandler {
newServer.RegisterCodec(codec, "application/json")
newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
newServer.RegisterService(h, "health")
return &common.HTTPHandler{LockOptions: common.NoLock, Handler: newServer}
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodGet { // GET request --> return 200 if getLiveness returns true, else 503
if _, healthy := h.health.Results(); healthy {
w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
}
} else {
newServer.ServeHTTP(w, r) // Other request --> use JSON RPC
}
})
return &common.HTTPHandler{LockOptions: common.NoLock, Handler: handler}
}
// RegisterHeartbeat adds a check with default options and a CheckFn that checks
@ -48,18 +61,27 @@ func (h *Health) RegisterHeartbeat(name string, hb Heartbeater, max time.Duratio
// RegisterCheckFunc adds a Check with default options and the given CheckFn
func (h *Health) RegisterCheckFunc(name string, checkFn CheckFn) error {
check := defaultCheckOpts
check.Name = name
check.CheckFn = checkFn
check.name = name
check.checkFn = checkFn
return h.RegisterCheck(check)
}
// RegisterMonotonicCheckFunc adds a Check with default options and the given CheckFn
// After it passes once, its logic (checkFunc) is never run again; it just passes
func (h *Health) RegisterMonotonicCheckFunc(name string, checkFn CheckFn) error {
check := monotonicCheck{check: defaultCheckOpts}
check.name = name
check.checkFn = checkFn
return h.RegisterCheck(check)
}
// RegisterCheck adds the given Check
func (h *Health) RegisterCheck(c Check) error {
return h.health.RegisterCheck(&health.Config{
InitialDelay: c.InitialDelay,
ExecutionPeriod: c.ExecutionPeriod,
InitiallyPassing: c.InitiallyPassing,
Check: gosundheitCheck{c.Name, c.CheckFn},
InitialDelay: c.InitialDelay(),
ExecutionPeriod: c.ExecutionPeriod(),
InitiallyPassing: c.InitiallyPassing(),
Check: c,
})
}
@ -74,7 +96,7 @@ type GetLivenessReply struct {
// GetLiveness returns a summation of the health of the node
func (h *Health) GetLiveness(_ *http.Request, _ *GetLivenessArgs, reply *GetLivenessReply) error {
h.log.Debug("Health: GetLiveness called")
h.log.Info("Health: GetLiveness called")
reply.Checks, reply.Healthy = h.health.Results()
return nil
}
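
The health diff above turns Check into an interface, adds a monotonicCheck whose logic stops running once it has passed, and lets a bare GET on the health endpoint answer 200 while all registered checks pass and 503 Service Unavailable otherwise. A short sketch of registering a bootstrap-style check through the RegisterMonotonicCheckFunc shown above; the import path and the isBootstrapped callback are assumptions, not taken from the diff:

package example

import (
	"errors"

	// Assumed location of the health API shown in the diff above.
	"github.com/ava-labs/gecko/api/health"
)

// registerBootstrapCheck registers a monotonic check: its function only runs
// until it first returns a nil error, after which the check is reported as
// passing without executing the function again.
func registerBootstrapCheck(h *health.Health, isBootstrapped func() bool) error {
	return h.RegisterMonotonicCheckFunc("defaultSubnetBootstrapped", func() (interface{}, error) {
		if !isBootstrapped() {
			return nil, errors.New("default subnet not yet bootstrapped")
		}
		return nil, nil
	})
}

Until such a check passes, the GET handler above reports 503; once every registered check is passing it returns a plain 200.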

api/info/service.go (new file, 160 lines added)

@ -0,0 +1,160 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package info
import (
"fmt"
"net/http"
"github.com/gorilla/rpc/v2"
"github.com/ava-labs/gecko/chains"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/version"
cjson "github.com/ava-labs/gecko/utils/json"
)
// Info is the API service for unprivileged info on a node
type Info struct {
version version.Version
nodeID ids.ShortID
networkID uint32
log logging.Logger
networking network.Network
chainManager chains.Manager
}
// NewService returns a new admin API service
func NewService(log logging.Logger, version version.Version, nodeID ids.ShortID, networkID uint32, chainManager chains.Manager, peers network.Network) *common.HTTPHandler {
newServer := rpc.NewServer()
codec := cjson.NewCodec()
newServer.RegisterCodec(codec, "application/json")
newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
newServer.RegisterService(&Info{
version: version,
nodeID: nodeID,
networkID: networkID,
log: log,
chainManager: chainManager,
networking: peers,
}, "info")
return &common.HTTPHandler{Handler: newServer}
}
// GetNodeVersionReply are the results from calling GetNodeVersion
type GetNodeVersionReply struct {
Version string `json:"version"`
}
// GetNodeVersion returns the version this node is running
func (service *Info) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
service.log.Info("Info: GetNodeVersion called")
reply.Version = service.version.String()
return nil
}
// GetNodeIDReply are the results from calling GetNodeID
type GetNodeIDReply struct {
NodeID ids.ShortID `json:"nodeID"`
}
// GetNodeID returns the node ID of this node
func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Info("Info: GetNodeID called")
reply.NodeID = service.nodeID
return nil
}
// GetNetworkIDReply are the results from calling GetNetworkID
type GetNetworkIDReply struct {
NetworkID cjson.Uint32 `json:"networkID"`
}
// GetNetworkID returns the network ID this node is running on
func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Info("Info: GetNetworkID called")
reply.NetworkID = cjson.Uint32(service.networkID)
return nil
}
// GetNetworkNameReply is the result from calling GetNetworkName
type GetNetworkNameReply struct {
NetworkName string `json:"networkName"`
}
// GetNetworkName returns the network name this node is running on
func (service *Info) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Info("Info: GetNetworkName called")
reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
}
// GetBlockchainIDArgs are the arguments for calling GetBlockchainID
type GetBlockchainIDArgs struct {
Alias string `json:"alias"`
}
// GetBlockchainIDReply are the results from calling GetBlockchainID
type GetBlockchainIDReply struct {
BlockchainID string `json:"blockchainID"`
}
// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Info("Info: GetBlockchainID called")
bID, err := service.chainManager.Lookup(args.Alias)
reply.BlockchainID = bID.String()
return err
}
// PeersReply are the results from calling Peers
type PeersReply struct {
Peers []network.PeerID `json:"peers"`
}
// Peers returns the list of current validators
func (service *Info) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Info("Info: Peers called")
reply.Peers = service.networking.Peers()
return nil
}
// IsBootstrappedArgs are the arguments for calling IsBootstrapped
type IsBootstrappedArgs struct {
// Alias of the chain
// Can also be the string representation of the chain's ID
Chain string `json:"chain"`
}
// IsBootstrappedResponse are the results from calling IsBootstrapped
type IsBootstrappedResponse struct {
// True iff the chain exists and is done bootstrapping
IsBootstrapped bool `json:"isBootstrapped"`
}
// IsBootstrapped returns nil and sets [reply.IsBootstrapped] == true iff [args.Chain] exists and is done bootstrapping
// Returns an error if the chain doesn't exist
func (service *Info) IsBootstrapped(_ *http.Request, args *IsBootstrappedArgs, reply *IsBootstrappedResponse) error {
service.log.Info("Info: IsBootstrapped called")
if args.Chain == "" {
return fmt.Errorf("argument 'chain' not given")
}
chainID, err := service.chainManager.Lookup(args.Chain)
if err != nil {
return fmt.Errorf("there is no chain with alias/ID '%s'", args.Chain)
}
reply.IsBootstrapped = service.chainManager.IsBootstrapped(chainID)
return nil
}
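
IsBootstrapped is served by the handler registered under the service name "info" above. A hedged example of calling it over JSON-RPC from Go; the /ext/info path, the 9650 port (borrowed from the .env example earlier), the lower-case method name and the "X" chain alias are illustrative assumptions, not taken from the diff:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// The chain can be an alias or the string form of a chain ID,
	// per the IsBootstrappedArgs comment above.
	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"info.isBootstrapped","params":{"chain":"X"}}`)

	resp, err := http.Post("http://localhost:9650/ext/info", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // e.g. {"jsonrpc":"2.0","result":{"isBootstrapped":true},"id":1}
}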

View File

@ -61,6 +61,7 @@ type PublishBlockchainReply struct {
// PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC
func (ipc *IPCs) PublishBlockchain(r *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error {
ipc.log.Info("IPCs: PublishBlockchain called with BlockchainID: %s", args.BlockchainID)
chainID, err := ipc.chainManager.Lookup(args.BlockchainID)
if err != nil {
ipc.log.Error("unknown blockchainID: %s", err)
@ -116,6 +117,7 @@ type UnpublishBlockchainReply struct {
// UnpublishBlockchain closes publishing of a blockchainID
func (ipc *IPCs) UnpublishBlockchain(r *http.Request, args *UnpublishBlockchainArgs, reply *UnpublishBlockchainReply) error {
ipc.log.Info("IPCs: UnpublishBlockchain called with BlockchainID: %s", args.BlockchainID)
chainID, err := ipc.chainManager.Lookup(args.BlockchainID)
if err != nil {
ipc.log.Error("unknown blockchainID %s: %s", args.BlockchainID, err)


@ -8,29 +8,41 @@ import (
"fmt"
"net/http"
"sync"
"testing"
"github.com/gorilla/rpc/v2"
zxcvbn "github.com/nbutton23/zxcvbn-go"
"github.com/ava-labs/gecko/chains/atomic"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/encdb"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/vms/components/codec"
jsoncodec "github.com/ava-labs/gecko/utils/json"
zxcvbn "github.com/nbutton23/zxcvbn-go"
)
const (
// maxUserPassLen is the maximum length of the username or password allowed
maxUserPassLen = 1024
// requiredPassScore defines the score a password must achieve to be accepted
// as a password with strong characteristics by the zxcvbn package
// maxCheckedPassLen limits the length of the password that should be
// strength checked.
//
// As per issue https://github.com/ava-labs/gecko/issues/195 it was found
// the longer the length of password the slower zxcvbn.PasswordStrength()
// performs. To avoid performance issues, and a DoS vector, we only check
// the first 50 characters of the password.
maxCheckedPassLen = 50
// requiredPassScore defines the score a password must achieve to be
// accepted as a password with strong characteristics by the zxcvbn package
//
// The scoring mechanism defined is as follows;
//
@ -135,37 +147,11 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username)
if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen {
return errUserPassMaxLength
}
if args.Username == "" {
return errEmptyUsername
}
if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
return fmt.Errorf("user already exists: %s", args.Username)
}
if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore {
return errWeakPassword
}
usr := &User{}
if err := usr.Initialize(args.Password); err != nil {
ks.log.Info("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username)
if err := ks.AddUser(args.Username, args.Password); err != nil {
return err
}
usrBytes, err := ks.codec.Marshal(usr)
if err != nil {
return err
}
if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil {
return err
}
ks.users[args.Username] = usr
reply.Success = true
return nil
}
@ -183,7 +169,7 @@ func (ks *Keystore) ListUsers(_ *http.Request, args *ListUsersArgs, reply *ListU
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("ListUsers called")
ks.log.Info("Keystore: ListUsers called")
reply.Users = []string{}
@ -211,7 +197,7 @@ func (ks *Keystore) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Exp
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("ExportUser called for %s", args.Username)
ks.log.Info("Keystore: ExportUser called for %s", args.Username)
usr, err := ks.getUser(args.Username)
if err != nil {
@ -264,7 +250,7 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("ImportUser called for %s", args.Username)
ks.log.Info("Keystore: ImportUser called for %s", args.Username)
if args.Username == "" {
return errEmptyUsername
@ -324,7 +310,7 @@ func (ks *Keystore) DeleteUser(_ *http.Request, args *DeleteUserArgs, reply *Del
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("DeleteUser called with %s", args.Username)
ks.log.Info("Keystore: DeleteUser called with %s", args.Username)
if args.Username == "" {
return errEmptyUsername
@ -403,3 +389,51 @@ func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database
return encDB, nil
}
// AddUser attempts to register this username and password as a new user of the
// keystore.
func (ks *Keystore) AddUser(username, password string) error {
if len(username) > maxUserPassLen || len(password) > maxUserPassLen {
return errUserPassMaxLength
}
if username == "" {
return errEmptyUsername
}
if usr, err := ks.getUser(username); err == nil || usr != nil {
return fmt.Errorf("user already exists: %s", username)
}
checkPass := password
if len(password) > maxCheckedPassLen {
checkPass = password[:maxCheckedPassLen]
}
if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore {
return errWeakPassword
}
usr := &User{}
if err := usr.Initialize(password); err != nil {
return err
}
usrBytes, err := ks.codec.Marshal(usr)
if err != nil {
return err
}
if err := ks.userDB.Put([]byte(username), usrBytes); err != nil {
return err
}
ks.users[username] = usr
return nil
}
// CreateTestKeystore returns a new keystore that can be utilized for testing
func CreateTestKeystore(t *testing.T) *Keystore {
ks := &Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
return ks
}
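A short sketch of how the extracted AddUser helper pairs with CreateTestKeystore in a test; the username and password are arbitrary, and the password only needs to clear the zxcvbn score threshold enforced above.

func TestAddUserDirectly(t *testing.T) {
	ks := CreateTestKeystore(t)

	// AddUser performs the same length, duplicate, and strength checks that
	// CreateUser previously did inline, so weak or oversized inputs fail here too.
	if err := ks.AddUser("bob", "launchAccomplished^NY##%$"); err != nil {
		t.Fatal(err)
	}

	// Re-registering the same username must be rejected.
	if err := ks.AddUser("bob", "launchAccomplished^NY##%$"); err == nil {
		t.Fatal("expected duplicate user to be rejected")
	}
}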


@ -10,9 +10,7 @@ import (
"reflect"
"testing"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/logging"
)
var (
@ -22,8 +20,7 @@ var (
)
func TestServiceListNoUsers(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
reply := ListUsersReply{}
if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil {
@ -35,8 +32,7 @@ func TestServiceListNoUsers(t *testing.T) {
}
func TestServiceCreateUser(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -75,8 +71,7 @@ func genStr(n int) string {
// TestServiceCreateUserArgsCheck generates excessively long usernames or
// passwords to ensure the sanity checks on string length are enforced
func TestServiceCreateUserArgsCheck(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -117,8 +112,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) {
// TestServiceCreateUserWeakPassword tests creating a new user with a weak
// password to ensure the password strength check is working
func TestServiceCreateUserWeakPassword(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -138,8 +132,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) {
}
func TestServiceCreateDuplicate(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -166,8 +159,7 @@ func TestServiceCreateDuplicate(t *testing.T) {
}
func TestServiceCreateUserNoName(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
reply := CreateUserReply{}
if err := ks.CreateUser(nil, &CreateUserArgs{
@ -178,8 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) {
}
func TestServiceUseBlockchainDB(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -218,8 +209,7 @@ func TestServiceUseBlockchainDB(t *testing.T) {
}
func TestServiceExportImport(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -252,8 +242,7 @@ func TestServiceExportImport(t *testing.T) {
t.Fatal(err)
}
newKS := Keystore{}
newKS.Initialize(logging.NoLog{}, memdb.New())
newKS := CreateTestKeystore(t)
{
reply := ImportUserReply{}
@ -358,11 +347,10 @@ func TestServiceDeleteUser(t *testing.T) {
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
if tt.setup != nil {
if err := tt.setup(&ks); err != nil {
if err := tt.setup(ks); err != nil {
t.Fatalf("failed to create user setup in keystore: %v", err)
}
}


@ -4,9 +4,10 @@
package metrics
import (
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/ava-labs/gecko/snow/engine/common"
)
// NewService returns a new prometheus service

cache/lru_cache.go vendored

@ -10,6 +10,10 @@ import (
"github.com/ava-labs/gecko/ids"
)
const (
minCacheSize = 32
)
type entry struct {
Key ids.ID
Value interface{}
@ -59,7 +63,7 @@ func (c *LRU) Flush() {
func (c *LRU) init() {
if c.entryMap == nil {
c.entryMap = make(map[[32]byte]*list.Element)
c.entryMap = make(map[[32]byte]*list.Element, minCacheSize)
}
if c.entryList == nil {
c.entryList = list.New()
@ -134,6 +138,6 @@ func (c *LRU) evict(key ids.ID) {
func (c *LRU) flush() {
c.init()
c.entryMap = make(map[[32]byte]*list.Element)
c.entryMap = make(map[[32]byte]*list.Element, minCacheSize)
c.entryList = list.New()
}
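A small usage sketch for the cache above (in package cache, alongside the ids import already shown; "fmt" is also needed). It assumes the package's usual Get(ids.ID) (interface{}, bool) accessor, which is not part of this diff; eviction is least-recently-used, as the type name suggests.

func exampleLRU() {
	cache := &LRU{Size: 2}
	k1 := ids.NewID([32]byte{1})
	k2 := ids.NewID([32]byte{2})
	k3 := ids.NewID([32]byte{3})

	cache.Put(k1, "a")
	cache.Put(k2, "b")
	cache.Put(k3, "c") // capacity is 2, so the least recently used entry (k1) is evicted

	if _, ok := cache.Get(k1); ok {
		panic("k1 should have been evicted")
	}
	if v, ok := cache.Get(k3); ok {
		fmt.Println(v) // "c"
	}
}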

cache/lru_cache_benchmark_test.go vendored Normal file

@ -0,0 +1,53 @@
package cache
import (
"crypto/rand"
"testing"
"github.com/ava-labs/gecko/ids"
)
func BenchmarkLRUCachePutSmall(b *testing.B) {
smallLen := 5
cache := &LRU{Size: smallLen}
for n := 0; n < b.N; n++ {
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}
func BenchmarkLRUCachePutMedium(b *testing.B) {
mediumLen := 250
cache := &LRU{Size: mediumLen}
for n := 0; n < b.N; n++ {
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}
func BenchmarkLRUCachePutLarge(b *testing.B) {
largeLen := 10000
cache := &LRU{Size: largeLen}
for n := 0; n < b.N; n++ {
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}


@ -10,9 +10,9 @@ import (
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/vms/components/codec"
)
type rcLock struct {


@ -76,6 +76,9 @@ type Manager interface {
// Add an alias to a chain
Alias(ids.ID, string) error
// Returns true iff the chain with the given ID exists and is finished bootstrapping
IsBootstrapped(ids.ID) bool
Shutdown()
}
@ -114,6 +117,10 @@ type manager struct {
keystore *keystore.Keystore
sharedMemory *atomic.SharedMemory
// Key: Chain's ID
// Value: The chain
chains map[[32]byte]*router.Handler
unblocked bool
blockedChains []ChainParameters
}
@ -131,7 +138,7 @@ func New(
decisionEvents *triggers.EventDispatcher,
consensusEvents *triggers.EventDispatcher,
db database.Database,
router router.Router,
rtr router.Router,
net network.Network,
consensusParams avacon.Parameters,
validators validators.Manager,
@ -145,7 +152,7 @@ func New(
timeoutManager.Initialize(requestTimeout)
go log.RecoverAndPanic(timeoutManager.Dispatch)
router.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout)
rtr.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout)
m := &manager{
stakingEnabled: stakingEnabled,
@ -155,7 +162,7 @@ func New(
decisionEvents: decisionEvents,
consensusEvents: consensusEvents,
db: db,
chainRouter: router,
chainRouter: rtr,
net: net,
timeoutManager: &timeoutManager,
consensusParams: consensusParams,
@ -165,6 +172,7 @@ func New(
server: server,
keystore: keystore,
sharedMemory: sharedMemory,
chains: make(map[[32]byte]*router.Handler),
}
m.Initialize()
return m
@ -454,7 +462,7 @@ func (m *manager) createAvalancheChain(
eng: &engine,
})
}
m.chains[ctx.ChainID.Key()] = handler
return nil
}
@ -546,9 +554,20 @@ func (m *manager) createSnowmanChain(
eng: &engine,
})
}
m.chains[ctx.ChainID.Key()] = handler
return nil
}
func (m *manager) IsBootstrapped(id ids.ID) bool {
chain, exists := m.chains[id.Key()]
if !exists {
return false
}
chain.Context().Lock.Lock()
defer chain.Context().Lock.Unlock()
return chain.Engine().IsBootstrapped()
}
// Shutdown stops all the chains
func (m *manager) Shutdown() { m.chainRouter.Shutdown() }
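One way a caller might use the new Manager.IsBootstrapped: poll until a chain reports that bootstrapping is done. A sketch only; the interval and timeout are arbitrary choices (not anything this diff prescribes), and "time" and "fmt" imports are assumed.

// waitForBootstrap blocks until the chain manager reports the chain as
// bootstrapped, or until the timeout elapses.
func waitForBootstrap(m Manager, chainID ids.ID, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !m.IsBootstrapped(chainID) {
		if time.Now().After(deadline) {
			return fmt.Errorf("chain %s did not bootstrap within %s", chainID, timeout)
		}
		time.Sleep(100 * time.Millisecond)
	}
	return nil
}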


@ -35,3 +35,6 @@ func (mm MockManager) Alias(ids.ID, string) error { return nil }
// Shutdown ...
func (mm MockManager) Shutdown() {}
// IsBootstrapped ...
func (mm MockManager) IsBootstrapped(ids.ID) bool { return false }

database/common.go Normal file

@ -0,0 +1,14 @@
package database
const (
// MaxExcessCapacityFactor ...
// If, when a batch is reset, cap(batch)/len(batch) > MaxExcessCapacityFactor,
// the underlying array's capacity will be reduced by a factor of CapacityReductionFactor.
// Higher value for MaxExcessCapacityFactor --> less aggressive array downsizing --> fewer memory allocations
// but more unnecessary data in the underlying array that can't be garbage collected.
// Higher value for CapacityReductionFactor --> more aggressive array downsizing --> more memory allocations
// but less unnecessary data in the underlying array that can't be garbage collected.
MaxExcessCapacityFactor = 4
// CapacityReductionFactor ...
CapacityReductionFactor = 2
)
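The same shrink-on-reset rule appears in several batch Reset implementations below. Pulled out on its own it is just the following (the keyValue element type here is illustrative; each database package defines its own):

// shrinkOnReset truncates a batch's write buffer, reallocating it at
// cap/CapacityReductionFactor whenever the capacity has grown past
// MaxExcessCapacityFactor times the current length. For example, a buffer
// holding 100 entries with capacity 1024 (1024 > 100*4) comes back empty
// with capacity 512.
func shrinkOnReset(writes []keyValue) []keyValue {
	if cap(writes) > len(writes)*MaxExcessCapacityFactor {
		return make([]keyValue, 0, cap(writes)/CapacityReductionFactor)
	}
	return writes[:0]
}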


@ -13,8 +13,8 @@ import (
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/nodb"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/vms/components/codec"
)
// Database encrypts all values that are provided
@ -201,7 +201,11 @@ func (b *batch) Write() error {
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.Batch.Reset()
}


@ -6,14 +6,15 @@ package leveldb
import (
"bytes"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/utils"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/utils"
)
const (


@ -13,8 +13,10 @@ import (
"github.com/ava-labs/gecko/utils"
)
// DefaultSize is the default initial size of the memory database
const DefaultSize = 1 << 10
const (
// DefaultSize is the default initial size of the memory database
DefaultSize = 1 << 10
)
// Database is an ephemeral key-value store that implements the Database
// interface.
@ -191,7 +193,11 @@ func (b *batch) Write() error {
// Reset implements the Batch interface
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.size = 0
}


@ -17,7 +17,7 @@ func (*Database) Has([]byte) (bool, error) { return false, database.ErrClosed }
func (*Database) Get([]byte) ([]byte, error) { return nil, database.ErrClosed }
// Put returns database.ErrClosed
func (*Database) Put(_ []byte, _ []byte) error { return database.ErrClosed }
func (*Database) Put(_, _ []byte) error { return database.ErrClosed }
// Delete returns database.ErrClosed
func (*Database) Delete([]byte) error { return database.ErrClosed }


@ -199,7 +199,11 @@ func (b *batch) Write() error {
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.Batch.Reset()
}


@ -27,7 +27,7 @@ func NewClient(client rpcdbproto.DatabaseClient) *DatabaseClient {
return &DatabaseClient{client: client}
}
// Has returns false, nil
// Has attempts to return if the database has a key with the provided value.
func (db *DatabaseClient) Has(key []byte) (bool, error) {
resp, err := db.client.Has(context.Background(), &rpcdbproto.HasRequest{
Key: key,
@ -38,7 +38,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) {
return resp.Has, nil
}
// Get returns nil, error
// Get attempts to return the value that was mapped to the key that was provided
func (db *DatabaseClient) Get(key []byte) ([]byte, error) {
resp, err := db.client.Get(context.Background(), &rpcdbproto.GetRequest{
Key: key,
@ -49,7 +49,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) {
return resp.Value, nil
}
// Put returns nil
// Put attempts to set the value this key maps to
func (db *DatabaseClient) Put(key, value []byte) error {
_, err := db.client.Put(context.Background(), &rpcdbproto.PutRequest{
Key: key,
@ -58,7 +58,7 @@ func (db *DatabaseClient) Put(key, value []byte) error {
return updateError(err)
}
// Delete returns nil
// Delete attempts to remove any mapping from the key
func (db *DatabaseClient) Delete(key []byte) error {
_, err := db.client.Delete(context.Background(), &rpcdbproto.DeleteRequest{
Key: key,
@ -99,7 +99,7 @@ func (db *DatabaseClient) NewIteratorWithStartAndPrefix(start, prefix []byte) da
}
}
// Stat returns an error
// Stat attempts to return the statistic of this database
func (db *DatabaseClient) Stat(property string) (string, error) {
resp, err := db.client.Stat(context.Background(), &rpcdbproto.StatRequest{
Property: property,
@ -110,7 +110,7 @@ func (db *DatabaseClient) Stat(property string) (string, error) {
return resp.Stat, nil
}
// Compact returns nil
// Compact attempts to optimize the space utilization in the provided range
func (db *DatabaseClient) Compact(start, limit []byte) error {
_, err := db.client.Compact(context.Background(), &rpcdbproto.CompactRequest{
Start: start,
@ -119,7 +119,7 @@ func (db *DatabaseClient) Compact(start, limit []byte) error {
return updateError(err)
}
// Close returns nil
// Close attempts to close the database
func (db *DatabaseClient) Close() error {
_, err := db.client.Close(context.Background(), &rpcdbproto.CloseRequest{})
return updateError(err)
@ -180,7 +180,11 @@ func (b *batch) Write() error {
}
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.size = 0
}
@ -207,7 +211,8 @@ type iterator struct {
err error
}
// Next returns false
// Next attempts to move the iterator to the next element and returns if this
// succeeded
func (it *iterator) Next() bool {
resp, err := it.db.client.IteratorNext(context.Background(), &rpcdbproto.IteratorNextRequest{
Id: it.id,
@ -221,7 +226,7 @@ func (it *iterator) Next() bool {
return resp.FoundNext
}
// Error returns any errors
// Error returns any error that occurred while iterating
func (it *iterator) Error() error {
if it.err != nil {
return it.err
@ -234,19 +239,21 @@ func (it *iterator) Error() error {
return it.err
}
// Key returns nil
// Key returns the key of the current element
func (it *iterator) Key() []byte { return it.key }
// Value returns nil
// Value returns the value of the current element
func (it *iterator) Value() []byte { return it.value }
// Release does nothing
// Release frees any resources held by the iterator
func (it *iterator) Release() {
it.db.client.IteratorRelease(context.Background(), &rpcdbproto.IteratorReleaseRequest{
Id: it.id,
})
}
// updateError sets the error value to the errors required by the Database
// interface
func updateError(err error) error {
if err == nil {
return nil
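With the iterator methods documented above, walking a remote database through the rpcdb client looks the same as walking any other database.Database. A sketch, assuming the interface's plain NewIterator constructor and a "fmt" import (neither is shown in this diff):

// iterate prints every key/value pair reachable through db, following the
// iterator contract: Next advances, Key/Value read the current element,
// Error reports any failure, Release frees server-side resources.
func iterate(db database.Database) error {
	it := db.NewIterator()
	defer it.Release()
	for it.Next() {
		fmt.Printf("%x -> %x\n", it.Key(), it.Value())
	}
	return it.Error()
}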


@ -34,16 +34,16 @@ func NewServer(db database.Database) *DatabaseServer {
}
}
// Has ...
// Has delegates the Has call to the managed database and returns the result
func (db *DatabaseServer) Has(_ context.Context, req *rpcdbproto.HasRequest) (*rpcdbproto.HasResponse, error) {
has, err := db.db.Has(req.Key)
if err != nil {
return nil, err
}
return &rpcdbproto.HasResponse{Has: has}, nil
return &rpcdbproto.HasResponse{Has: has}, err
}
// Get ...
// Get delegates the Get call to the managed database and returns the result
func (db *DatabaseServer) Get(_ context.Context, req *rpcdbproto.GetRequest) (*rpcdbproto.GetResponse, error) {
value, err := db.db.Get(req.Key)
if err != nil {
@ -52,17 +52,18 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbproto.GetRequest) (*r
return &rpcdbproto.GetResponse{Value: value}, nil
}
// Put ...
// Put delegates the Put call to the managed database and returns the result
func (db *DatabaseServer) Put(_ context.Context, req *rpcdbproto.PutRequest) (*rpcdbproto.PutResponse, error) {
return &rpcdbproto.PutResponse{}, db.db.Put(req.Key, req.Value)
}
// Delete ...
// Delete delegates the Delete call to the managed database and returns the
// result
func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbproto.DeleteRequest) (*rpcdbproto.DeleteResponse, error) {
return &rpcdbproto.DeleteResponse{}, db.db.Delete(req.Key)
}
// Stat ...
// Stat delegates the Stat call to the managed database and returns the result
func (db *DatabaseServer) Stat(_ context.Context, req *rpcdbproto.StatRequest) (*rpcdbproto.StatResponse, error) {
stat, err := db.db.Stat(req.Property)
if err != nil {
@ -71,17 +72,19 @@ func (db *DatabaseServer) Stat(_ context.Context, req *rpcdbproto.StatRequest) (
return &rpcdbproto.StatResponse{Stat: stat}, nil
}
// Compact ...
// Compact delegates the Compact call to the managed database and returns the
// result
func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbproto.CompactRequest) (*rpcdbproto.CompactResponse, error) {
return &rpcdbproto.CompactResponse{}, db.db.Compact(req.Start, req.Limit)
}
// Close ...
func (db *DatabaseServer) Close(_ context.Context, _ *rpcdbproto.CloseRequest) (*rpcdbproto.CloseResponse, error) {
// Close delegates the Close call to the managed database and returns the result
func (db *DatabaseServer) Close(context.Context, *rpcdbproto.CloseRequest) (*rpcdbproto.CloseResponse, error) {
return &rpcdbproto.CloseResponse{}, db.db.Close()
}
// WriteBatch ...
// WriteBatch takes in a set of key-value pairs and atomically writes them to
// the internal database
func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbproto.WriteBatchRequest) (*rpcdbproto.WriteBatchResponse, error) {
db.batch.Reset()
@ -100,7 +103,8 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbproto.WriteBat
return &rpcdbproto.WriteBatchResponse{}, db.batch.Write()
}
// NewIteratorWithStartAndPrefix ...
// NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator
// ID
func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req *rpcdbproto.NewIteratorWithStartAndPrefixRequest) (*rpcdbproto.NewIteratorWithStartAndPrefixResponse, error) {
id := db.nextIteratorID
it := db.db.NewIteratorWithStartAndPrefix(req.Start, req.Prefix)
@ -110,7 +114,7 @@ func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req *
return &rpcdbproto.NewIteratorWithStartAndPrefixResponse{Id: id}, nil
}
// IteratorNext ...
// IteratorNext attempts to call next on the requested iterator
func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbproto.IteratorNextRequest) (*rpcdbproto.IteratorNextResponse, error) {
it, exists := db.iterators[req.Id]
if !exists {
@ -123,7 +127,7 @@ func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbproto.Iterat
}, nil
}
// IteratorError ...
// IteratorError attempts to report any errors that occurred during iteration
func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbproto.IteratorErrorRequest) (*rpcdbproto.IteratorErrorResponse, error) {
it, exists := db.iterators[req.Id]
if !exists {
@ -132,7 +136,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbproto.Itera
return &rpcdbproto.IteratorErrorResponse{}, it.Error()
}
// IteratorRelease ...
// IteratorRelease attempts to release the resources allocated to an iterator
func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbproto.IteratorReleaseRequest) (*rpcdbproto.IteratorReleaseResponse, error) {
it, exists := db.iterators[req.Id]
if exists {


@ -18,9 +18,10 @@ import (
// database, writing changes to the underlying database only when commit is
// called.
type Database struct {
lock sync.RWMutex
mem map[string]valueDelete
db database.Database
lock sync.RWMutex
mem map[string]valueDelete
db database.Database
batch database.Batch
}
type valueDelete struct {
@ -31,8 +32,9 @@ type valueDelete struct {
// New returns a new prefixed database
func New(db database.Database) *Database {
return &Database{
mem: make(map[string]valueDelete, memdb.DefaultSize),
db: db,
mem: make(map[string]valueDelete, memdb.DefaultSize),
db: db,
batch: db.NewBatch(),
}
}
@ -169,6 +171,7 @@ func (db *Database) SetDatabase(newDB database.Database) error {
}
db.db = newDB
db.batch = newDB.NewBatch()
return nil
}
@ -192,6 +195,7 @@ func (db *Database) Commit() error {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
db.abort()
return nil
}
@ -206,7 +210,10 @@ func (db *Database) Abort() {
func (db *Database) abort() { db.mem = make(map[string]valueDelete, memdb.DefaultSize) }
// CommitBatch returns a batch that will commit all pending writes to the underlying database
// CommitBatch returns a batch that contains all uncommitted puts/deletes.
// Calling Write() on the returned batch causes the puts/deletes to be
// written to the underlying database. The returned batch should be written before
// future calls to this DB unless the batch will never be written.
func (db *Database) CommitBatch() (database.Batch, error) {
db.lock.Lock()
defer db.lock.Unlock()
@ -214,26 +221,25 @@ func (db *Database) CommitBatch() (database.Batch, error) {
return db.commitBatch()
}
// Put all of the puts/deletes in memory into db.batch
// and return the batch
func (db *Database) commitBatch() (database.Batch, error) {
if db.mem == nil {
return nil, database.ErrClosed
}
batch := db.db.NewBatch()
db.batch.Reset()
for key, value := range db.mem {
if value.delete {
if err := batch.Delete([]byte(key)); err != nil {
if err := db.batch.Delete([]byte(key)); err != nil {
return nil, err
}
} else if err := batch.Put([]byte(key), value.value); err != nil {
} else if err := db.batch.Put([]byte(key), value.value); err != nil {
return nil, err
}
}
if err := batch.Write(); err != nil {
return nil, err
}
return batch, nil
return db.batch, nil
}
// Close implements the database.Database interface
@ -244,6 +250,7 @@ func (db *Database) Close() error {
if db.mem == nil {
return database.ErrClosed
}
db.batch = nil
db.mem = nil
db.db = nil
return nil
@ -298,7 +305,11 @@ func (b *batch) Write() error {
// Reset implements the Database interface
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.size = 0
}
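Because commitBatch now reuses the single db.batch rather than allocating a new one, the batch returned by CommitBatch is only valid until the next commit. A minimal sketch of the intended calling pattern (key and value are placeholders), mirroring the updated test below:

// flushOnce stages one write in the versioned layer, extracts it as a batch,
// drops the in-memory layer, and then writes the batch to the backing database.
func flushOnce(db *Database, key, value []byte) error {
	if err := db.Put(key, value); err != nil {
		return err
	}
	batch, err := db.CommitBatch()
	if err != nil {
		return err
	}
	db.Abort() // the staged writes now live only in the returned batch
	return batch.Write()
}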


@ -299,6 +299,10 @@ func TestCommitBatch(t *testing.T) {
if err := db.Put(key1, value1); err != nil {
t.Fatalf("Unexpected error on db.Put: %s", err)
} else if has, err := baseDB.Has(key1); err != nil {
t.Fatalf("Unexpected error on db.Has: %s", err)
} else if has {
t.Fatalf("Unexpected result of db.Has: %v", has)
}
batch, err := db.CommitBatch()
@ -307,7 +311,11 @@ func TestCommitBatch(t *testing.T) {
}
db.Abort()
if err := batch.Write(); err != nil {
if has, err := db.Has(key1); err != nil {
t.Fatalf("Unexpected error on db.Has: %s", err)
} else if has {
t.Fatalf("Unexpected result of db.Has: %v", has)
} else if err := batch.Write(); err != nil {
t.Fatalf("Unexpected error on batch.Write: %s", err)
}

docker-compose.yml Normal file

@ -0,0 +1,11 @@
version: "3"
services:
ava_node:
build: .
image: poanetwork/ava-node
volumes:
- ~/.gecko/db:/db
- ./nginx.template:/etc/nginx/conf.d/nginx.template
ports:
- ${NGINX_PORT}:${NGINX_PORT}
env_file: .env

entrypoint.sh Normal file

@ -0,0 +1,8 @@
#!/bin/bash
echo "RECREATE NGINX CONFIG"
envsubst < /etc/nginx/conf.d/nginx.template > /etc/nginx/conf.d/default.conf
echo "START NGINX"
service nginx start
echo "START NODE WITH ARGS ${AVA_CMD_OPTIONS}"
./build/ava ${AVA_CMD_OPTIONS}


@ -18,27 +18,27 @@ import (
// Aliases returns the default aliases based on the network ID
func Aliases(networkID uint32) (map[string][]string, map[[32]byte][]string, map[[32]byte][]string, error) {
generalAliases := map[string][]string{
"vm/" + platformvm.ID.String(): []string{"vm/platform"},
"vm/" + avm.ID.String(): []string{"vm/avm"},
"vm/" + EVMID.String(): []string{"vm/evm"},
"vm/" + spdagvm.ID.String(): []string{"vm/spdag"},
"vm/" + spchainvm.ID.String(): []string{"vm/spchain"},
"vm/" + timestampvm.ID.String(): []string{"vm/timestamp"},
"bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"},
"vm/" + platformvm.ID.String(): {"vm/platform"},
"vm/" + avm.ID.String(): {"vm/avm"},
"vm/" + EVMID.String(): {"vm/evm"},
"vm/" + spdagvm.ID.String(): {"vm/spdag"},
"vm/" + spchainvm.ID.String(): {"vm/spchain"},
"vm/" + timestampvm.ID.String(): {"vm/timestamp"},
"bc/" + ids.Empty.String(): {"P", "platform", "bc/P", "bc/platform"},
}
chainAliases := map[[32]byte][]string{
ids.Empty.Key(): []string{"P", "platform"},
ids.Empty.Key(): {"P", "platform"},
}
vmAliases := map[[32]byte][]string{
platformvm.ID.Key(): []string{"platform"},
avm.ID.Key(): []string{"avm"},
EVMID.Key(): []string{"evm"},
spdagvm.ID.Key(): []string{"spdag"},
spchainvm.ID.Key(): []string{"spchain"},
timestampvm.ID.Key(): []string{"timestamp"},
secp256k1fx.ID.Key(): []string{"secp256k1fx"},
nftfx.ID.Key(): []string{"nftfx"},
propertyfx.ID.Key(): []string{"propertyfx"},
platformvm.ID.Key(): {"platform"},
avm.ID.Key(): {"avm"},
EVMID.Key(): {"evm"},
spdagvm.ID.Key(): {"spdag"},
spchainvm.ID.Key(): {"spchain"},
timestampvm.ID.Key(): {"timestamp"},
secp256k1fx.ID.Key(): {"secp256k1fx"},
nftfx.ID.Key(): {"nftfx"},
propertyfx.ID.Key(): {"propertyfx"},
}
genesisBytes, err := Genesis(networkID)


@ -50,6 +50,122 @@ func (c *Config) init() error {
// Hard coded genesis constants
var (
EverestConfig = Config{
MintAddresses: []string{
"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
},
FundedAddresses: []string{
"9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17",
"JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn",
"7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM",
"77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6",
"4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt",
"CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi",
"4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa",
"DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9",
"ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h",
"6cesTteH62Y5mLoDBUASaBvCXuL2AthL",
},
StakerIDs: []string{
"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
},
EVMBytes: []byte{
0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69,
0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31,
0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65,
0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64,
0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53,
0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a,
0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69,
0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63,
0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69,
0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68,
0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38,
0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62,
0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32,
0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31,
0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35,
0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34,
0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66,
0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36,
0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22,
0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75,
0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c,
0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72,
0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22,
0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22,
0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74,
0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30,
0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69,
0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78,
0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22,
0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63,
0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78,
0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x22, 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e,
0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f,
0x63, 0x22, 0x3a, 0x7b, 0x22, 0x35, 0x37, 0x32,
0x66, 0x34, 0x64, 0x38, 0x30, 0x66, 0x31, 0x30,
0x66, 0x36, 0x36, 0x33, 0x62, 0x35, 0x30, 0x34,
0x39, 0x66, 0x37, 0x38, 0x39, 0x35, 0x34, 0x36,
0x66, 0x32, 0x35, 0x66, 0x37, 0x30, 0x62, 0x62,
0x36, 0x32, 0x61, 0x37, 0x66, 0x22, 0x3a, 0x7b,
0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62,
0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30,
0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c,
0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61,
0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22,
0x7d,
},
}
DenaliConfig = Config{
MintAddresses: []string{
"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
@ -393,6 +509,8 @@ var (
// GetConfig ...
func GetConfig(networkID uint32) *Config {
switch networkID {
case EverestID:
return &EverestConfig
case DenaliID:
return &DenaliConfig
case CascadeID:
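The EverestConfig.EVMBytes literal above is simply the C-Chain genesis JSON encoded byte by byte (it begins {"config":{"chainId":43110,...). A quick inspection sketch, not something the diff itself does:

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/genesis"
)

func main() {
	// EVMBytes is plain ASCII JSON, so a string conversion is enough to read it.
	fmt.Println(string(genesis.EverestConfig.EVMBytes))
}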


@ -9,12 +9,12 @@ import (
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/json"
"github.com/ava-labs/gecko/utils/units"
"github.com/ava-labs/gecko/utils/wrappers"
"github.com/ava-labs/gecko/vms/avm"
"github.com/ava-labs/gecko/vms/components/codec"
"github.com/ava-labs/gecko/vms/nftfx"
"github.com/ava-labs/gecko/vms/platformvm"
"github.com/ava-labs/gecko/vms/propertyfx"
@ -156,7 +156,7 @@ func FromConfig(networkID uint32, config *Config) ([]byte, error) {
// Specify the chains that exist upon this network's creation
platformvmArgs.Chains = []platformvm.APIChain{
platformvm.APIChain{
{
GenesisData: avmReply.Bytes,
SubnetID: platformvm.DefaultSubnetID,
VMID: avm.ID,
@ -167,25 +167,25 @@ func FromConfig(networkID uint32, config *Config) ([]byte, error) {
},
Name: "X-Chain",
},
platformvm.APIChain{
{
GenesisData: formatting.CB58{Bytes: config.EVMBytes},
SubnetID: platformvm.DefaultSubnetID,
VMID: EVMID,
Name: "C-Chain",
},
platformvm.APIChain{
{
GenesisData: spdagvmReply.Bytes,
SubnetID: platformvm.DefaultSubnetID,
VMID: spdagvm.ID,
Name: "Simple DAG Payments",
},
platformvm.APIChain{
{
GenesisData: spchainvmReply.Bytes,
SubnetID: platformvm.DefaultSubnetID,
VMID: spchainvm.ID,
Name: "Simple Chain Payments",
},
platformvm.APIChain{
{
GenesisData: formatting.CB58{Bytes: []byte{}}, // There is no genesis data
SubnetID: platformvm.DefaultSubnetID,
VMID: timestampvm.ID,


@ -23,7 +23,10 @@ func TestNetworkName(t *testing.T) {
if name := NetworkName(DenaliID); name != DenaliName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
}
if name := NetworkName(TestnetID); name != DenaliName {
if name := NetworkName(EverestID); name != EverestName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, EverestName)
}
if name := NetworkName(DenaliID); name != DenaliName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
}
if name := NetworkName(4294967295); name != "network-4294967295" {


@ -16,6 +16,7 @@ var (
MainnetID uint32 = 1
CascadeID uint32 = 2
DenaliID uint32 = 3
EverestID uint32 = 4
TestnetID uint32 = 3
LocalID uint32 = 12345
@ -23,6 +24,7 @@ var (
MainnetName = "mainnet"
CascadeName = "cascade"
DenaliName = "denali"
EverestName = "everest"
TestnetName = "testnet"
LocalName = "local"
@ -31,6 +33,7 @@ var (
MainnetID: MainnetName,
CascadeID: CascadeName,
DenaliID: DenaliName,
EverestID: EverestName,
LocalID: LocalName,
}
@ -38,6 +41,7 @@ var (
MainnetName: MainnetID,
CascadeName: CascadeID,
DenaliName: DenaliID,
EverestName: EverestID,
TestnetName: TestnetID,
LocalName: LocalID,
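A quick sanity check of the new Everest entries; note that TestnetID is left at 3, so it still resolves to the Denali name.

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/genesis"
)

func main() {
	fmt.Println(genesis.NetworkName(genesis.EverestID)) // "everest"
	// TestnetID still equals DenaliID (3), so the testnet ID keeps mapping to "denali".
	fmt.Println(genesis.NetworkName(genesis.TestnetID))
}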

go.mod

@ -6,10 +6,10 @@ require (
github.com/AppsFlyer/go-sundheit v0.2.0
github.com/allegro/bigcache v1.2.1 // indirect
github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f // indirect
github.com/ava-labs/coreth v0.2.4 // Added manually; don't delete
github.com/ava-labs/coreth v0.2.5 // indirect; Added manually; don't delete
github.com/ava-labs/go-ethereum v1.9.3 // indirect
github.com/deckarep/golang-set v1.7.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1 v1.0.3
github.com/decred/dcrd/dcrec/secp256k1 v1.0.3 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200526030155-0c6c7ca85d3b
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/elastic/gosigar v0.10.5 // indirect
@ -20,6 +20,7 @@ require (
github.com/gorilla/mux v1.7.4
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
github.com/hashicorp/go-plugin v1.3.0
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/huin/goupnp v1.0.0
@ -28,7 +29,7 @@ require (
github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.1.3
github.com/mr-tron/base58 v1.2.0
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d
github.com/olekukonko/tablewriter v0.0.4 // indirect
github.com/pborman/uuid v1.2.0 // indirect

go.sum

@ -2,13 +2,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/AppsFlyer/go-sundheit v0.2.0 h1:FArqX+HbqZ6U32RC3giEAWRUpkggqxHj91KIvxNgwjU=
github.com/AppsFlyer/go-sundheit v0.2.0/go.mod h1:rCRkVTMQo7/krF7xQ9X0XEF1an68viFR6/Gy02q+4ds=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@ -19,6 +22,8 @@ github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f/go.mod h1:
github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc=
github.com/ava-labs/coreth v0.2.4 h1:MhnbuRyMcij7WU4+frayp40quc44AMPc4IrxXhmucWw=
github.com/ava-labs/coreth v0.2.4/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
github.com/ava-labs/coreth v0.2.5 h1:2Al753rpPHvvZfcz7w96YbKhGFvrcZzsIZ/sIp0A0Ao=
github.com/ava-labs/coreth v0.2.5/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY=
github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -38,7 +43,9 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/decred/dcrd v1.3.0 h1:EEXm7BdiROfazDtuFsOu9mfotnyy00bgCuVwUqaszFo=
github.com/decred/dcrd/chaincfg/chainhash v1.0.2 h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU=
github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec v1.0.0 h1:W+z6Es+Rai3MXYVoPAxYr5U1DGis0Co33scJ6uH2J6o=
github.com/decred/dcrd/dcrec/secp256k1 v1.0.3 h1:u4XpHqlscRolxPxt2YHrFBDVZYY1AK+KMV02H1r+HmU=
@ -61,8 +68,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=
@ -76,6 +85,7 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -97,6 +107,7 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
@ -119,6 +130,7 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
@ -129,6 +141,7 @@ github.com/jackpal/gateway v1.0.6/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQ
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -142,8 +155,10 @@ github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
@ -164,6 +179,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
@ -174,8 +191,10 @@ github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw=
@ -217,6 +236,7 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70KE0+IUJSidPj/BFS/RXNHmKIJOdckzml2E=
@ -226,6 +246,7 @@ github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@ -310,6 +331,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -336,10 +358,13 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
@ -349,6 +374,7 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLv
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9 h1:ITeyKbRetrVzqR3U1eY+ywgp7IBspGd1U/bkwd1gWu4=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=


@ -8,6 +8,10 @@ import (
"strings"
)
const (
minBagSize = 16
)
// Bag is a multiset of IDs.
//
// A bag has the ability to split and filter on its bits for ease of use for
@ -25,7 +29,7 @@ type Bag struct {
func (b *Bag) init() {
if b.counts == nil {
b.counts = make(map[[32]byte]int)
b.counts = make(map[[32]byte]int, minBagSize)
}
}
@ -72,16 +76,21 @@ func (b *Bag) AddCount(id ID, count int) {
}
// Count returns the number of times the id has been added.
func (b *Bag) Count(id ID) int { return b.counts[*id.ID] }
func (b *Bag) Count(id ID) int {
b.init()
return b.counts[*id.ID]
}
// Len returns the total number of ids that have been added, counting duplicates.
func (b *Bag) Len() int { return b.size }
// List returns a list of all ids that have been added.
func (b *Bag) List() []ID {
idList := []ID(nil)
idList := make([]ID, len(b.counts), len(b.counts))
i := 0
for id := range b.counts {
idList = append(idList, NewID(id))
idList[i] = NewID(id)
i++
}
return idList
}

ids/bag_benchmark_test.go Normal file

@ -0,0 +1,53 @@
package ids
import (
"crypto/rand"
"testing"
)
//
func BenchmarkBagListSmall(b *testing.B) {
smallLen := 5
bag := Bag{}
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}
func BenchmarkBagListMedium(b *testing.B) {
mediumLen := 25
bag := Bag{}
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}
func BenchmarkBagListLarge(b *testing.B) {
largeLen := 100000
bag := Bag{}
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}
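These benchmarks exercise only the List call. One way to run just them is go test -bench=BagList -run=^$ ./ids; adding -benchmem reports the per-operation allocations that the preallocated List is meant to remove compared with the old append-based version.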

View File

@ -18,8 +18,8 @@ func TestBagAdd(t *testing.T) {
} else if count := bag.Count(id1); count != 0 {
t.Fatalf("Bag.Count returned %d expected %d", count, 0)
} else if size := bag.Len(); size != 0 {
t.Fatalf("Bag.Len returned %d expected %d", count, 0)
} else if list := bag.List(); list != nil {
t.Fatalf("Bag.Len returned %d elements expected %d", count, 0)
} else if list := bag.List(); len(list) != 0 {
t.Fatalf("Bag.List returned %v expected %v", list, nil)
} else if mode, freq := bag.Mode(); !mode.IsZero() {
t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, ID{})

View File

@ -7,11 +7,19 @@ import (
"strings"
)
const (
// The minimum capacity of a set
minSetSize = 16
)
// Set is a set of IDs
type Set map[[32]byte]bool
func (ids *Set) init(size int) {
if *ids == nil {
if minSetSize > size {
size = minSetSize
}
*ids = make(map[[32]byte]bool, size)
}
}
@ -70,9 +78,32 @@ func (ids *Set) Clear() { *ids = nil }
// List converts this set into a list
func (ids Set) List() []ID {
idList := []ID(nil)
idList := make([]ID, ids.Len())
i := 0
for id := range ids {
idList = append(idList, NewID(id))
idList[i] = NewID(id)
i++
}
return idList
}
// CappedList returns a list of length at most [size].
// Size should be >= 0. If size < 0, returns nil.
func (ids Set) CappedList(size int) []ID {
if size < 0 {
return nil
}
if l := ids.Len(); l < size {
size = l
}
i := 0
idList := make([]ID, size)
for id := range ids {
if i >= size {
break
}
idList[i] = NewID(id)
i++
}
return idList
}
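CappedList is new here, so a short usage sketch may help. It relies only on identifiers already visible in this diff (Set, NewID, Empty) and is written as if from outside the ids package; the byte values are placeholders:

set := ids.Set{}
set.Add(ids.Empty)
set.Add(ids.NewID([32]byte{1}))
set.Add(ids.NewID([32]byte{2}))

_ = set.CappedList(0) // an empty (non-nil) slice
_ = set.CappedList(2) // any two of the three ids; map iteration order is unspecified
_ = set.CappedList(5) // all three ids, since the cap is clamped to the set's length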

53
ids/set_benchmark_test.go Normal file
View File

@ -0,0 +1,53 @@
package ids
import (
"crypto/rand"
"testing"
)
//
func BenchmarkSetListSmall(b *testing.B) {
smallLen := 5
set := Set{}
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}
func BenchmarkSetListMedium(b *testing.B) {
mediumLen := 25
set := Set{}
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}
func BenchmarkSetListLarge(b *testing.B) {
largeLen := 100000
set := Set{}
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}

View File

@ -55,3 +55,46 @@ func TestSet(t *testing.T) {
t.Fatalf("Sets overlap")
}
}
func TestSetCappedList(t *testing.T) {
set := Set{}
id := Empty
if list := set.CappedList(0); len(list) != 0 {
t.Fatalf("List should have been empty but was %v", list)
}
set.Add(id)
if list := set.CappedList(0); len(list) != 0 {
t.Fatalf("List should have been empty but was %v", list)
} else if list := set.CappedList(1); len(list) != 1 {
t.Fatalf("List should have had length %d but had %d", 1, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) {
t.Fatalf("List should have been %s but was %s", id, returnedID)
} else if list := set.CappedList(2); len(list) != 1 {
t.Fatalf("List should have had length %d but had %d", 1, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) {
t.Fatalf("List should have been %s but was %s", id, returnedID)
}
id2 := NewID([32]byte{1})
set.Add(id2)
if list := set.CappedList(0); len(list) != 0 {
t.Fatalf("List should have been empty but was %v", list)
} else if list := set.CappedList(1); len(list) != 1 {
t.Fatalf("List should have had length %d but had %d", 1, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) && !id2.Equals(returnedID) {
t.Fatalf("List should have been %s but was %s", id, returnedID)
} else if list := set.CappedList(2); len(list) != 2 {
t.Fatalf("List should have had length %d but had %d", 2, len(list))
} else if list := set.CappedList(3); len(list) != 2 {
t.Fatalf("List should have had length %d but had %d", 2, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) && !id2.Equals(returnedID) {
t.Fatalf("list contains unexpected element %s", returnedID)
} else if returnedID := list[1]; !id.Equals(returnedID) && !id2.Equals(returnedID) {
t.Fatalf("list contains unexpected element %s", returnedID)
}
}

View File

@ -5,11 +5,18 @@ package ids
import "strings"
const (
minShortSetSize = 16
)
// ShortSet is a set of ShortIDs
type ShortSet map[[20]byte]bool
func (ids *ShortSet) init(size int) {
if *ids == nil {
if minShortSetSize > size {
size = minShortSetSize
}
*ids = make(map[[20]byte]bool, size)
}
}
@ -50,24 +57,34 @@ func (ids *ShortSet) Remove(idList ...ShortID) {
// Clear empties this set
func (ids *ShortSet) Clear() { *ids = nil }
// CappedList returns a list of length at most [size]. Size should be >= 0
// CappedList returns a list of length at most [size].
// Size should be >= 0. If size < 0, returns nil.
func (ids ShortSet) CappedList(size int) []ShortID {
idList := make([]ShortID, size)[:0]
if size < 0 {
return nil
}
if l := ids.Len(); l < size {
size = l
}
i := 0
idList := make([]ShortID, size)
for id := range ids {
if size <= 0 {
if i >= size {
break
}
size--
idList = append(idList, NewShortID(id))
idList[i] = NewShortID(id)
i++
}
return idList
}
// List converts this set into a list
func (ids ShortSet) List() []ShortID {
idList := make([]ShortID, len(ids))[:0]
idList := make([]ShortID, len(ids), len(ids))
i := 0
for id := range ids {
idList = append(idList, NewShortID(id))
idList[i] = NewShortID(id)
i++
}
return idList
}

View File

@ -8,12 +8,16 @@ import (
"strings"
)
const (
minUniqueBagSize = 16
)
// UniqueBag ...
type UniqueBag map[[32]byte]BitSet
func (b *UniqueBag) init() {
if *b == nil {
*b = make(map[[32]byte]BitSet)
*b = make(map[[32]byte]BitSet, minUniqueBagSize)
}
}

View File

@ -40,12 +40,11 @@ func main() {
defer log.StopOnPanic()
defer Config.DB.Close()
if Config.StakingIP.IsZero() {
log.Warn("NAT traversal has failed. It will be able to connect to less nodes.")
}
// Track if sybil control is enforced
if !Config.EnableStaking {
if !Config.EnableStaking && Config.EnableP2PTLS {
log.Warn("Staking is disabled. Sybil control is not enforced.")
}
if !Config.EnableStaking && !Config.EnableP2PTLS {
log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.")
}
@ -65,11 +64,19 @@ func main() {
log.Debug("assertions are enabled. This may slow down execution")
}
mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko")
mapper := nat.NewPortMapper(log, Config.Nat)
defer mapper.UnmapAllPorts()
mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port)
mapper.MapPort(Config.HTTPPort, Config.HTTPPort)
port, err := mapper.Map("TCP", Config.StakingLocalPort, "gecko-staking") // Open staking port
if err == nil {
Config.StakingIP.Port = port
} else {
log.Warn("NAT traversal has failed. The node will be able to connect to less nodes.")
}
if Config.HTTPHost != "127.0.0.1" && Config.HTTPHost != "localhost" { // Open HTTP port iff HTTP server not listening on localhost
_, _ = mapper.Map("TCP", Config.HTTPPort, "gecko-http")
}
node := node.Node{}

View File

@ -35,21 +35,25 @@ const (
// Results of parsing the CLI
var (
Config = node.Config{}
Err error
defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))
Config = node.Config{}
Err error
defaultNetworkName = genesis.TestnetName
defaultPluginDirs = []string{
"./build/plugins",
"./plugins",
os.ExpandEnv(filepath.Join("$HOME", ".gecko", "plugins")),
homeDir = os.ExpandEnv("$HOME")
defaultDbDir = filepath.Join(homeDir, ".gecko", "db")
defaultStakingKeyPath = filepath.Join(homeDir, ".gecko", "staking", "staker.key")
defaultStakingCertPath = filepath.Join(homeDir, ".gecko", "staking", "staker.crt")
defaultPluginDirs = []string{
filepath.Join(".", "build", "plugins"),
filepath.Join(".", "plugins"),
filepath.Join("/", "usr", "local", "lib", "gecko"),
filepath.Join(homeDir, ".gecko", "plugins"),
}
)
var (
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
errStakingRequiresTLS = errors.New("if staking is enabled, network TLS must also be enabled")
)
// GetIPs returns the default IPs for each network
@ -169,7 +173,7 @@ func init() {
version := fs.Bool("version", false, "If true, print version and quit")
// NetworkID:
networkName := fs.String("network-id", genesis.TestnetName, "Network ID this node will connect to")
networkName := fs.String("network-id", defaultNetworkName, "Network ID this node will connect to")
// Ava fees:
fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva")
@ -188,7 +192,7 @@ func init() {
consensusIP := fs.String("public-ip", "", "Public IP of this node")
// HTTP Server:
httpHost := fs.String("http-host", "", "Address of the HTTP server")
httpHost := fs.String("http-host", "127.0.0.1", "Address of the HTTP server")
httpPort := fs.Uint("http-port", 9650, "Port of the HTTP server")
fs.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs")
fs.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server")
@ -200,7 +204,9 @@ func init() {
// Staking:
consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
// TODO - keeping same flag for backwards compatibility, should be changed to "staking-enabled"
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Enable staking. If enabled, Network TLS is required.")
fs.BoolVar(&Config.EnableP2PTLS, "p2p-tls-enabled", true, "Require TLS to authenticate network communication")
fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")
@ -221,7 +227,8 @@ func init() {
fs.IntVar(&Config.ConsensusParams.ConcurrentRepolls, "snow-concurrent-repolls", 1, "Minimum number of concurrent polls for finalizing consensus")
// Enable/Disable APIs:
fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API")
fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", false, "If true, this node exposes the Admin API")
fs.BoolVar(&Config.InfoAPIEnabled, "api-info-enabled", true, "If true, this node exposes the Info API")
fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API")
fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API")
fs.BoolVar(&Config.HealthAPIEnabled, "api-health-enabled", true, "If true, this node exposes the Health API")
@ -234,7 +241,15 @@ func init() {
ferr := fs.Parse(os.Args[1:])
if *version { // If --version used, print version and exit
fmt.Println(node.Version.String())
networkID, err := genesis.NetworkID(defaultNetworkName)
if errs.Add(err); err != nil {
return
}
networkGeneration := genesis.NetworkName(networkID)
fmt.Printf(
"%s [database=%s, network=%s/%s]\n",
node.Version, dbVersion, defaultNetworkName, networkGeneration,
)
os.Exit(0)
}
@ -269,16 +284,16 @@ func init() {
Config.DB = memdb.New()
}
Config.Nat = nat.NewRouter()
var ip net.IP
// If public IP is not specified, get it using shell command dig
if *consensusIP == "" {
ip, err = Config.Nat.IP()
Config.Nat = nat.GetRouter()
ip, err = Config.Nat.ExternalIP()
if err != nil {
ip = net.IPv4zero // Couldn't get my IP...set to 0.0.0.0
}
} else {
Config.Nat = nat.NewNoRouter()
ip = net.ParseIP(*consensusIP)
}
@ -291,6 +306,7 @@ func init() {
IP: ip,
Port: uint16(*consensusPort),
}
Config.StakingLocalPort = uint16(*consensusPort)
defaultBootstrapIPs, defaultBootstrapIDs := GetDefaultBootstraps(networkID, 5)
@ -318,7 +334,13 @@ func init() {
*bootstrapIDs = strings.Join(defaultBootstrapIDs, ",")
}
}
if Config.EnableStaking {
if Config.EnableStaking && !Config.EnableP2PTLS {
errs.Add(errStakingRequiresTLS)
return
}
if Config.EnableP2PTLS {
i := 0
cb58 := formatting.CB58{}
for _, id := range strings.Split(*bootstrapIDs, ",") {

View File

@ -1,143 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package nat
import (
"sync"
"time"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
)
const (
defaultMappingTimeout = 30 * time.Minute
defaultMappingUpdateInterval = 3 * defaultMappingTimeout / 4
)
// Mapper maps port
type Mapper interface {
MapPort(newInternalPort, newExternalPort uint16) error
UnmapAllPorts() error
}
type mapper struct {
log logging.Logger
router Router
networkProtocol NetworkProtocol
mappingNames string
mappingTimeout time.Duration
mappingUpdateInterval time.Duration
closer chan struct{}
wg sync.WaitGroup
errLock sync.Mutex
errs wrappers.Errs
}
// NewMapper returns a new mapper that can map ports on a router
func NewMapper(
log logging.Logger,
router Router,
networkProtocol NetworkProtocol,
mappingNames string,
mappingTimeout time.Duration,
mappingUpdateInterval time.Duration,
) Mapper {
return &mapper{
log: log,
router: router,
networkProtocol: networkProtocol,
mappingNames: mappingNames,
mappingTimeout: mappingTimeout,
mappingUpdateInterval: mappingUpdateInterval,
closer: make(chan struct{}),
}
}
// NewDefaultMapper returns a new mapper that can map ports on a router with
// default settings
func NewDefaultMapper(
log logging.Logger,
router Router,
networkProtocol NetworkProtocol,
mappingNames string,
) Mapper {
return NewMapper(
log,
router,
networkProtocol,
mappingNames,
defaultMappingTimeout, // uses the default value
defaultMappingUpdateInterval, // uses the default value
)
}
// MapPort maps a local port to a port on the router until UnmapAllPorts is
// called.
func (m *mapper) MapPort(newInternalPort, newExternalPort uint16) error {
m.wg.Add(1)
go m.mapPort(newInternalPort, newExternalPort)
return nil
}
func (m *mapper) mapPort(newInternalPort, newExternalPort uint16) {
// duration is set to 0 here so that the select case will execute
// immediately
updateTimer := time.NewTimer(0)
defer func() {
updateTimer.Stop()
m.errLock.Lock()
m.errs.Add(m.router.UnmapPort(
m.networkProtocol,
newInternalPort,
newExternalPort))
m.errLock.Unlock()
m.log.Debug("Unmapped external port %d to internal port %d",
newExternalPort,
newInternalPort)
m.wg.Done()
}()
for {
select {
case <-updateTimer.C:
err := m.router.MapPort(
m.networkProtocol,
newInternalPort,
newExternalPort,
m.mappingNames,
m.mappingTimeout)
if err != nil {
m.errLock.Lock()
m.errs.Add(err)
m.errLock.Unlock()
m.log.Debug("Failed to add mapping from external port %d to internal port %d due to %s",
newExternalPort,
newInternalPort,
err)
} else {
m.log.Debug("Mapped external port %d to internal port %d",
newExternalPort,
newInternalPort)
}
// remap the port in m.mappingUpdateInterval
updateTimer.Reset(m.mappingUpdateInterval)
case _, _ = <-m.closer:
return // only return when all ports are unmapped
}
}
}
func (m *mapper) UnmapAllPorts() error {
close(m.closer)
m.wg.Wait()
return m.errs.Err
}

139
nat/nat.go Normal file
View File

@ -0,0 +1,139 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package nat
import (
"errors"
"net"
"sync"
"time"
"github.com/ava-labs/gecko/utils/logging"
)
const (
mapTimeout = 30 * time.Second
mapUpdateTimeout = mapTimeout / 2
maxRetries = 20
)
// Router describes the functionality that a network device must support to be
// able to open ports to an external IP.
type Router interface {
MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error
UnmapPort(protocol string, intPort, extPort uint16) error
ExternalIP() (net.IP, error)
GetPortMappingEntry(extPort uint16, protocol string) (
InternalIP string,
InternalPort uint16,
Description string,
err error,
)
}
// GetRouter returns a router on the current network.
func GetRouter() Router {
if r := getUPnPRouter(); r != nil {
return r
}
if r := getPMPRouter(); r != nil {
return r
}
return NewNoRouter()
}
// Mapper attempts to open a set of ports on a router
type Mapper struct {
log logging.Logger
r Router
closer chan struct{}
wg sync.WaitGroup
}
// NewPortMapper returns an initialized mapper
func NewPortMapper(log logging.Logger, r Router) Mapper {
return Mapper{
log: log,
r: r,
closer: make(chan struct{}),
}
}
// Map sets up a port mapping for the given protocol and internal port and
// returns the external port that was mapped. It returns 0 if mapping failed
// after the maximum number of retries.
func (dev *Mapper) Map(protocol string, intPort uint16, desc string) (uint16, error) {
mappedPort := make(chan uint16)
go dev.keepPortMapping(mappedPort, protocol, intPort, desc)
port := <-mappedPort
if port == 0 {
return 0, errors.New("failed to map port")
}
return port, nil
}
// keepPortMapping runs in the background to keep a port mapped. It renews
// the port mapping every mapUpdateTimeout.
func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string,
intPort uint16, desc string) {
updateTimer := time.NewTimer(mapUpdateTimeout)
for i := 0; i <= maxRetries; i++ {
extPort := intPort + uint16(i)
if intaddr, intPort, desc, err := dev.r.GetPortMappingEntry(extPort, protocol); err == nil {
dev.log.Debug("Port %d is taken by %s:%d: %s, retry with the next port",
extPort, intaddr, intPort, desc)
} else if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil {
dev.log.Debug("Map port failed. Protocol %s Internal %d External %d. %s",
protocol, intPort, extPort, err)
} else {
dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol,
intPort, extPort)
dev.wg.Add(1)
mappedPort <- extPort
defer func(extPort uint16) {
updateTimer.Stop()
dev.log.Debug("Unmap protocol %s external port %d", protocol, extPort)
dev.r.UnmapPort(protocol, intPort, extPort)
dev.wg.Done()
}(extPort)
for {
select {
case <-updateTimer.C:
if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil {
dev.log.Error("Renewing port mapping from external port %d to internal port %d failed with %s",
intPort, extPort, err)
} else {
dev.log.Debug("Renewed port mapping from external port %d to internal port %d.",
intPort, extPort)
}
updateTimer.Reset(mapUpdateTimeout)
case _, _ = <-dev.closer:
return
}
}
}
}
dev.log.Debug("Unable to map port %d~%d", intPort, intPort+maxRetries)
mappedPort <- 0
}
// UnmapAllPorts stops mapping all ports from this mapper and attempts to unmap
// them.
func (dev *Mapper) UnmapAllPorts() {
close(dev.closer)
dev.wg.Wait()
dev.log.Info("Unmapped all ports")
}
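Put together, the new API is meant to be driven the way main.go above drives it: pick a Router once, wrap it in a Mapper, and let the Mapper renew the lease in the background until shutdown. A caller-side sketch under stated assumptions: openStakingPort is a hypothetical helper, the logging.Logger is assumed to come from the node's existing log factory, and the port number and description are placeholders:

package example

import (
	"github.com/ava-labs/gecko/nat"
	"github.com/ava-labs/gecko/utils/logging"
)

// openStakingPort maps stakingPort on whatever NAT device GetRouter finds
// (UPnP first, then NAT-PMP, else the no-op "public network" router) and
// returns the external port plus a cleanup function.
func openStakingPort(log logging.Logger, stakingPort uint16) (uint16, func(), error) {
	mapper := nat.NewPortMapper(log, nat.GetRouter())

	extPort, err := mapper.Map("TCP", stakingPort, "gecko-staking")
	if err != nil {
		mapper.UnmapAllPorts()
		return 0, nil, err // NAT traversal failed; the caller keeps the local port
	}
	// Map has started a goroutine that renews the mapping until UnmapAllPorts is called.
	return extPort, mapper.UnmapAllPorts, nil
}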

View File

@ -4,25 +4,57 @@
package nat
import (
"errors"
"fmt"
"net"
"time"
)
var (
errNoRouter = errors.New("no nat enabled router was discovered")
)
const googleDNSServer = "8.8.8.8:80"
type noRouter struct{}
func (noRouter) MapPort(_ NetworkProtocol, _, _ uint16, _ string, _ time.Duration) error {
return errNoRouter
type noRouter struct {
ip net.IP
}
func (noRouter) UnmapPort(_ NetworkProtocol, _, _ uint16) error {
return errNoRouter
func (noRouter) MapPort(_ string, intPort, extPort uint16, _ string, _ time.Duration) error {
if intPort != extPort {
return fmt.Errorf("cannot map port %d to %d", intPort, extPort)
}
return nil
}
func (noRouter) IP() (net.IP, error) {
return nil, errNoRouter
func (noRouter) UnmapPort(string, uint16, uint16) error {
return nil
}
func (r noRouter) ExternalIP() (net.IP, error) {
return r.ip, nil
}
func (noRouter) GetPortMappingEntry(uint16, string) (string, uint16, string, error) {
return "", 0, "", nil
}
func getOutboundIP() (net.IP, error) {
conn, err := net.Dial("udp", googleDNSServer)
if err != nil {
return nil, err
}
if udpAddr, ok := conn.LocalAddr().(*net.UDPAddr); ok {
return udpAddr.IP, conn.Close()
}
conn.Close()
return nil, fmt.Errorf("getting outbound IP failed")
}
// NewNoRouter returns a router that assumes the network is public
func NewNoRouter() Router {
ip, err := getOutboundIP()
if err != nil {
return nil
}
return &noRouter{
ip: ip,
}
}
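Worth noting about getOutboundIP: dialing a UDP address never sends a packet; the Dial call only asks the kernel which local interface would be used to route toward 8.8.8.8, so this is a cheap way to guess the machine's outward-facing address when no NAT device was discovered. The noRouter then reports that address as its external IP and accepts any mapping whose internal and external ports already match.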

View File

@ -4,11 +4,13 @@
package nat
import (
"fmt"
"net"
"time"
"github.com/jackpal/gateway"
"github.com/jackpal/go-nat-pmp"
natpmp "github.com/jackpal/go-nat-pmp"
)
var (
@ -17,12 +19,12 @@ var (
// natPMPClient adapts the NAT-PMP protocol implementation so it conforms to
// the common interface.
type pmpClient struct {
type pmpRouter struct {
client *natpmp.Client
}
func (pmp *pmpClient) MapPort(
networkProtocol NetworkProtocol,
func (pmp *pmpRouter) MapPort(
networkProtocol string,
newInternalPort uint16,
newExternalPort uint16,
mappingName string,
@ -37,8 +39,8 @@ func (pmp *pmpClient) MapPort(
return err
}
func (pmp *pmpClient) UnmapPort(
networkProtocol NetworkProtocol,
func (pmp *pmpRouter) UnmapPort(
networkProtocol string,
internalPort uint16,
_ uint16) error {
protocol := string(networkProtocol)
@ -48,7 +50,7 @@ func (pmp *pmpClient) UnmapPort(
return err
}
func (pmp *pmpClient) IP() (net.IP, error) {
func (pmp *pmpRouter) ExternalIP() (net.IP, error) {
response, err := pmp.client.GetExternalAddress()
if err != nil {
return nil, err
@ -56,14 +58,20 @@ func (pmp *pmpClient) IP() (net.IP, error) {
return response.ExternalIPAddress[:], nil
}
func getPMPRouter() Router {
// go-nat-pmp does not support port mapping entry query
func (pmp *pmpRouter) GetPortMappingEntry(externalPort uint16, protocol string) (
string, uint16, string, error) {
return "", 0, "", fmt.Errorf("port mapping entry not found")
}
func getPMPRouter() *pmpRouter {
gatewayIP, err := gateway.DiscoverGateway()
if err != nil {
return nil
}
pmp := &pmpClient{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)}
if _, err := pmp.IP(); err != nil {
pmp := &pmpRouter{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)}
if _, err := pmp.ExternalIP(); err != nil {
return nil
}

View File

@ -1,65 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
// Package nat performs network address translation and provides helpers for
// routing ports.
package nat
import (
"net"
"time"
)
// NetworkProtocol is a protocol that will be used through a port
type NetworkProtocol string
// Available protocol
const (
TCP NetworkProtocol = "TCP"
UDP NetworkProtocol = "UDP"
)
// Router provides a standard NAT router functions. Specifically, allowing the
// fetching of public IPs and port forwarding to this computer.
type Router interface {
// mapPort creates a mapping between a port on the local computer to an
// external port on the router.
//
// The mappingName is something displayed on the router, so it is included
// for completeness.
MapPort(
networkProtocol NetworkProtocol,
newInternalPort uint16,
newExternalPort uint16,
mappingName string,
mappingDuration time.Duration) error
// UnmapPort clears a mapping that was previous made by a call to MapPort
UnmapPort(
networkProtocol NetworkProtocol,
internalPort uint16,
externalPort uint16) error
// Returns the routers IP address on the network the router considers
// external
IP() (net.IP, error)
}
// NewRouter returns a new router discovered on the local network
func NewRouter() Router {
routers := make(chan Router)
// Because getting a router can take a noticeable amount of time to error,
// we run these requests in parallel
go func() {
routers <- getUPnPRouter()
}()
go func() {
routers <- getPMPRouter()
}()
for i := 0; i < 2; i++ {
if router := <-routers; router != nil {
return router
}
}
return noRouter{}
}

View File

@ -4,7 +4,6 @@
package nat
import (
"errors"
"fmt"
"net"
"time"
@ -15,11 +14,7 @@ import (
)
const (
soapTimeout = time.Second
)
var (
errNoGateway = errors.New("Failed to connect to any avaliable gateways")
soapRequestTimeout = 3 * time.Second
)
// upnpClient is the interface used by goupnp for their client implementations
@ -47,69 +42,30 @@ type upnpClient interface {
// returns if there is rsip available, nat enabled, or an unexpected error.
GetNATRSIPStatus() (newRSIPAvailable bool, natEnabled bool, err error)
}
type upnpRouter struct {
root *goupnp.RootDevice
client upnpClient
}
func (n *upnpRouter) MapPort(
networkProtocol NetworkProtocol,
newInternalPort uint16,
newExternalPort uint16,
mappingName string,
mappingDuration time.Duration,
) error {
ip, err := n.localAddress()
if err != nil {
return err
}
protocol := string(networkProtocol)
// goupnp uses seconds to denote their lifetime
lifetime := uint32(mappingDuration / time.Second)
// UnmapPort's error is intentionally dropped, because the mapping may not
// exist.
n.UnmapPort(networkProtocol, newInternalPort, newExternalPort)
return n.client.AddPortMapping(
"", // newRemoteHost isn't used to limit the mapping to a host
newExternalPort,
protocol,
newInternalPort,
ip.String(), // newInternalClient is the client traffic should be sent to
true, // newEnabled enables port mappings
mappingName,
lifetime,
// attempts to get port mapping information given an external port and protocol
GetSpecificPortMappingEntry(
NewRemoteHost string,
NewExternalPort uint16,
NewProtocol string,
) (
NewInternalPort uint16,
NewInternalClient string,
NewEnabled bool,
NewPortMappingDescription string,
NewLeaseDuration uint32,
err error,
)
}
func (n *upnpRouter) UnmapPort(networkProtocol NetworkProtocol, _, externalPort uint16) error {
protocol := string(networkProtocol)
return n.client.DeletePortMapping(
"", // newRemoteHost isn't used to limit the mapping to a host
externalPort,
protocol)
type upnpRouter struct {
dev *goupnp.RootDevice
client upnpClient
}
func (n *upnpRouter) IP() (net.IP, error) {
ipStr, err := n.client.GetExternalIPAddress()
if err != nil {
return nil, err
}
ip := net.ParseIP(ipStr)
if ip == nil {
return nil, fmt.Errorf("invalid IP %s", ipStr)
}
return ip, nil
}
func (n *upnpRouter) localAddress() (net.IP, error) {
func (r *upnpRouter) localIP() (net.IP, error) {
// attempt to get an address on the router
deviceAddr, err := net.ResolveUDPAddr("udp4", n.root.URLBase.Host)
deviceAddr, err := net.ResolveUDPAddr("udp", r.dev.URLBase.Host)
if err != nil {
return nil, err
}
@ -120,7 +76,7 @@ func (n *upnpRouter) localAddress() (net.IP, error) {
return nil, err
}
// attempt to find one of my ips that the router would know about
// attempt to find one of my IPs that matches router's record
for _, netInterface := range netInterfaces {
addrs, err := netInterface.Addrs()
if err != nil {
@ -128,9 +84,6 @@ func (n *upnpRouter) localAddress() (net.IP, error) {
}
for _, addr := range addrs {
// this is pretty janky, but it seems to be the best way to get the
// ip mask and properly check if the ip references the device we are
// connected to
ipNet, ok := addr.(*net.IPNet)
if !ok {
continue
@ -144,110 +97,119 @@ func (n *upnpRouter) localAddress() (net.IP, error) {
return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP)
}
// getUPnPRouter searches for all Gateway Devices that have avaliable
// connections in the goupnp library and returns the first connection it can
// find.
func getUPnPRouter() Router {
routers := make(chan *upnpRouter)
// Because DiscoverDevices takes a noticeable amount of time to error, we
// run these requests in parallel
go func() {
routers <- connectToGateway(internetgateway1.URN_WANConnectionDevice_1, gateway1)
}()
go func() {
routers <- connectToGateway(internetgateway2.URN_WANConnectionDevice_2, gateway2)
}()
for i := 0; i < 2; i++ {
if router := <-routers; router != nil {
return router
}
func (r *upnpRouter) ExternalIP() (net.IP, error) {
str, err := r.client.GetExternalIPAddress()
if err != nil {
return nil, err
}
return nil
ip := net.ParseIP(str)
if ip == nil {
return nil, fmt.Errorf("invalid IP %s", str)
}
return ip, nil
}
func gateway1(client goupnp.ServiceClient) upnpClient {
func (r *upnpRouter) MapPort(protocol string, intPort, extPort uint16,
desc string, duration time.Duration) error {
ip, err := r.localIP()
if err != nil {
return nil
}
lifetime := uint32(duration / time.Second)
return r.client.AddPortMapping("", extPort, protocol, intPort,
ip.String(), true, desc, lifetime)
}
func (r *upnpRouter) UnmapPort(protocol string, _, extPort uint16) error {
return r.client.DeletePortMapping("", extPort, protocol)
}
func (r *upnpRouter) GetPortMappingEntry(extPort uint16, protocol string) (string, uint16, string, error) {
intPort, intAddr, _, desc, _, err := r.client.GetSpecificPortMappingEntry("", extPort, protocol)
return intAddr, intPort, desc, err
}
// create UPnP SOAP service client with URN
func getUPnPClient(client goupnp.ServiceClient) upnpClient {
switch client.Service.ServiceType {
case internetgateway1.URN_WANIPConnection_1:
return &internetgateway1.WANIPConnection1{ServiceClient: client}
case internetgateway1.URN_WANPPPConnection_1:
return &internetgateway1.WANPPPConnection1{ServiceClient: client}
default:
return nil
}
}
func gateway2(client goupnp.ServiceClient) upnpClient {
switch client.Service.ServiceType {
case internetgateway2.URN_WANIPConnection_1:
return &internetgateway2.WANIPConnection1{ServiceClient: client}
case internetgateway2.URN_WANIPConnection_2:
return &internetgateway2.WANIPConnection2{ServiceClient: client}
case internetgateway2.URN_WANPPPConnection_1:
return &internetgateway2.WANPPPConnection1{ServiceClient: client}
default:
return nil
}
}
func connectToGateway(deviceType string, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter {
devs, err := goupnp.DiscoverDevices(deviceType)
// discover tries to find a gateway device matching the given search target
func discover(target string) *upnpRouter {
devs, err := goupnp.DiscoverDevices(target)
if err != nil {
return nil
}
// we are iterating over all the network devices, acting a possible roots
for i := range devs {
dev := &devs[i]
if dev.Root == nil {
router := make(chan *upnpRouter)
for i := 0; i < len(devs); i++ {
if devs[i].Root == nil {
continue
}
go func(dev *goupnp.MaybeRootDevice) {
var r *upnpRouter = nil
dev.Root.Device.VisitServices(func(service *goupnp.Service) {
c := goupnp.ServiceClient{
SOAPClient: service.NewSOAPClient(),
RootDevice: dev.Root,
Location: dev.Location,
Service: service,
}
c.SOAPClient.HTTPClient.Timeout = soapRequestTimeout
client := getUPnPClient(c)
if client == nil {
return
}
if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat {
return
}
r = &upnpRouter{dev.Root, client}
})
router <- r
}(&devs[i])
}
// the root device may be a router, so attempt to connect to that
rootDevice := &dev.Root.Device
if upnp := getRouter(dev, rootDevice, toClient); upnp != nil {
return upnp
}
// attempt to connect to any sub devices
devices := rootDevice.Devices
for i := range devices {
if upnp := getRouter(dev, &devices[i], toClient); upnp != nil {
return upnp
}
for i := 0; i < len(devs); i++ {
if r := <-router; r != nil {
return r
}
}
return nil
}
func getRouter(rootDevice *goupnp.MaybeRootDevice, device *goupnp.Device, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter {
for i := range device.Services {
service := &device.Services[i]
// getUPnPRouter searches for an internet gateway using both Device Control Protocol
// versions and returns the first one it finds. It returns nil if no UPnP gateway is found
func getUPnPRouter() *upnpRouter {
targets := []string{
internetgateway1.URN_WANConnectionDevice_1,
internetgateway2.URN_WANConnectionDevice_2,
}
soapClient := service.NewSOAPClient()
// make sure the client times out if needed
soapClient.HTTPClient.Timeout = soapTimeout
routers := make(chan *upnpRouter)
// attempt to create a client connection
serviceClient := goupnp.ServiceClient{
SOAPClient: soapClient,
RootDevice: rootDevice.Root,
Location: rootDevice.Location,
Service: service,
}
client := toClient(serviceClient)
if client == nil {
continue
}
for _, urn := range targets {
go func(urn string) {
routers <- discover(urn)
}(urn)
}
// check whether port mapping is enabled
if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat {
continue
}
// we found a router!
return &upnpRouter{
root: rootDevice.Root,
client: client,
for i := 0; i < len(targets); i++ {
if r := <-routers; r != nil {
return r
}
}
return nil
}

View File

@ -33,6 +33,12 @@ func (m Builder) PeerList(ipDescs []utils.IPDesc) (Msg, error) {
return m.Pack(PeerList, map[Field]interface{}{Peers: ipDescs})
}
// Ping message
func (m Builder) Ping() (Msg, error) { return m.Pack(Ping, nil) }
// Pong message
func (m Builder) Pong() (Msg, error) { return m.Pack(Pong, nil) }
// GetAcceptedFrontier message
func (m Builder) GetAcceptedFrontier(chainID ids.ID, requestID uint32) (Msg, error) {
return m.Pack(GetAcceptedFrontier, map[Field]interface{}{

View File

@ -132,6 +132,10 @@ func (op Op) String() string {
return "get_peerlist"
case PeerList:
return "peerlist"
case Ping:
return "ping"
case Pong:
return "pong"
case GetAcceptedFrontier:
return "get_accepted_frontier"
case AcceptedFrontier:
@ -177,11 +181,12 @@ const (
PushQuery
PullQuery
Chits
// Bootstrapping:
// TODO: Move GetAncestors and MultiPut with the rest of the bootstrapping
// commands when we do non-backwards compatible upgrade
// TODO: Reorder these messages when we transition to everest
GetAncestors
MultiPut
Ping
Pong
)
// Defines the messages that can be sent/received with this network
@ -192,6 +197,8 @@ var (
Version: {NetworkID, NodeID, MyTime, IP, VersionStr},
GetPeerList: {},
PeerList: {Peers},
Ping: {},
Pong: {},
// Bootstrapping:
GetAcceptedFrontier: {ChainID, RequestID},
AcceptedFrontier: {ChainID, RequestID, ContainerIDs},
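Ping and Pong deliberately carry no fields; they exist only as keep-alive traffic so that a peer's read deadline (pingPongTimeout in the network package further down) can be pushed forward whenever anything arrives. In this change the peer-side wiring is not yet active: the call to sendPings and the SetReadDeadline blocks in peer.go are still commented out. Appending the new opcodes after GetAncestors and MultiPut, rather than grouping them with the other non-bootstrap messages, keeps existing message numbers stable for older peers, which is what the TODO about reordering refers to.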

View File

@ -54,6 +54,7 @@ type metrics struct {
getVersion, version,
getPeerlist, peerlist,
ping, pong,
getAcceptedFrontier, acceptedFrontier,
getAccepted, accepted,
get, getAncestors, put, multiPut,
@ -78,6 +79,8 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error {
errs.Add(m.version.initialize(Version, registerer))
errs.Add(m.getPeerlist.initialize(GetPeerList, registerer))
errs.Add(m.peerlist.initialize(PeerList, registerer))
errs.Add(m.ping.initialize(Ping, registerer))
errs.Add(m.pong.initialize(Pong, registerer))
errs.Add(m.getAcceptedFrontier.initialize(GetAcceptedFrontier, registerer))
errs.Add(m.acceptedFrontier.initialize(AcceptedFrontier, registerer))
errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
@ -103,6 +106,10 @@ func (m *metrics) message(msgType Op) *messageMetrics {
return &m.getPeerlist
case PeerList:
return &m.peerlist
case Ping:
return &m.ping
case Pong:
return &m.pong
case GetAcceptedFrontier:
return &m.getAcceptedFrontier
case AcceptedFrontier:

View File

@ -21,6 +21,7 @@ import (
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
@ -42,6 +43,12 @@ const (
defaultGetVersionTimeout = 2 * time.Second
defaultAllowPrivateIPs = true
defaultGossipSize = 50
defaultPingPongTimeout = time.Minute
defaultPingFrequency = 3 * defaultPingPongTimeout / 4
// Request ID used when sending a Put message to gossip an accepted container
// (ie not sent in response to a Get)
GossipMsgRequestID = math.MaxUint32
)
// Network defines the functionality of the networking library.
@ -98,6 +105,7 @@ type network struct {
serverUpgrader Upgrader
clientUpgrader Upgrader
vdrs validators.Set // set of current validators in the AVAnet
beacons validators.Set // set of beacons in the AVAnet
router router.Router // router must be thread safe
nodeID uint32
@ -118,6 +126,8 @@ type network struct {
getVersionTimeout time.Duration
allowPrivateIPs bool
gossipSize int
pingPongTimeout time.Duration
pingFrequency time.Duration
executor timer.Executor
@ -150,6 +160,7 @@ func NewDefaultNetwork(
serverUpgrader,
clientUpgrader Upgrader,
vdrs validators.Set,
beacons validators.Set,
router router.Router,
) Network {
return NewNetwork(
@ -165,6 +176,7 @@ func NewDefaultNetwork(
serverUpgrader,
clientUpgrader,
vdrs,
beacons,
router,
defaultInitialReconnectDelay,
defaultMaxReconnectDelay,
@ -179,6 +191,8 @@ func NewDefaultNetwork(
defaultGetVersionTimeout,
defaultAllowPrivateIPs,
defaultGossipSize,
defaultPingPongTimeout,
defaultPingFrequency,
)
}
@ -196,6 +210,7 @@ func NewNetwork(
serverUpgrader,
clientUpgrader Upgrader,
vdrs validators.Set,
beacons validators.Set,
router router.Router,
initialReconnectDelay,
maxReconnectDelay time.Duration,
@ -210,6 +225,8 @@ func NewNetwork(
getVersionTimeout time.Duration,
allowPrivateIPs bool,
gossipSize int,
pingPongTimeout time.Duration,
pingFrequency time.Duration,
) Network {
net := &network{
log: log,
@ -223,6 +240,7 @@ func NewNetwork(
serverUpgrader: serverUpgrader,
clientUpgrader: clientUpgrader,
vdrs: vdrs,
beacons: beacons,
router: router,
nodeID: rand.Uint32(),
initialReconnectDelay: initialReconnectDelay,
@ -238,6 +256,8 @@ func NewNetwork(
getVersionTimeout: getVersionTimeout,
allowPrivateIPs: allowPrivateIPs,
gossipSize: gossipSize,
pingPongTimeout: pingPongTimeout,
pingFrequency: pingFrequency,
disconnectedIPs: make(map[string]struct{}),
connectedIPs: make(map[string]struct{}),
@ -278,8 +298,11 @@ func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d",
containerIDs.Len())
n.log.Error("failed to build AcceptedFrontier(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
return // Packing message failed
}
@ -291,7 +314,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID)
n.log.Debug("failed to send AcceptedFrontier(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.acceptedFrontier.numFailed.Inc()
} else {
n.acceptedFrontier.numSent.Inc()
@ -302,6 +329,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.GetAccepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("failed to build GetAccepted(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
@ -319,6 +351,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send GetAccepted(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
n.getAccepted.numFailed.Inc()
} else {
@ -331,8 +368,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.Accepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d",
containerIDs.Len())
n.log.Error("failed to build Accepted(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
return // Packing message failed
}
@ -344,33 +384,17 @@ func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID ui
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an Accepted message to: %s", validatorID)
n.log.Debug("failed to send Accepted(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.accepted.numFailed.Inc()
} else {
n.accepted.numSent.Inc()
}
}
// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.Get(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Get message to: %s", validatorID)
n.get.numFailed.Inc()
} else {
n.get.numSent.Inc()
}
}
// GetAncestors implements the Sender interface.
func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.GetAncestors(chainID, requestID, containerID)
@ -387,36 +411,18 @@ func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestI
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send GetAncestors(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.GetAncestorsFailed(validatorID, chainID, requestID) })
n.getAncestors.numFailed.Inc()
n.log.Debug("failed to send a GetAncestors message to: %s", validatorID)
} else {
n.getAncestors.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build Put message because of container of size %d", len(container))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Put message to: %s", validatorID)
n.put.numFailed.Inc()
} else {
n.put.numSent.Inc()
}
}
// MultiPut implements the Sender interface.
func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
msg, err := n.b.MultiPut(chainID, requestID, containers)
@ -433,22 +439,90 @@ func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID ui
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a MultiPut message to: %s", validatorID)
n.log.Debug("failed to send MultiPut(%s, %s, %d, %d)",
validatorID,
chainID,
requestID,
len(containers))
n.multiPut.numFailed.Inc()
} else {
n.multiPut.numSent.Inc()
}
}
// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.Get(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send Get(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.GetFailed(validatorID, chainID, requestID) })
n.get.numFailed.Inc()
} else {
n.get.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build Put(%s, %d, %s): %s. len(container) : %d",
chainID,
requestID,
containerID,
err,
len(container))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send Put(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
n.put.numFailed.Inc()
} else {
n.put.numSent.Inc()
}
}
// PushQuery implements the Sender interface.
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.PushQuery(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build PushQuery(%s, %d, %s): %s. len(container): %d",
chainID,
requestID,
containerID,
err,
len(container))
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
}
n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
return // Packing message failed
}
@ -462,7 +536,12 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PushQuery message to: %s", vID)
n.log.Debug("failed to send PushQuery(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pushQuery.numFailed.Inc()
} else {
@ -486,7 +565,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PullQuery message to: %s", vID)
n.log.Debug("failed to send PullQuery(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pullQuery.numFailed.Inc()
} else {
@ -499,7 +582,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
msg, err := n.b.Chits(chainID, requestID, votes)
if err != nil {
n.log.Error("failed to build Chits message because of %d votes", votes.Len())
n.log.Error("failed to build Chits(%s, %d, %s): %s",
chainID,
requestID,
votes,
err)
return
}
@ -511,7 +598,11 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Chits message to: %s", validatorID)
n.log.Debug("failed to send Chits(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
votes)
n.chits.numFailed.Inc()
} else {
n.chits.numSent.Inc()
@ -521,7 +612,8 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
// Gossip attempts to gossip the container to the network
func (n *network) Gossip(chainID, containerID ids.ID, container []byte) {
if err := n.gossipContainer(chainID, containerID, container); err != nil {
n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
n.log.Debug("failed to Gossip(%s, %s): %s", chainID, containerID, err)
n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}
@ -632,7 +724,7 @@ func (n *network) Track(ip utils.IPDesc) {
// assumes the stateLock is not held.
func (n *network) gossipContainer(chainID, containerID ids.ID, container []byte) error {
msg, err := n.b.Put(chainID, math.MaxUint32, containerID, container)
msg, err := n.b.Put(chainID, GossipMsgRequestID, containerID, container)
if err != nil {
return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
}
@ -695,7 +787,9 @@ func (n *network) gossip() {
}
msg, err := n.b.PeerList(ips)
if err != nil {
n.log.Warn("failed to gossip PeerList message due to %s", err)
n.log.Error("failed to build peer list to gossip: %s. len(ips): %d",
err,
len(ips))
continue
}

View File

@ -197,6 +197,7 @@ func TestNewDefaultNetwork(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net)
@ -280,6 +281,7 @@ func TestEstablishConnection(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -297,6 +299,7 @@ func TestEstablishConnection(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -419,6 +422,7 @@ func TestDoubleTrack(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -436,6 +440,7 @@ func TestDoubleTrack(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -559,6 +564,7 @@ func TestDoubleClose(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -576,6 +582,7 @@ func TestDoubleClose(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -704,6 +711,7 @@ func TestRemoveHandlers(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -721,6 +729,7 @@ func TestRemoveHandlers(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -858,6 +867,7 @@ func TestTrackConnected(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -875,6 +885,7 @@ func TestTrackConnected(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -999,6 +1010,7 @@ func TestTrackConnectedRace(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -1016,6 +1028,7 @@ func TestTrackConnectedRace(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)

View File

@ -64,6 +64,24 @@ func (p *peer) Start() {
// Initially send the version to the peer
go p.Version()
go p.requestVersion()
// go p.sendPings()
}
func (p *peer) sendPings() {
t := time.NewTicker(p.net.pingFrequency)
defer t.Stop()
for range t.C {
p.net.stateLock.Lock()
closed := p.closed
p.net.stateLock.Unlock()
if closed {
return
}
p.Ping()
}
}
// request the version from the peer until we get the version from them
@ -80,6 +98,7 @@ func (p *peer) requestVersion() {
if connected || closed {
return
}
p.GetVersion()
}
}
@ -88,6 +107,11 @@ func (p *peer) requestVersion() {
func (p *peer) ReadMessages() {
defer p.Close()
// if err := p.conn.SetReadDeadline(p.net.clock.Time().Add(p.net.pingPongTimeout)); err != nil {
// p.net.log.Verbo("error on setting the connection read timeout %s", err)
// return
// }
pendingBuffer := wrappers.Packer{}
readBuffer := make([]byte, 1<<10)
for {
@ -218,7 +242,15 @@ func (p *peer) send(msg Msg) bool {
// assumes the stateLock is not held
func (p *peer) handle(msg Msg) {
p.net.heartbeat()
atomic.StoreInt64(&p.lastReceived, p.net.clock.Time().Unix())
currentTime := p.net.clock.Time()
atomic.StoreInt64(&p.lastReceived, currentTime.Unix())
// if err := p.conn.SetReadDeadline(currentTime.Add(p.net.pingPongTimeout)); err != nil {
// p.net.log.Verbo("error on setting the connection read timeout %s, closing the connection", err)
// p.Close()
// return
// }
op := msg.Op()
msgMetrics := p.net.message(op)
@ -235,6 +267,12 @@ func (p *peer) handle(msg Msg) {
case GetVersion:
p.getVersion(msg)
return
case Ping:
p.ping(msg)
return
case Pong:
p.pong(msg)
return
}
if !p.connected {
p.net.log.Debug("dropping message from %s because the connection hasn't been established yet", p.id)
@ -318,6 +356,12 @@ func (p *peer) GetPeerList() {
p.Send(msg)
}
// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
p.PeerList(ips)
}
// assumes the stateLock is not held
func (p *peer) PeerList(peers []utils.IPDesc) {
msg, err := p.net.b.PeerList(peers)
@ -326,7 +370,28 @@ func (p *peer) PeerList(peers []utils.IPDesc) {
return
}
p.Send(msg)
return
}
// assumes the stateLock is not held
func (p *peer) Ping() {
msg, err := p.net.b.Ping()
p.net.log.AssertNoError(err)
if p.Send(msg) {
p.net.ping.numSent.Inc()
} else {
p.net.ping.numFailed.Inc()
}
}
// assumes the stateLock is not held
func (p *peer) Pong() {
msg, err := p.net.b.Pong()
p.net.log.AssertNoError(err)
if p.Send(msg) {
p.net.pong.numSent.Inc()
} else {
p.net.pong.numFailed.Inc()
}
}
// assumes the stateLock is not held
@ -405,8 +470,13 @@ func (p *peer) version(msg Msg) {
}
if p.net.version.Before(peerVersion) {
p.net.log.Info("peer attempting to connect with newer version %s. You may want to update your client",
peerVersion)
if p.net.beacons.Contains(p.id) {
p.net.log.Info("beacon attempting to connect with newer version %s. You may want to update your client",
peerVersion)
} else {
p.net.log.Debug("peer attempting to connect with newer version %s. You may want to update your client",
peerVersion)
}
}
if err := p.net.version.Compatible(peerVersion); err != nil {
@ -458,17 +528,6 @@ func (p *peer) version(msg Msg) {
p.net.connected(p)
}
// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
reply, err := p.net.b.PeerList(ips)
if err != nil {
p.net.log.Warn("failed to send PeerList message due to %s", err)
return
}
p.Send(reply)
}
// assumes the stateLock is not held
func (p *peer) getPeerList(_ Msg) { p.SendPeerList() }
@ -488,6 +547,12 @@ func (p *peer) peerList(msg Msg) {
p.net.stateLock.Unlock()
}
// assumes the stateLock is not held
func (p *peer) ping(_ Msg) { p.Pong() }
// assumes the stateLock is not held
func (p *peer) pong(_ Msg) {}
// assumes the stateLock is not held
func (p *peer) getAcceptedFrontier(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))

6
nginx.template Normal file
View File

@ -0,0 +1,6 @@
server {
listen ${NGINX_PORT};
location ${LOCAL_URL} {
proxy_pass ${RPC_URL};
}
}
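The ${NGINX_PORT}, ${LOCAL_URL} and ${RPC_URL} placeholders are plain environment-variable references, so the container entrypoint presumably renders this template with a tool such as envsubst before starting nginx, pointing the proxied location at the node's HTTP port so RPC calls can be served from the same container.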

View File

@ -33,10 +33,12 @@ type Config struct {
DB database.Database
// Staking configuration
StakingIP utils.IPDesc
EnableStaking bool
StakingKeyFile string
StakingCertFile string
StakingIP utils.IPDesc
StakingLocalPort uint16
EnableP2PTLS bool
EnableStaking bool
StakingKeyFile string
StakingCertFile string
// Bootstrapping configuration
BootstrapPeers []*Peer
@ -50,6 +52,7 @@ type Config struct {
// Enable/Disable APIs
AdminAPIEnabled bool
InfoAPIEnabled bool
KeystoreAPIEnabled bool
MetricsAPIEnabled bool
HealthAPIEnabled bool

View File

@ -7,6 +7,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net"
@ -18,6 +19,7 @@ import (
"github.com/ava-labs/gecko/api"
"github.com/ava-labs/gecko/api/admin"
"github.com/ava-labs/gecko/api/health"
"github.com/ava-labs/gecko/api/info"
"github.com/ava-labs/gecko/api/ipcs"
"github.com/ava-labs/gecko/api/keystore"
"github.com/ava-labs/gecko/api/metrics"
@ -56,7 +58,7 @@ var (
genesisHashKey = []byte("genesisID")
// Version is the version of this code
Version = version.NewDefaultVersion("avalanche", 0, 5, 5)
Version = version.NewDefaultVersion("avalanche", 0, 5, 7)
versionParser = version.NewDefaultParser()
)
@ -92,6 +94,9 @@ type Node struct {
// Net runs the networking stack
Net network.Network
// this node's initial connections to the network
beacons validators.Set
// current validators of the network
vdrs validators.Manager
@ -112,14 +117,14 @@ type Node struct {
*/
func (n *Node) initNetworking() error {
listener, err := net.Listen(TCP, n.Config.StakingIP.PortString())
listener, err := net.Listen(TCP, fmt.Sprintf(":%d", n.Config.StakingLocalPort))
if err != nil {
return err
}
dialer := network.NewDialer(TCP)
var serverUpgrader, clientUpgrader network.Upgrader
if n.Config.EnableStaking {
if n.Config.EnableP2PTLS {
cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile)
if err != nil {
return err
@ -164,6 +169,7 @@ func (n *Node) initNetworking() error {
serverUpgrader,
clientUpgrader,
defaultSubnetValidators,
n.beacons,
n.Config.ConsensusRouter,
)
@ -253,7 +259,7 @@ func (n *Node) initDatabase() error {
// Otherwise, it is a hash of the TLS certificate that this node
// uses for P2P communication
func (n *Node) initNodeID() error {
if !n.Config.EnableStaking {
if !n.Config.EnableP2PTLS {
n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String())))
n.Log.Info("Set the node's ID to %s", n.ID)
return nil
@ -277,6 +283,14 @@ func (n *Node) initNodeID() error {
return nil
}
// Create the IDs of the peers this node should first connect to
func (n *Node) initBeacons() {
n.beacons = validators.NewSet()
for _, peer := range n.Config.BootstrapPeers {
n.beacons.Add(validators.NewValidator(peer.ID, 1))
}
}
// Create the vmManager and register the following vms:
// AVM, Simple Payments DAG, Simple Payments Chain
// The Platform VM is registered in initStaking because
@ -359,11 +373,6 @@ func (n *Node) initChains() error {
return err
}
beacons := validators.NewSet()
for _, peer := range n.Config.BootstrapPeers {
beacons.Add(validators.NewValidator(peer.ID, 1))
}
genesisBytes, err := genesis.Genesis(n.Config.NetworkID)
if err != nil {
return err
@ -375,7 +384,7 @@ func (n *Node) initChains() error {
SubnetID: platformvm.DefaultSubnetID,
GenesisData: genesisBytes, // Specifies other chains to create
VMAlias: platformvm.ID.String(),
CustomBeacons: beacons,
CustomBeacons: n.beacons,
})
return nil
@ -435,58 +444,105 @@ func (n *Node) initSharedMemory() {
// initKeystoreAPI initializes the keystore service
// Assumes n.APIServer is already set
func (n *Node) initKeystoreAPI() {
n.Log.Info("initializing Keystore API")
func (n *Node) initKeystoreAPI() error {
n.Log.Info("initializing keystore")
keystoreDB := prefixdb.New([]byte("keystore"), n.DB)
n.keystoreServer.Initialize(n.Log, keystoreDB)
keystoreHandler := n.keystoreServer.CreateHandler()
if n.Config.KeystoreAPIEnabled {
n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog)
if !n.Config.KeystoreAPIEnabled {
n.Log.Info("skipping keystore API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing keystore API")
return n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog)
}
// initMetricsAPI initializes the Metrics API
// Assumes n.APIServer is already set
func (n *Node) initMetricsAPI() {
n.Log.Info("initializing Metrics API")
func (n *Node) initMetricsAPI() error {
registry, handler := metrics.NewService()
if n.Config.MetricsAPIEnabled {
n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog)
}
// It is assumed by components of the system that the Metrics interface is
// non-nil. So, it is set regardless of whether the metrics API is available or not.
n.Config.ConsensusParams.Metrics = registry
if !n.Config.MetricsAPIEnabled {
n.Log.Info("skipping metrics API initialization because it has been disabled")
return nil
}
n.Log.Info("initializing metrics API")
return n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog)
}
// initAdminAPI initializes the Admin API service
// Assumes n.log, n.chainManager, and n.ValidatorAPI already initialized
func (n *Node) initAdminAPI() {
if n.Config.AdminAPIEnabled {
n.Log.Info("initializing Admin API")
service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
func (n *Node) initAdminAPI() error {
if !n.Config.AdminAPIEnabled {
n.Log.Info("skipping admin API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing admin API")
service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
return n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
}
func (n *Node) initInfoAPI() error {
if !n.Config.InfoAPIEnabled {
n.Log.Info("skipping info API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing info API")
service := info.NewService(n.Log, Version, n.ID, n.Config.NetworkID, n.chainManager, n.Net)
return n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "", n.HTTPLog)
}
// initHealthAPI initializes the Health API service
// Assumes n.Log, n.ConsensusAPI, and n.ValidatorAPI already initialized
func (n *Node) initHealthAPI() {
// Assumes n.Log, n.Net, n.APIServer, n.HTTPLog already initialized
func (n *Node) initHealthAPI() error {
if !n.Config.HealthAPIEnabled {
return
n.Log.Info("skipping health API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing Health API")
service := health.NewService(n.Log)
service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute)
n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
if err := service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute); err != nil {
return fmt.Errorf("couldn't register heartbeat health check: %w", err)
}
isBootstrappedFunc := func() (interface{}, error) {
if pChainID, err := n.chainManager.Lookup("P"); err != nil {
return nil, errors.New("P-Chain not created")
} else if !n.chainManager.IsBootstrapped(pChainID) {
return nil, errors.New("P-Chain not bootstrapped")
}
if xChainID, err := n.chainManager.Lookup("X"); err != nil {
return nil, errors.New("X-Chain not created")
} else if !n.chainManager.IsBootstrapped(xChainID) {
return nil, errors.New("X-Chain not bootstrapped")
}
if cChainID, err := n.chainManager.Lookup("C"); err != nil {
return nil, errors.New("C-Chain not created")
} else if !n.chainManager.IsBootstrapped(cChainID) {
return nil, errors.New("C-Chain not bootstrapped")
}
return nil, nil
}
// Passes if the P, X and C chains are finished bootstrapping
if err := service.RegisterMonotonicCheckFunc("chains.default.bootstrapped", isBootstrappedFunc); err != nil {
return err
}
return n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
}
// initIPCAPI initializes the IPC API service
// Assumes n.log and n.chainManager already initialized
func (n *Node) initIPCAPI() {
if n.Config.IPCEnabled {
n.Log.Info("initializing IPC API")
service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog)
func (n *Node) initIPCAPI() error {
if !n.Config.IPCEnabled {
n.Log.Info("skipping ipc API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing ipc API")
service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer)
return n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog)
}
// Give chains and VMs aliases as specified by the genesis information
@ -542,10 +598,16 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
return fmt.Errorf("problem initializing staker ID: %w", err)
}
n.initBeacons()
// Start HTTP APIs
n.initAPIServer() // Start the API Server
n.initKeystoreAPI() // Start the Keystore API
n.initMetricsAPI() // Start the Metrics API
n.initAPIServer() // Start the API Server
if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API
return fmt.Errorf("couldn't initialize keystore API: %w", err)
}
if err := n.initMetricsAPI(); err != nil { // Start the Metrics API
return fmt.Errorf("couldn't initialize metrics API: %w", err)
}
// initialize shared memory
n.initSharedMemory()
@ -561,14 +623,25 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
n.initEventDispatcher() // Set up the event dispatcher
n.initChainManager() // Set up the chain manager
n.initAdminAPI() // Start the Admin API
n.initHealthAPI() // Start the Health API
n.initIPCAPI() // Start the IPC API
if err := n.initAliases(); err != nil { // Set up aliases
return err
if err := n.initAdminAPI(); err != nil { // Start the Admin API
return fmt.Errorf("couldn't initialize admin API: %w", err)
}
return n.initChains() // Start the Platform chain
if err := n.initInfoAPI(); err != nil { // Start the Info API
return fmt.Errorf("couldn't initialize info API: %w", err)
}
if err := n.initHealthAPI(); err != nil { // Start the Health API
return fmt.Errorf("couldn't initialize health API: %w", err)
}
if err := n.initIPCAPI(); err != nil { // Start the IPC API
return fmt.Errorf("couldn't initialize ipc API: %w", err)
}
if err := n.initAliases(); err != nil { // Set up aliases
return fmt.Errorf("couldn't initialize aliases: %w", err)
}
if err := n.initChains(); err != nil { // Start the Platform chain
return fmt.Errorf("couldn't initialize chains: %w", err)
}
return nil
}
// Shutdown this node
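The bootstrap check above is registered with RegisterMonotonicCheckFunc, whose name suggests the result is latched: once the P, X and C chains have all reported bootstrapped, the health endpoint keeps reporting success without re-running the lookups. A sketch of what such a latching wrapper could look like (illustrative only, not the health package's actual implementation):

package main

import (
	"errors"
	"fmt"
)

// monotonicCheck latches the first success of the wrapped check function.
type monotonicCheck struct {
	passed bool
	check  func() (interface{}, error)
}

func (m *monotonicCheck) Execute() (interface{}, error) {
	if m.passed {
		return nil, nil // once healthy, stay healthy without re-running the check
	}
	details, err := m.check()
	if err == nil {
		m.passed = true
	}
	return details, err
}

func main() {
	calls := 0
	c := &monotonicCheck{check: func() (interface{}, error) {
		calls++
		if calls < 3 {
			return nil, errors.New("chains not bootstrapped yet")
		}
		return nil, nil
	}}
	for i := 0; i < 5; i++ {
		_, err := c.Execute()
		fmt.Println(i, err) // fails twice, then stays healthy; the inner check runs only three times
	}
}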

View File

@ -1,6 +1,16 @@
[defaults]
any_errors_fatal = true
transport = ssh
deprecation_warnings = false
host_key_checking = false
[ssh_connection]
ssh_args = -o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
# Optimisation, speeds up playbook execution by reducing network round trips.
# However, remote systems must not have requiretty set in /etc/sudoers.
pipelining = true
ssh_args =
-C
-o ControlMaster=auto
-o ControlPersist=60s
-o ForwardAgent=yes
-o UserKnownHostsFile=/dev/null

View File

@ -0,0 +1,3 @@
ava_nodes:
hosts:
localhost:

View File

@ -1,4 +1,4 @@
#!/usr/bin/env ansible-playbook
---
- name: Update the network
connection: ssh

View File

@ -1,4 +1,4 @@
#!/usr/bin/env ansible-playbook
---
- name: Update the network
connection: ssh

View File

@ -1,14 +1,9 @@
#!/usr/bin/env ansible-playbook
---
- name: Update the network
connection: ssh
gather_facts: false
hosts: all
vars:
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
repo_name: ava-labs/gecko
repo_branch: master
roles:
- name: ava-stop
- name: ava-build

View File

@ -0,0 +1,15 @@
- name: Install AVA dependencies
become: true
apt:
name:
# Build
- cmake
- curl
- g++
- libssl-dev
- libuv1-dev
- make
# Staking key management
- openssl
- python3-cryptography
state: present

View File

@ -0,0 +1,5 @@
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
repo_name: ava-labs/gecko
repo_url: https://github.com/{{ repo_name }}
repo_branch: master

View File

@ -1,6 +1,6 @@
- name: Update git clone
git:
repo: ssh://git@github.com/{{ repo_name }}.git
repo: "{{ repo_url }}"
dest: "{{ repo_folder }}"
version: "{{ repo_branch }}"
update: yes

View File

@ -0,0 +1,22 @@
# These names, & default values are based on Meson build builtin options;
# which in turn follow established *nix conventions.
# See
# https://mesonbuild.com/Builtin-options.html
# https://www.gnu.org/prep/standards/html_node/Directory-Variables.html
prefix: "/usr/local"
bindir: "{{ prefix }}/bin"
libdir: "{{ prefix }}/lib"
localstatedir: "{{ prefix | replace('/usr', '/var') }}"
sharedstatedir: "{{ localstatedir }}/lib"
# Has no Meson builtin equivalent
logdir: "{{ localstatedir }}/log"
# These names are specific to AVA. Default values are based loosely on *nix
# conventions.
ava_daemon_home_dir: "{{ sharedstatedir }}/ava"
ava_daemon_db_dir: "{{ ava_daemon_home_dir }}/db"
ava_daemon_log_dir: "{{ logdir }}/ava"
ava_daemon_plugin_dir: "{{ libdir }}/ava/plugins"
ava_daemon_staking_dir: "{{ ava_daemon_home_dir }}/staking"
ava_daemon_staking_tls_cert: "{{ ava_daemon_staking_dir }}/staker.crt"
ava_daemon_staking_tls_key: "{{ ava_daemon_staking_dir }}/staker.key"
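With the default prefix of /usr/local, these resolve to bindir=/usr/local/bin, libdir=/usr/local/lib, localstatedir=/var/local, sharedstatedir=/var/local/lib and logdir=/var/local/log, which puts the AVA home at /var/local/lib/ava, the database at /var/local/lib/ava/db, logs at /var/local/log/ava, staking material under /var/local/lib/ava/staking, and plugins at /usr/local/lib/ava/plugins.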

View File

@ -0,0 +1,64 @@
- name: Create shared directories
become: true
file:
# Don't specify owner, mode, etc. for directories not specific to AVA.
# OS defaults or local defaults are better than any guess we could make.
path: "{{ item.path }}"
state: directory
loop:
- path: "{{ sharedstatedir }}"
- path: "{{ logdir }}"
- name: Create AVA directories
become: true
file:
path: "{{ item.path }}"
owner: "{{ item.owner | default(ava_daemon_user) }}"
group: "{{ item.group | default(ava_daemon_group) }}"
mode: "{{ item.mode }}"
recurse: "{{ item.recurse | default(omit) }}"
state: directory
loop:
- path: "{{ ava_daemon_home_dir }}"
mode: u=rwX,go=rX
- path: "{{ ava_daemon_db_dir }}"
mode: u=rwX,go=rX
- path: "{{ ava_daemon_staking_dir }}"
mode: u=rX,go=
- path: "{{ ava_daemon_log_dir }}"
mode: u=rwX,go=rX
- path: "{{ ava_daemon_plugin_dir }}"
owner: root
group: root
mode: u=rwX,go=rX
recurse: true
loop_control:
label: "{{ item.path }}"
notify:
- Restart AVA service
- name: Install AVA binary
become: true
copy:
src: "{{ ava_binary }}"
dest: "{{ bindir }}/ava"
remote_src: true
owner: root
group: root
mode: u=rwx,go=rx
notify:
- Restart AVA service
- name: Install AVA plugins
become: true
copy:
src: "{{ item.path }}"
dest: "{{ ava_daemon_plugin_dir }}"
owner: root
group: root
mode: u=rwx,go=rx
remote_src: true
loop:
- path: "{{ repo_folder }}/build/plugins/evm"
notify:
- Restart AVA service

View File

@ -0,0 +1,2 @@
ava_daemon_http_host: localhost
log_level: info

View File

@ -0,0 +1,10 @@
- name: Reload systemd
become: true
systemd:
daemon_reload: true
- name: Restart AVA service
become: true
service:
name: ava
state: restarted

View File

@ -0,0 +1,21 @@
- name: Configure AVA service
become: true
template:
src: ava.service
dest: /etc/systemd/system
owner: root
group: root
mode: u=rw,go=r
notify:
- Reload systemd
- Restart AVA service
- name: Enable AVA service
become: true
systemd:
name: ava
state: started
enabled: true
daemon_reload: true
notify:
- Restart AVA service

View File

@ -0,0 +1,26 @@
# {{ ansible_managed }}
[Unit]
Description=AVA test node
Documentation=https://docs.ava.network/
After=network.target
StartLimitIntervalSec=0
[Service]
Type=simple
WorkingDirectory={{ ava_daemon_home_dir }}
Restart=always
RestartSec=1
User={{ ava_daemon_user }}
ExecStart={{ bindir }}/ava \
--public-ip="{{ ansible_facts.default_ipv4.address }}" \
--http-host="{{ ava_daemon_http_host }}" \
--db-dir="{{ ava_daemon_db_dir }}" \
--plugin-dir="{{ ava_daemon_plugin_dir }}" \
--log-dir="{{ ava_daemon_log_dir }}" \
--log-level="{{ log_level }}" \
--staking-tls-cert-file="{{ ava_daemon_staking_tls_cert }}" \
--staking-tls-key-file="{{ ava_daemon_staking_tls_key }}"
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1 @@
log_level: info

View File

@ -0,0 +1,51 @@
- name: Migrate staking key
become: true
vars:
old_key: "{{ ava_daemon_home_dir }}/keys/staker.key"
new_key: "{{ ava_daemon_home_dir }}/staking/staker.key"
block:
- name: Check for Gecko 0.2.0 staking key
stat:
path: "{{ old_key }}"
register: gecko_0_2_0_staking_key
- name: Check for Gecko newer staking key
stat:
path: "{{ new_key }}"
register: gecko_newer_staking_key
- name: Move staking key
command:
cmd: mv "{{ old_key }}" "{{ new_key }}"
creates: "{{ new_key }}"
when:
- gecko_0_2_0_staking_key.stat.exists
- not gecko_newer_staking_key.stat.exists
notify:
- Restart AVA service
- name: Migrate staking certificate
become: true
vars:
old_cert: "{{ ava_daemon_home_dir }}/keys/staker.crt"
new_cert: "{{ ava_daemon_home_dir }}/staking/staker.crt"
block:
- name: Check for Gecko 0.2.0 staking certificate
stat:
path: "{{ old_cert }}"
register: gecko_0_2_0_staking_cert
- name: Check for Gecko newer staking certificate
stat:
path: "{{ new_cert }}"
register: gecko_newer_staking_cert
- name: Migrate staking certificate
command:
cmd: mv "{{ old_cert }}" "{{ new_cert }}"
creates: "{{ new_cert }}"
when:
- gecko_0_2_0_staking_cert.stat.exists
- not gecko_newer_staking_cert.stat.exists
notify:
- Restart AVA service

View File

@ -0,0 +1 @@
- import_tasks: 10-staking-migrate.yml

View File

@ -0,0 +1,2 @@
ava_daemon_user: ava
ava_daemon_group: "{{ ava_daemon_user }}"

View File

@ -0,0 +1,15 @@
- name: Create AVA daemon group
become: true
group:
name: "{{ ava_daemon_group }}"
system: true
- name: Create AVA daemon user
become: true
user:
name: "{{ ava_daemon_user }}"
group: "{{ ava_daemon_group }}"
home: "{{ ava_daemon_home_dir }}"
shell: /bin/false
skeleton: false
system: true

View File

@ -0,0 +1,9 @@
- name: Install Go
become: true
apt:
name:
# AVA (in May 2020) requires Go 1.13. On Ubuntu 20.04 LTS this package
# provides the appropriate version.
- golang-go
tags:
- golang-base

View File

@ -0,0 +1,4 @@
- name: Set GOPATH
lineinfile:
path: ~/.bashrc
line: GOPATH=$HOME/go

View File

@ -0,0 +1,13 @@
#!/usr/bin/env ansible-playbook
---
- name: Configure AVA service
hosts: ava_nodes
roles:
- name: golang-base
- name: gopath
- name: ava-base
- name: ava-build
- name: ava-user
- name: ava-install
- name: ava-upgrade
- name: ava-service
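Taken together with the inventory above (which lists only localhost under ava_nodes), this playbook can be pointed at a fresh host and runs the roles in the order listed, roughly: set up the Go toolchain and GOPATH, install build dependencies, build Gecko, create the ava service user, install the binary and plugins, migrate any pre-existing staking key and certificate, and finally install and enable the systemd unit.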

View File

@ -1,14 +1,10 @@
#!/usr/bin/env ansible-playbook
---
- name: Update the network
connection: ssh
gather_facts: false
hosts: all
vars:
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
repo_name: ava-labs/gecko
repo_branch: master
roles:
- name: ava-stop
- name: ava-build

View File

@ -15,7 +15,7 @@ GECKO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Directory
BUILD_DIR=$GECKO_PATH/build # Where binaries go
PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go
CORETH_VER="0.2.4" # Should match coreth version in go.mod
CORETH_VER="0.2.5" # Should match coreth version in go.mod
CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER"
# Build Gecko

File diff suppressed because it is too large

View File

@ -10,6 +10,10 @@ import (
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)
const (
minMapSize = 16
)
// TopologicalFactory implements Factory by returning a topological struct
type TopologicalFactory struct{}
@ -65,12 +69,12 @@ func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier
ta.ctx.Log.Error("%s", err)
}
ta.nodes = make(map[[32]byte]Vertex)
ta.nodes = make(map[[32]byte]Vertex, minMapSize)
ta.cg = &snowstorm.Directed{}
ta.cg.Initialize(ctx, params.Parameters)
ta.frontier = make(map[[32]byte]Vertex)
ta.frontier = make(map[[32]byte]Vertex, minMapSize)
for _, vtx := range frontier {
ta.frontier[vtx.ID().Key()] = vtx
}
@ -141,7 +145,9 @@ func (ta *Topological) RecordPoll(responses ids.UniqueBag) error {
votes := ta.pushVotes(kahns, leaves)
// Update the conflict graph: O(|Transactions|)
ta.ctx.Log.Verbo("Updating consumer confidences based on:\n%s", &votes)
ta.cg.RecordPoll(votes)
if err := ta.cg.RecordPoll(votes); err != nil {
return err
}
// Update the dag: O(|Live Set|)
return ta.updateFrontiers()
}
@ -157,7 +163,7 @@ func (ta *Topological) Finalized() bool { return ta.cg.Finalized() }
// the non-transitively applied votes. Also returns the list of leaf nodes.
func (ta *Topological) calculateInDegree(
responses ids.UniqueBag) (map[[32]byte]kahnNode, []ids.ID) {
kahns := make(map[[32]byte]kahnNode)
kahns := make(map[[32]byte]kahnNode, minMapSize)
leaves := ids.Set{}
for _, vote := range responses.List() {
@ -231,6 +237,7 @@ func (ta *Topological) pushVotes(
kahnNodes map[[32]byte]kahnNode,
leaves []ids.ID) ids.Bag {
votes := make(ids.UniqueBag)
txConflicts := make(map[[32]byte]ids.Set, minMapSize)
for len(leaves) > 0 {
newLeavesSize := len(leaves) - 1
@ -245,6 +252,12 @@ func (ta *Topological) pushVotes(
// Give the votes to the consumer
txID := tx.ID()
votes.UnionSet(txID, kahn.votes)
// Map txID to set of Conflicts
txKey := txID.Key()
if _, exists := txConflicts[txKey]; !exists {
txConflicts[txKey] = ta.cg.Conflicts(tx)
}
}
for _, dep := range vtx.Parents() {
@ -265,6 +278,18 @@ func (ta *Topological) pushVotes(
}
}
// Create bag of votes for conflicting transactions
conflictingVotes := make(ids.UniqueBag)
for txHash, conflicts := range txConflicts {
txID := ids.NewID(txHash)
for conflictTxHash := range conflicts {
conflictTxID := ids.NewID(conflictTxHash)
conflictingVotes.UnionSet(txID, votes.GetSet(conflictTxID))
}
}
votes.Difference(&conflictingVotes)
return votes.Bag(ta.params.Alpha)
}
@ -422,9 +447,9 @@ func (ta *Topological) updateFrontiers() error {
ta.preferred.Clear()
ta.virtuous.Clear()
ta.orphans.Clear()
ta.frontier = make(map[[32]byte]Vertex)
ta.preferenceCache = make(map[[32]byte]bool)
ta.virtuousCache = make(map[[32]byte]bool)
ta.frontier = make(map[[32]byte]Vertex, minMapSize)
ta.preferenceCache = make(map[[32]byte]bool, minMapSize)
ta.virtuousCache = make(map[[32]byte]bool, minMapSize)
ta.orphans.Union(ta.cg.Virtuous()) // Initially, nothing is preferred
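The new logic in pushVotes records, for every transaction that received votes, the set of transactions it conflicts with, then strips from each transaction's vote set any voter that also voted for one of its conflicts, so a single poll response can no longer count toward both sides of a conflict. A stand-alone sketch of that cancellation step using plain maps instead of the ids.UniqueBag/ids.Set types:

package main

import "fmt"

func main() {
	// txID -> set of voter indices that voted for it
	votes := map[string]map[int]bool{
		"txA": {0: true, 1: true},
		"txB": {1: true, 2: true},
	}
	// txID -> transactions it conflicts with
	conflicts := map[string][]string{"txA": {"txB"}, "txB": {"txA"}}

	// Build the cancellation sets from the original votes first (mirrors UnionSet)...
	cancel := map[string]map[int]bool{}
	for tx, cs := range conflicts {
		cancel[tx] = map[int]bool{}
		for _, other := range cs {
			for voter := range votes[other] {
				cancel[tx][voter] = true
			}
		}
	}
	// ...then subtract them from each transaction's votes (mirrors votes.Difference).
	for tx, voters := range cancel {
		for voter := range voters {
			delete(votes[tx], voter)
		}
	}
	fmt.Println(votes) // only voter 0 still counts for txA, and only voter 2 for txB
}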

View File

@ -4,758 +4,7 @@
package avalanche
import (
"math"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/snowball"
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)
func TestTopologicalParams(t *testing.T) { ParamsTest(t, TopologicalFactory{}) }
func TestTopologicalAdd(t *testing.T) { AddTest(t, TopologicalFactory{}) }
func TestTopologicalVertexIssued(t *testing.T) { VertexIssuedTest(t, TopologicalFactory{}) }
func TestTopologicalTxIssued(t *testing.T) { TxIssuedTest(t, TopologicalFactory{}) }
func TestAvalancheVoting(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
sm := make(ids.UniqueBag)
sm.Add(0, vtx1.id)
sm.Add(1, vtx1.id)
ta.RecordPoll(sm)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
}
ta.RecordPoll(sm)
if !ta.Finalized() {
t.Fatalf("An avalanche instance finalized too late")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Rejected {
t.Fatalf("Tx should have been rejected")
} else if tx1.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
}
func TestAvalancheTransitiveVoting(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[1])
vtx1 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 2,
status: choices.Processing,
}
vtx2 := &Vtx{
dependencies: []Vertex{vtx1},
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 3,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
ta.Add(vtx2)
sm1 := make(ids.UniqueBag)
sm1.Add(0, vtx0.id)
sm1.Add(1, vtx2.id)
ta.RecordPoll(sm1)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
sm2 := make(ids.UniqueBag)
sm2.Add(0, vtx2.id)
sm2.Add(1, vtx2.id)
ta.RecordPoll(sm2)
if !ta.Finalized() {
t.Fatalf("An avalanche instance finalized too late")
} else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
} else if tx1.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
}
func TestAvalancheSplitVoting(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
sm1 := make(ids.UniqueBag)
sm1.Add(0, vtx0.id)
sm1.Add(1, vtx1.id)
ta.RecordPoll(sm1)
if !ta.Finalized() {
t.Fatalf("An avalanche instance finalized too late")
} else if !ids.UnsortedEquals([]ids.ID{vtx0.id, vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
}
func TestAvalancheTransitiveRejection(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
ta.Add(vtx2)
sm := make(ids.UniqueBag)
sm.Add(0, vtx1.id)
sm.Add(1, vtx1.id)
ta.RecordPoll(sm)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
}
ta.RecordPoll(sm)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Rejected {
t.Fatalf("Tx should have been rejected")
} else if tx1.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
} else if tx2.Status() != choices.Processing {
t.Fatalf("Tx should not have been decided")
}
ta.preferenceCache = make(map[[32]byte]bool)
ta.virtuousCache = make(map[[32]byte]bool)
ta.update(vtx2)
}
func TestAvalancheVirtuous(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
if virtuous := ta.Virtuous(); virtuous.Len() != 1 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vtx0.id) {
t.Fatalf("Wrong virtuous")
}
ta.Add(vtx1)
if virtuous := ta.Virtuous(); virtuous.Len() != 1 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vtx0.id) {
t.Fatalf("Wrong virtuous")
}
ta.updateFrontiers()
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
ta.Add(vtx2)
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
ta.updateFrontiers()
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
}
func TestAvalancheIsVirtuous(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
if !ta.IsVirtuous(tx0) {
t.Fatalf("Should be virtuous.")
} else if !ta.IsVirtuous(tx1) {
t.Fatalf("Should be virtuous.")
}
ta.Add(vtx0)
if !ta.IsVirtuous(tx0) {
t.Fatalf("Should be virtuous.")
} else if ta.IsVirtuous(tx1) {
t.Fatalf("Should not be virtuous.")
}
ta.Add(vtx1)
if ta.IsVirtuous(tx0) {
t.Fatalf("Should not be virtuous.")
} else if ta.IsVirtuous(tx1) {
t.Fatalf("Should not be virtuous.")
}
}
func TestAvalancheQuiesce(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
if ta.Quiesce() {
t.Fatalf("Shouldn't quiesce")
}
ta.Add(vtx1)
if !ta.Quiesce() {
t.Fatalf("Should quiesce")
}
ta.Add(vtx2)
if ta.Quiesce() {
t.Fatalf("Shouldn't quiesce")
}
sm := make(ids.UniqueBag)
sm.Add(0, vtx2.id)
ta.RecordPoll(sm)
if !ta.Quiesce() {
t.Fatalf("Should quiesce")
}
}
func TestAvalancheOrphans(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: math.MaxInt32,
BetaRogue: math.MaxInt32,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
if orphans := ta.Orphans(); orphans.Len() != 0 {
t.Fatalf("Wrong number of orphans")
}
ta.Add(vtx1)
if orphans := ta.Orphans(); orphans.Len() != 0 {
t.Fatalf("Wrong number of orphans")
}
ta.Add(vtx2)
if orphans := ta.Orphans(); orphans.Len() != 0 {
t.Fatalf("Wrong number of orphans")
}
sm := make(ids.UniqueBag)
sm.Add(0, vtx1.id)
ta.RecordPoll(sm)
if orphans := ta.Orphans(); orphans.Len() != 1 {
t.Fatalf("Wrong number of orphans")
} else if !orphans.Contains(tx2.ID()) {
t.Fatalf("Wrong orphan")
}
}
func TestTopological(t *testing.T) { ConsensusTest(t, TopologicalFactory{}) }

View File

@ -19,7 +19,8 @@ type Vtx struct {
height uint64
status choices.Status
bytes []byte
Validity error
bytes []byte
}
func (v *Vtx) ID() ids.ID { return v.id }
@ -28,9 +29,8 @@ func (v *Vtx) Parents() []Vertex { return v.dependencies }
func (v *Vtx) Height() uint64 { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx { return v.txs }
func (v *Vtx) Status() choices.Status { return v.status }
func (v *Vtx) Live() {}
func (v *Vtx) Accept() error { v.status = choices.Accepted; return nil }
func (v *Vtx) Reject() error { v.status = choices.Rejected; return nil }
func (v *Vtx) Accept() error { v.status = choices.Accepted; return v.Validity }
func (v *Vtx) Reject() error { v.status = choices.Rejected; return v.Validity }
func (v *Vtx) Bytes() []byte { return v.bytes }
type sortVts []*Vtx

View File

@ -1,48 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowball
import (
"github.com/ava-labs/gecko/ids"
)
// ByzantineFactory implements Factory by returning a byzantine struct
type ByzantineFactory struct{}
// New implements Factory
func (ByzantineFactory) New() Consensus { return &Byzantine{} }
// Byzantine is a naive implementation of a multi-choice snowball instance
type Byzantine struct {
// params contains all the configurations of a snowball instance
params Parameters
// Hardcode the preference
preference ids.ID
}
// Initialize implements the Consensus interface
func (b *Byzantine) Initialize(params Parameters, choice ids.ID) {
b.params = params
b.preference = choice
}
// Parameters implements the Consensus interface
func (b *Byzantine) Parameters() Parameters { return b.params }
// Add implements the Consensus interface
func (b *Byzantine) Add(choice ids.ID) {}
// Preference implements the Consensus interface
func (b *Byzantine) Preference() ids.ID { return b.preference }
// RecordPoll implements the Consensus interface
func (b *Byzantine) RecordPoll(votes ids.Bag) {}
// RecordUnsuccessfulPoll implements the Consensus interface
func (b *Byzantine) RecordUnsuccessfulPoll() {}
// Finalized implements the Consensus interface
func (b *Byzantine) Finalized() bool { return true }
func (b *Byzantine) String() string { return b.preference.String() }

View File

@ -1,54 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowball
import (
"testing"
"github.com/ava-labs/gecko/ids"
"github.com/prometheus/client_golang/prometheus"
)
func TestByzantine(t *testing.T) {
params := Parameters{
Metrics: prometheus.NewRegistry(),
K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5,
}
byzFactory := ByzantineFactory{}
byz := byzFactory.New()
byz.Initialize(params, Blue)
if ret := byz.Parameters(); ret != params {
t.Fatalf("Should have returned the correct params")
}
byz.Add(Green)
if pref := byz.Preference(); !pref.Equals(Blue) {
t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref)
}
oneGreen := ids.Bag{}
oneGreen.Add(Green)
byz.RecordPoll(oneGreen)
if pref := byz.Preference(); !pref.Equals(Blue) {
t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref)
}
byz.RecordUnsuccessfulPoll()
if pref := byz.Preference(); !pref.Equals(Blue) {
t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref)
}
if final := byz.Finalized(); !final {
t.Fatalf("Should be marked as accepted")
}
if str := byz.String(); str != Blue.String() {
t.Fatalf("Wrong string, expected %s returned %s", Blue, str)
}
}

View File

@ -11,6 +11,46 @@ import (
"github.com/ava-labs/gecko/ids"
)
// ByzantineFactory implements Factory by returning a byzantine struct
type ByzantineFactory struct{}
// New implements Factory
func (ByzantineFactory) New() Consensus { return &Byzantine{} }
// Byzantine is a naive implementation of a multi-choice snowball instance
type Byzantine struct {
// params contains all the configurations of a snowball instance
params Parameters
// Hardcode the preference
preference ids.ID
}
// Initialize implements the Consensus interface
func (b *Byzantine) Initialize(params Parameters, choice ids.ID) {
b.params = params
b.preference = choice
}
// Parameters implements the Consensus interface
func (b *Byzantine) Parameters() Parameters { return b.params }
// Add implements the Consensus interface
func (b *Byzantine) Add(choice ids.ID) {}
// Preference implements the Consensus interface
func (b *Byzantine) Preference() ids.ID { return b.preference }
// RecordPoll implements the Consensus interface
func (b *Byzantine) RecordPoll(votes ids.Bag) {}
// RecordUnsuccessfulPoll implements the Consensus interface
func (b *Byzantine) RecordUnsuccessfulPoll() {}
// Finalized implements the Consensus interface
func (b *Byzantine) Finalized() bool { return true }
func (b *Byzantine) String() string { return b.preference.String() }
var (
Red = ids.Empty.Prefix(0)
Blue = ids.Empty.Prefix(1)

View File

@ -34,7 +34,7 @@ func (f *Flat) Parameters() Parameters { return f.params }
// RecordPoll implements the Consensus interface
func (f *Flat) RecordPoll(votes ids.Bag) {
if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha {
f.nnarySnowball.RecordSuccessfulPoll(pollMode)
f.RecordSuccessfulPoll(pollMode)
} else {
f.RecordUnsuccessfulPoll()
}

View File

@ -51,7 +51,7 @@ func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) {
return // This instance is already decided.
}
if preference := sf.nnarySlush.Preference(); preference.Equals(choice) {
if preference := sf.Preference(); preference.Equals(choice) {
sf.confidence++
} else {
// confidence is set to 1 because there has already been 1 successful

View File

@ -125,14 +125,14 @@ func TestParametersAnotherInvalidBetaRogue(t *testing.T) {
func TestParametersInvalidConcurrentRepolls(t *testing.T) {
tests := []Parameters{
Parameters{
{
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 2,
},
Parameters{
{
K: 1,
Alpha: 1,
BetaVirtuous: 1,

View File

@ -27,11 +27,13 @@ func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball {
bs := &binarySnowball{
binarySnowflake: binarySnowflake{
binarySlush: binarySlush{preference: choice},
confidence: sb.confidence,
beta: beta,
finalized: sb.Finalized(),
},
preference: choice,
}
bs.numSuccessfulPolls[choice] = sb.numSuccessfulPolls
return bs
}

Some files were not shown because too many files have changed in this diff